repo_name (string, 7-92 chars) | path (string, 5-149 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 911-693k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
wzbozon/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
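# A standalone, illustrative helper (hedged: it is never called and the shapes and
# values are invented) spelling out the comment in score() above: with
# n_max_train_sizes=20 and a fit on 10 samples, the mock scores its own training
# data at 2 - 10/20 = 1.5 and any other data at 10/20 = 0.5.
def _mock_scoring_example():
    mock = MockImprovingEstimator(20)
    mock.fit(np.zeros((10, 1)))
    assert mock.score(mock.X_subset) == 1.5  # training data: 2 - 10/20
    assert mock.score(np.ones((5, 1))) == 0.5  # any other data: 10/20
    return mock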
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
irblsensitivity/irblsensitivity | scripts/analysis/MWU_Project_EMSE.py | 1 | 9231 | #-*- coding: utf-8 -*-
'''
Created on 2017. 02. 12
Updated on 2017. 02. 12
'''
from __future__ import print_function
import os
import re
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from scipy.stats import mannwhitneyu, pearsonr
from ExpBase import ExpBase
import numpy as np
from commons import Subjects
class MWUTest(ExpBase):
techniques = ['BugLocator', 'BRTracer', 'BLUiR', 'AmaLgam', 'BLIA', 'Locus']
validDigits = {
'AvgLOC': 2, 'InvNSrc': 4, 'AvgCC': 4, 'SrcAvgDistTk': 2, 'SrcAvgNTk': 2, 'SrcRatioDict': 4, 'NSrc': 2, 'SrcNumCmt': 4, 'SrcNDistTk': 0, 'SrcLocalDistTk': 3, 'SrcRatioCmt': 4, 'SrcNumMhd': 4, 'RatioEnum': 4,
'RepAvgTk': 2, 'NReport': 0, 'RepNDistTk': 0, 'RepAvgDistTk': 3, 'RepAvgLocalTk':4, 'RepAvgCE': 4, 'RatioCode': 4, 'RatioSTrace': 4, '|STinterRT|': 0,
'AvgMinIRf': 4, 'AvgMaxIRf': 4, 'AvgMeanIRf': 4, 'KSDist': 4, 'AvgUIRf': 4, 'AvgProdIRf': 4, 'hasCE': 4,
'hasSTrace': 4, 'hasCR': 4, 'hasEnum': 4,
'NTk':2, 'NDistTk':3, 'NLocalTk':4, 'NDistCE':3
}
featureorders = {
'01': ['AvgLOC', 'AvgCC', 'SrcAvgNTk', 'SrcAvgDistTk', 'SrcLocalDistTk', 'SrcNDistTk', 'NSrc', 'InvNSrc',
'SrcNumMhd',
'SrcNumCmt', 'SrcRatioCmt', 'SrcRatioDict'],
'02': ['RatioEnum', 'RatioSTrace', 'RatioCode', 'RepNDistTk', 'RepAvgTk', 'RepAvgDistTk', 'RepAvgLocalTk', 'RepAvgCE',
'NReport'],
'03': ['|STinterRT|', 'KSDist', 'AvgProdIRf', 'AvgMinIRf', 'AvgMaxIRf', 'AvgMeanIRf', 'AvgUIRf'],
'04': ['hasEnum', 'hasSTrace', 'hasCR', 'hasCE'],
'05': ['NTk', 'NDistTk', 'NLocalTk', 'NDistCE']
}
def MWUtest(self, _dataA, _dataB, _bugsA=None, _bugsB=None):
'''
Mann-Whitney U Test between IRBL technique results
(a hedged, standalone sketch of the underlying scipy call is given near the end of this file)
:param _dataA: the results of type A
:param _dataB: the results of type B
:param _bugsA: the count of bugs for each technique
:param _bugsB: the count of bugs for each technique
:return: {technique : pvalue, technique : pvalue, ...}
'''
results = {}
for idx in range(len(self.techniques)):
filteredDataA = [items[idx] for items in _dataA.values()]
filteredDataB = [items[idx] for items in _dataB.values()]
#filteredDataA, labels = self.get_array_items(_dataA, idx)
#filteredDataB, labels = self.get_array_items(_dataB, idx)
if _bugsA is not None:
if isinstance(_bugsA, dict) is True:
filteredDataA += ([0] * (_bugsA[self.techniques[idx]] - len(filteredDataA)))
else:
filteredDataA += ([0] * (_bugsA - len(filteredDataA)))
if _bugsB is not None:
if isinstance(_bugsB, dict) is True:
filteredDataB += ([0] * (_bugsB[self.techniques[idx]] - len(filteredDataB)))
else:
filteredDataB += ([0] * (_bugsB - len(filteredDataB)))
#slope, intercept, r_value, p_value, stderr = stats.linregress(dataMAP, dataFeature)
t_statistic, t_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided')
l_statistic, l_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='less')
g_statistic, g_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='greater')
pvalue = min(t_pvalue , l_pvalue, g_pvalue)
#statistic, pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided') # 'less', 'two-sided', 'greater'
results[self.techniques[idx]] = pvalue
return results
def get_technique_averages(self, _source, _counts):
'''
:param _source: project's bug results dict
:param _counts: original bug counts for each technique
:return:
'''
results = {}
for idx in range(len(self.techniques)):
sumValue = 0
for itemID, item in _source.iteritems():
sumValue += item[idx]
results[self.techniques[idx]] = sumValue / float(_counts[self.techniques[idx]])
return results
def compare_single_results(self, _basepath):
'''
for Table 7 : single results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_averages(_itemType):
results = {}
for tData in ['Old', 'New_Single']:
filepath = os.path.join(_basepath, u'%s_%s.txt' % (tData, _itemType))
titles, data = self.load_results_items(filepath, ['str'] * 3 + ['float'] * 6)
for group in data:
if group not in results: results[group] = {}
for project in data[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(data[group][project], CNTs)
return results
APresults = get_averages('AP')
TPresults = get_averages('TP')
features = self.extract_features(_basepath)
print(u'Technique Mann-Whitney U Test p-values')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
S.groups.append(u'Previous')
S.projects[u'Previous'] = [u'AspectJ', u'ZXing', u'PDE', u'JDT', u'SWT']
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f}' % TPresults[group][project][tech]
# if group in features:
# for fid in [u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk']:
# text += u' & %.4f' % features[group][project][fid]
# text += u' \\\\'
# else:
# text += u' & & & & \\\\'
text += u' \\\\'
print(text)
pass
def compare_multi_results(self, _basepath):
'''
for Table 7 : multiple results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_average_mwu(_itemType):
results = {}
multi = os.path.join(_basepath, u'New_Multiple_%s.txt' % _itemType)
titles, dataM = self.load_results_items(multi, ['str'] * 3 + ['float'] * 6)
# MWUresults = {}
# single = os.path.join(_basepath, u'New_Single_%s.txt' % _itemType)
# titles, dataS = self.load_results_items(single, ['str'] * 3 + ['float'] * 6)
for group in dataM:
if group not in results: results[group] = {}
#if group not in MWUresults: MWUresults[group] = {}
for project in dataM[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(dataM[group][project], CNTs)
#MWUresults[group][project] = self.MWUtest(dataS[group][project], dataM[group][project], CNTs, CNTs)
return results #, MWUresults
APresults = get_average_mwu('AP')
TPresults = get_average_mwu('TP')
print(u'')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f ' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f} ' % TPresults[group][project][tech]
print(text, end=u'')
print(u' \\\\')
pass
def extract_features(self, _basepath):
titles, data = self.load_results(os.path.join(_basepath, u'02_PW_Bug_Features.txt'), ['str'] * 2 + ['int'] + ['float'] * 3 + ['int', 'float'] )
for group in data:
for project in data[group]:
item = data[group][project]
data[group][project] = dict(zip([u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk'], [item[1], item[2], item[3], item[5]]))
return data
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
basepath = u'/mnt/exp/Bug/analysis/'
obj = MWUTest()
obj.compare_multi_results(basepath)
obj.compare_single_results(basepath)
# obj.compare_test(basepath)
#obj.calc_pearson(basepath)
#obj.compare_dup_results(basepath)
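# Hedged, standalone sketch (never called; the numbers below are invented): this is
# the scipy call underlying MWUTest.MWUtest above, shown on two small lists of
# per-bug results so that the {technique: pvalue} output of that method is easier to read.
def _mwu_sketch():
    resultsA = [0.31, 0.42, 0.27, 0.55, 0.40]  # e.g. AP of one technique in setting A
    resultsB = [0.12, 0.18, 0.22, 0.09, 0.15]  # the same technique in setting B
    statistic, pvalue = mannwhitneyu(resultsA, resultsB, use_continuity=True, alternative='two-sided')
    # a small p-value (e.g. < 0.05) means the two result distributions differ significantly
    return pvalue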
| apache-2.0 |
fmfn/UnbalancedDataset | examples/under-sampling/plot_illustration_tomek_links.py | 2 | 3180 | """
==============================================
Illustration of the definition of a Tomek link
==============================================
This example illustrates what a Tomek link is.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# This function allows us to make nice plots
# %%
def make_plot_despine(ax):
sns.despine(ax=ax, offset=10)
ax.set_xlim([0, 3])
ax.set_ylim([0, 3])
ax.set_xlabel(r"$X_1$")
ax.set_ylabel(r"$X_2$")
ax.legend(loc="lower right")
# %% [markdown]
# We will generate some toy data that illustrates how
# :class:`~imblearn.under_sampling.TomekLinks` is used to clean a dataset.
# %%
import numpy as np
rng = np.random.RandomState(18)
X_minority = np.transpose(
[[1.1, 1.3, 1.15, 0.8, 0.55, 2.1], [1.0, 1.5, 1.7, 2.5, 0.55, 1.9]]
)
X_majority = np.transpose(
[
[2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],
[1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],
]
)
# %% [markdown]
# In the figure below, the samples highlighted in green form a Tomek link since
# they are of different classes and are nearest neighbors of each other.
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
make_plot_despine(ax)
fig.suptitle("Illustration of a Tomek link")
fig.tight_layout()
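# %% [markdown]
# As a hedged, numerical cross-check (an illustration only; the names `link_a` and
# `link_b` are introduced here and are not used elsewhere), we can verify that the
# two highlighted points are nearest neighbors of each other while belonging to
# different classes, which is exactly the definition of a Tomek link.

# %%
X_all = np.vstack((X_minority, X_majority))
link_a = X_minority.shape[0] - 1  # highlighted minority sample
link_b = X_minority.shape[0] + 1  # highlighted majority sample
dist_from_a = np.linalg.norm(X_all - X_all[link_a], axis=1)
dist_from_a[link_a] = np.inf  # ignore the point itself
print("nearest neighbor of the minority sample is the majority sample:",
      dist_from_a.argmin() == link_b)
dist_from_b = np.linalg.norm(X_all - X_all[link_b], axis=1)
dist_from_b[link_b] = np.inf
print("nearest neighbor of the majority sample is the minority sample:",
      dist_from_b.argmin() == link_a)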
# %% [markdown]
# We can run the :class:`~imblearn.under_sampling.TomekLinks` sampling to
# remove the corresponding samples. If `sampling_strategy='auto'` only the
# sample from the majority class will be removed. If `sampling_strategy='all'`
# both samples will be removed.
# %%
from imblearn.under_sampling import TomekLinks
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
samplers = {
"Removing only majority samples": TomekLinks(sampling_strategy="auto"),
"Removing all samples": TomekLinks(sampling_strategy="all"),
}
for ax, (title, sampler) in zip(axs, samplers.items()):
X_res, y_res = sampler.fit_resample(
np.vstack((X_minority, X_majority)),
np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),
)
ax.scatter(
X_res[y_res == 0][:, 0],
X_res[y_res == 0][:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_res[y_res == 1][:, 0],
X_res[y_res == 1][:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
ax.set_title(title)
make_plot_despine(ax)
fig.tight_layout()
plt.show()
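# %% [markdown]
# A hedged aside (illustration only): counting the samples per class after
# resampling makes the difference between the two strategies explicit, without
# relying on the figures above.

# %%
from collections import Counter

X_all = np.vstack((X_minority, X_majority))
y_all = np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0])
print("original class counts:", Counter(y_all))
for title, sampler in samplers.items():
    _, y_res = sampler.fit_resample(X_all, y_all)
    print(title, "->", Counter(y_res))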
| mit |
waylonflinn/bquery | bquery/benchmarks/bench_groupby.py | 2 | 2465 | from __future__ import print_function
# bench related imports
import numpy as np
import shutil
import bquery
import pandas as pd
import itertools as itt
import cytoolz
import cytoolz.dicttoolz
from toolz import valmap, compose
from cytoolz.curried import pluck
import blaze as blz
# other imports
import contextlib
import os
import time
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
t_elapsed = 0.0
@contextlib.contextmanager
def ctime(message=None):
"Counts the time spent in some context"
global t_elapsed
t_elapsed = 0.0
print('\n')
t = time.time()
yield
if message:
print(message + ": ", end='')
t_elapsed = time.time() - t
print(round(t_elapsed, 4), "sec")
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])
rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
shutil.rmtree(rootdir)
n_rows = 1000000
print('Rows: ', n_rows)
# -- data
z = np.fromiter(((a, b, x, y) for a, b, x, y in izip(ga, gb, gx, gy)),
dtype='S2,S2,i8,i8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir, )
print(ct)
# -- pandas --
df = pd.DataFrame(z)
with ctime(message='pandas'):
result = df.groupby(['f0'])['f2'].sum()
print(result)
t_pandas = t_elapsed
# -- cytoolz --
with ctime(message='cytoolz over bcolz'):
# In Memory Split-Apply-Combine
# http://toolz.readthedocs.org/en/latest/streaming-analytics.html?highlight=reduce#split-apply-combine-with-groupby-and-reduceby
r = cytoolz.groupby(lambda row: row.f0, ct)
result = valmap(compose(sum, pluck(2)), r)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
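# Hedged aside (illustration only, with made-up rows): what the cytoolz pipeline
# above does. groupby buckets the rows by their first field, pluck(2) picks the
# third field of every row in a bucket, sum adds them up, and valmap applies that
# composed function to every bucket.
_demo_rows = [('ES', 'b1', 1, -1), ('NL', 'b2', 2, -2), ('ES', 'b3', 1, -1)]
_demo_groups = cytoolz.groupby(lambda row: row[0], _demo_rows)
print(valmap(compose(sum, pluck(2)), _demo_groups))  # -> {'ES': 2, 'NL': 2}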
# -- blaze + bcolz --
blaze_data = blz.Data(ct.rootdir)
expr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())
with ctime(message='blaze over bcolz'):
result = blz.compute(expr)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
# -- bquery --
with ctime(message='bquery over bcolz'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
ct.cache_factor(['f0'], refresh=True)
with ctime(message='bquery over bcolz (factorization cached)'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
shutil.rmtree(rootdir)
| bsd-3-clause |
HiSPARC/sapphire | scripts/simulations/analyze_shower_front.py | 1 | 5153 | import numpy as np
import tables
from scipy.optimize import curve_fit
from scipy.stats import scoreatpercentile
from artist import GraphArtist
from pylab import *
import matplotlib.pyplot as plt
import utils
USE_TEX = False
# For matplotlib plots
if USE_TEX:
rcParams['font.serif'] = 'Computer Modern'
rcParams['font.sans-serif'] = 'Computer Modern'
rcParams['font.family'] = 'sans-serif'
rcParams['figure.figsize'] = [4 * x for x in (1, 2. / 3)]
rcParams['figure.subplot.left'] = 0.175
rcParams['figure.subplot.bottom'] = 0.175
rcParams['font.size'] = 10
rcParams['legend.fontsize'] = 'small'
rcParams['text.usetex'] = True
def main():
global data
data = tables.open_file('master-ch4v2.h5', 'r')
#utils.set_suffix('E_1PeV')
#scatterplot_core_distance_vs_time()
#median_core_distance_vs_time()
boxplot_core_distance_vs_time()
#hists_core_distance_vs_time()
plot_front_passage()
def scatterplot_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
plt.loglog(electrons[:]['core_distance'], electrons[:]['arrival_time'], ',')
plt.xlim(1e0, 1e2)
plt.ylim(1e-3, 1e3)
plt.xlabel("Core distance [m]")
plt.ylabel("Arrival time [ns]")
utils.title("Shower front timing structure")
utils.saveplot()
def median_core_distance_vs_time():
plt.figure()
plot_and_fit_statistic(lambda a: scoreatpercentile(a, 25))
plot_and_fit_statistic(lambda a: scoreatpercentile(a, 75))
utils.title("Shower front timing structure (25, 75 %)")
utils.saveplot()
plt.xlabel("Core distance [m]")
plt.ylabel("Median arrival time [ns]")
legend(loc='lower right')
def plot_and_fit_statistic(func):
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
bins = np.logspace(0, 2, 25)
x, y = [], []
for low, high in zip(bins[:-1], bins[1:]):
sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')
statistic = func(sel[:]['arrival_time'])
x.append(np.mean([low, high]))
y.append(statistic)
plt.loglog(x, y)
logx = log10(x)
logy = log10(y)
logf = lambda x, a, b: a * x + b
g = lambda x, a, b: 10 ** logf(log10(x), a, b)
popt, pcov = curve_fit(logf, logx, logy)
plot(x, g(x, *popt), label="f(x) = %.2e * x ^ %.2e" % (10 ** popt[1],
popt[0]))
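def _powerlaw_fit_demo():
    """Hedged aside (not called anywhere; the data below are synthetic): why the
    fit above is done in log-log space. If y = c * x**a, then
    log10(y) = a * log10(x) + log10(c), so a straight-line fit of the logs
    recovers the exponent a and the prefactor c = 10**intercept."""
    x = np.linspace(1.0, 100.0, 50)
    y = 3.0 * x ** 0.5  # synthetic, noiseless power law
    slope, intercept = np.polyfit(np.log10(x), np.log10(y), 1)
    return 10 ** intercept, slope  # approximately (3.0, 0.5)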
def boxplot_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0.shower_0
leptons = sim.leptons
#bins = np.logspace(0, 2, 25)
bins = np.linspace(0, 100, 15)
x, arrival_time, widths = [], [], []
t25, t50, t75 = [], [], []
for low, high in zip(bins[:-1], bins[1:]):
sel = leptons.read_where('(low < core_distance) & (core_distance <= high)')
x.append(np.mean([low, high]))
arrival_time.append(sel[:]['arrival_time'])
widths.append((high - low) / 2)
ts = sel[:]['arrival_time']
t25.append(scoreatpercentile(ts, 25))
t50.append(scoreatpercentile(ts, 50))
t75.append(scoreatpercentile(ts, 75))
fill_between(x, t25, t75, color='0.75')
plot(x, t50, 'o-', color='black')
plt.xlabel("Core distance [m]")
plt.ylabel("Arrival time [ns]")
#utils.title("Shower front timing structure")
utils.saveplot()
graph = GraphArtist()
graph.plot(x, t50, linestyle=None)
graph.shade_region(x, t25, t75)
graph.set_xlabel(r"Core distance [\si{\meter}]")
graph.set_ylabel(r"Arrival time [\si{\nano\second}]")
graph.set_ylimits(0, 30)
graph.set_xlimits(0, 100)
graph.save('plots/front-passage-vs-R')
def hists_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
bins = np.logspace(0, 2, 5)
for low, high in zip(bins[:-1], bins[1:]):
sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')
arrival_time = sel[:]['arrival_time']
plt.hist(arrival_time, bins=np.logspace(-2, 3, 50), histtype='step',
label="%.2f <= log10(R) < %.2f" % (np.log10(low),
np.log10(high)))
plt.xscale('log')
plt.xlabel("Arrival Time [ns]")
plt.ylabel("Count")
plt.legend(loc='upper left')
utils.title("Shower front timing structure")
utils.saveplot()
def plot_front_passage():
sim = data.root.showers.E_1PeV.zenith_0.shower_0
leptons = sim.leptons
R = 40
dR = 2
low = R - dR
high = R + dR
global t
t = leptons.read_where('(low < core_distance) & (core_distance <= high)',
field='arrival_time')
n, bins, patches = hist(t, bins=linspace(0, 30, 31), histtype='step')
graph = GraphArtist()
graph.histogram(n, bins)
graph.set_xlabel(r"Arrival time [\si{\nano\second}]")
graph.set_ylabel("Number of leptons")
graph.set_ylimits(min=0)
graph.set_xlimits(0, 30)
graph.save('plots/front-passage')
if __name__ == '__main__':
main()
| gpl-3.0 |
jmbeuken/abinit | scripts/post_processing/abinit_eignc_to_bandstructure.py | 3 | 47417 | #!/usr/bin/python
#=================================================================#
# Script to plot the bandstructure from an abinit bandstructure #
# _EIG.nc netcdf file or from a wannier bandstructure, or from #
# an _EIG.nc file+GW file+ bandstructure _EIG.nc file #
#=================================================================#
#########
#IMPORTS#
#########
import numpy as N
import matplotlib.pyplot as P
import netCDF4 as nc
import sys
import os
import argparse
import time
#############
##VARIABLES##
#############
class VariableContainer:pass
#Constants
csts = VariableContainer()
csts.hartree2ev = N.float(27.211396132)
csts.ev2hartree = N.float(1/csts.hartree2ev)
csts.sqrtpi = N.float(N.sqrt(N.pi))
csts.invsqrtpi = N.float(1/csts.sqrtpi)
csts.TOLKPTS = N.float(0.00001)
###########
##CLASSES##
###########
class PolynomialFit(object):
def __init__(self):
self.degree = 2
class EigenvalueContainer(object):
nsppol = None
nkpt = None
mband = None
eigenvalues = None
units = None
wtk = None
filename = None
filefullpath = None
bd_indices = None
eigenvalue_type = None
kpoints = None
#kpoint_sampling_type: can be Monkhorst-Pack or Bandstructure
KPT_W90_TOL = N.float(1.0e-6)
KPT_DFT_TOL = N.float(1.0e-8)
kpoint_sampling_type = 'Monkhorst-Pack'
inputgvectors = None
gvectors = None
special_kpoints = None
special_kpoints_names = None
special_kpoints_indices = None
kpoint_path_values = None
kpoint_reduced_path_values = None
kpoint_path_length = None
#reduced_norm = None
norm_paths = None
norm_reduced_paths = None
def __init__(self,directory=None,filename=None):
if filename == None:return
if directory == None:directory='.'
self.filename = filename
self.filefullpath = '%s/%s' %(directory,filename)
self.file_open(self.filefullpath)
def set_kpoint_sampling_type(self,kpoint_sampling_type):
if kpoint_sampling_type != 'Monkhorst-Pack' and kpoint_sampling_type != 'Bandstructure':
print 'ERROR: kpoint_sampling_type "%s" does not exist' %kpoint_sampling_type
print ' it should be "Monkhorst-Pack" or "Bandstructure" ... exit'
sys.exit()
self.kpoint_sampling_type = kpoint_sampling_type
def correct_kpt(self,kpoint,tolerance=N.float(1.0e-6)):
kpt_correct = N.array(kpoint,N.float)
changed = False
for ii in range(3):
if N.allclose(kpoint[ii],N.float(1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/3.0)
changed = True
elif N.allclose(kpoint[ii],N.float(1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/3.0)
changed = True
if changed:
print 'COMMENT: kpoint %15.12f %15.12f %15.12f has been changed to %15.12f %15.12f %15.12f' %(kpoint[0],kpoint[1],kpoint[2],kpt_correct[0],kpt_correct[1],kpt_correct[2])
return kpt_correct
def find_special_kpoints(self,gvectors=None):
if self.kpoint_sampling_type != 'Bandstructure':
print 'ERROR: special kpoints are useful only for bandstructures ... returning find_special_kpoints'
return
if self.eigenvalue_type == 'W90':
correct_kpt_tolerance = N.float(1.0e-4)
KPT_TOL = self.KPT_W90_TOL
elif self.eigenvalue_type == 'DFT':
correct_kpt_tolerance = N.float(1.0e-6)
KPT_TOL = self.KPT_DFT_TOL
else:
print 'ERROR: eigenvalue_type is "%s" while it should be "W90" or "DFT" ... returning find_special_kpoints' %self.eigenvalue_type
return
if gvectors == None:
self.inputgvectors = False
self.gvectors = N.identity(3,N.float)
else:
if N.shape(gvectors) != (3, 3):
print 'ERROR: wrong gvectors ... exiting now'
sys.exit()
self.inputgvectors = True
self.gvectors = gvectors
full_kpoints = N.zeros((self.nkpt,3),N.float)
for ikpt in range(self.nkpt):
full_kpoints[ikpt,:] = self.kpoints[ikpt,0]*self.gvectors[0,:]+self.kpoints[ikpt,1]*self.gvectors[1,:]+self.kpoints[ikpt,2]*self.gvectors[2,:]
delta_kpt = full_kpoints[1,:]-full_kpoints[0,:]
self.special_kpoints_indices = list()
self.special_kpoints = list()
self.special_kpoints_indices.append(0)
self.special_kpoints.append(self.correct_kpt(self.kpoints[0,:],tolerance=correct_kpt_tolerance))
for ikpt in range(1,self.nkpt-1):
thisdelta = full_kpoints[ikpt+1,:]-full_kpoints[ikpt,:]
if not N.allclose(thisdelta,delta_kpt,atol=KPT_TOL):
delta_kpt = thisdelta
self.special_kpoints_indices.append(ikpt)
self.special_kpoints.append(self.correct_kpt(self.kpoints[ikpt,:],tolerance=correct_kpt_tolerance))
self.special_kpoints_indices.append(N.shape(self.kpoints)[0]-1)
self.special_kpoints.append(self.correct_kpt(self.kpoints[-1,:],tolerance=correct_kpt_tolerance))
print 'Special Kpoints : '
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(1,self.kpoints[0,:])
self.norm_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
self.norm_reduced_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
self.norm_paths[ispkpt-1] = N.linalg.norm(full_kpoints[self.special_kpoints_indices[ispkpt]]-full_kpoints[self.special_kpoints_indices[ispkpt-1]])
self.norm_reduced_paths[ispkpt-1] = N.linalg.norm(self.special_kpoints[ispkpt]-self.special_kpoints[ispkpt-1])
print ' {2:d}-{3:d} path length : {0: 8.8f} | reduced path length : {1: 8.8f}'.\
format(self.norm_paths[ispkpt-1],self.norm_reduced_paths[ispkpt-1],ispkpt,ispkpt+1)
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(ispkpt+1,self.kpoints[self.special_kpoints_indices[ispkpt],:])
self.kpoint_path_length = N.sum(self.norm_paths)
self.kpoint_reduced_path_length = N.sum(self.norm_reduced_paths)
self.normalized_kpoint_path_norm = self.norm_paths/self.kpoint_path_length
self.normalized_kpoint_reduced_path_norm = self.norm_reduced_paths/self.kpoint_reduced_path_length
kptredpathval = list()
kptpathval = list()
kptredpathval.append(N.float(0.0))
kptpathval.append(N.float(0.0))
curlen = N.float(0.0)
redcurlen = N.float(0.0)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
kptredpathval.extend(N.linspace(redcurlen,redcurlen+self.norm_reduced_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
kptpathval.extend(N.linspace(curlen,curlen+self.norm_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
redcurlen = redcurlen + self.norm_reduced_paths[ispkpt-1]
curlen = curlen + self.norm_paths[ispkpt-1]
self.kpoint_path_values = N.array(kptpathval,N.float)
self.kpoint_reduced_path_values = N.array(kptredpathval,N.float)
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.special_kpoints = N.array(self.special_kpoints,N.float)
def file_open(self,filefullpath):
if filefullpath[-3:] == '_GW':
self.gw_file_open(filefullpath)
elif filefullpath[-7:] == '_EIG.nc':
self.nc_eig_open(filefullpath)
elif filefullpath[-4:] == '.dat':
self.wannier_bs_file_open(filefullpath)
def has_eigenvalue(self,nsppol,isppol,kpoint,iband):
if self.nsppol != nsppol:
return False
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
if iband >= self.bd_indices[isppol,ikpt,0]-1 and iband < self.bd_indices[isppol,ikpt,1]:
return True
return False
return False
def get_eigenvalue(self,nsppol,isppol,kpoint,iband):
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
return self.eigenvalues[isppol,ikpt,iband]
def wannier_bs_file_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
print 'WARNING: no spin polarization reading yet for Wannier90 bandstructure files!'
self.eigenvalue_type = 'W90'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
reader = open(self.filefullpath,'r')
filedata = reader.readlines()
reader.close()
for iline in range(len(filedata)):
if filedata[iline].strip() == '':
self.nkpt = iline
break
self.mband = N.int(len(filedata)/self.nkpt)
self.nsppol = 1
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
self.kpoints = N.zeros([self.nkpt,3],N.float)
iline = 0
kpt_file = '%s.kpt' %filefullpath[:-4]
if os.path.isfile(kpt_file):
reader = open(kpt_file,'r')
kptdata = reader.readlines()
reader.close()
if N.int(kptdata[0]) != self.nkpt:
print 'ERROR : the number of kpoints in file "%s" is not the same as in "%s" ... exit' %(self.filefullpath,kpt_file)
sys.exit()
for ikpt in range(self.nkpt):
linesplit = kptdata[ikpt+1].split()
self.kpoints[ikpt,0] = N.float(linesplit[0])
self.kpoints[ikpt,1] = N.float(linesplit[1])
self.kpoints[ikpt,2] = N.float(linesplit[2])
else:
for ikpt in range(self.nkpt):
self.kpoints[ikpt,0] = N.float(filedata[ikpt].split()[0])
for iband in range(self.mband):
for ikpt in range(self.nkpt):
self.eigenvalues[0,ikpt,iband] = N.float(filedata[iline].split()[1])
iline = iline+1
iline = iline+1
self.eigenvalues = self.eigenvalues*csts.ev2hartree
self.units = 'Hartree'
def gw_file_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
self.eigenvalue_type = 'GW'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
reader = open(self.filefullpath,'r')
filedata = reader.readlines()
reader.close()
self.nkpt = N.int(filedata[0].split()[0])
self.kpoints = N.ones([self.nkpt,3],N.float)
self.nsppol = N.int(filedata[0].split()[1])
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
icur = 1
nbd_kpt = N.zeros([self.nsppol,self.nkpt],N.int)
for isppol in range(self.nsppol):
for ikpt in range(self.nkpt):
self.kpoints[ikpt,:] = N.array(filedata[icur].split()[:],N.float)
icur = icur + 1
nbd_kpt[isppol,ikpt] = N.int(filedata[icur])
self.bd_indices[isppol,ikpt,0] = N.int(filedata[icur+1].split()[0])
self.bd_indices[isppol,ikpt,1] = N.int(filedata[icur+nbd_kpt[isppol,ikpt]].split()[0])
icur = icur + nbd_kpt[isppol,ikpt] + 1
self.mband = N.max(self.bd_indices[:,:,1])
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
self.eigenvalues[:,:,:] = N.nan
ii = 3
for isppol in range(self.nsppol):
for ikpt in range(self.nkpt):
for iband in range(self.bd_indices[isppol,ikpt,0]-1,self.bd_indices[isppol,ikpt,1]):
self.eigenvalues[isppol,ikpt,iband] = N.float(filedata[ii].split()[1])
ii = ii + 1
ii = ii + 2
self.eigenvalues = csts.ev2hartree*self.eigenvalues
self.units = 'Hartree'
def pfit_gw_file_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
writer.write('%12s%12s\n' %(self.nkpt,self.nsppol))
if gwec == None:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
else:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
if gwec.has_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband):
gw_eig = gwec.get_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband)
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*gw_eig,csts.hartree2ev*(gw_eig-self.eigenvalues[isppol,ikpt,iband]),0.0))
else:
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
writer.close()
def pfit_dft_to_gw_bs_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
if gwec == None:
for ikpt in range(self.nkpt):
writer.write('%s' %ikpt)
for isppol in range(self.nsppol):
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write(' %s' %(csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta))
writer.write('\n')
else:
print 'NOT SUPPORTED YET'
sys.exit()
writer.close()
def nc_eig_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
ncdata = nc.Dataset(filefullpath)
self.eigenvalue_type = 'DFT'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
for dimname,dimobj in ncdata.dimensions.iteritems():
if dimname == 'nsppol':self.nsppol = N.int(len(dimobj))
if dimname == 'nkpt':self.nkpt = N.int(len(dimobj))
if dimname == 'mband':self.mband = N.int(len(dimobj))
for varname in ncdata.variables:
if varname == 'Eigenvalues':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.units = None
for attrname in varobj.ncattrs():
if attrname == 'units':
self.units = varobj.getncattr(attrname)
if self.units == None:
print 'WARNING : units are not specified'
print '... assuming "Hartree" units ...'
self.units = 'Hartree'
elif self.units != 'Hartree':
print 'ERROR : units are unknown : "%s"' %self.units
print '... exiting now ...'
sys.exit()
self.eigenvalues = N.reshape(N.array(varobj,N.float),varshape)
self.nsppol = varshape[0]
self.nkpt = varshape[1]
self.kpoints = -1*N.ones((self.nkpt,3),N.float)
self.mband = varshape[2]
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
self.bd_indices[:,:,0] = 1
self.bd_indices[:,:,1] = self.mband
break
for varname in ncdata.variables:
if varname == 'Kptns':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.kpoints = N.reshape(N.array(varobj,N.float),varshape)
def write_bandstructure_to_file(self,filename,option_kpts='bohrm1_units'):
#if option_kpts is set to 'normalized', the path of the bandstructure will be normalized to 1 (and special k-points correctly chosen)
if self.kpoint_sampling_type != 'Bandstructure':
print 'ERROR: kpoint_sampling_type is not "Bandstructure" ... returning from write_bandstructure_to_file'
return
if self.nsppol > 1:
print 'ERROR: number of spins is more than 1, this is not fully tested ... use with care !'
writer = open(filename,'w')
writer.write('# BANDSTRUCTURE FILE FROM DAVID\'S SCRIPT\n')
writer.write('# nsppol = %s\n' %self.nsppol)
writer.write('# nband = %s\n' %self.mband)
writer.write('# eigenvalue_type = %s\n' %self.eigenvalue_type)
if self.inputgvectors:
writer.write('# inputgvectors = 1 (%s)\n' %self.inputgvectors)
else:
writer.write('# inputgvectors = 0 (%s)\n' %self.inputgvectors)
writer.write('# gvectors(1) = %20.17f %20.17f %20.17f \n' %(self.gvectors[0,0],self.gvectors[0,1],self.gvectors[0,2]))
writer.write('# gvectors(2) = %20.17f %20.17f %20.17f \n' %(self.gvectors[1,0],self.gvectors[1,1],self.gvectors[1,2]))
writer.write('# gvectors(3) = %20.17f %20.17f %20.17f \n' %(self.gvectors[2,0],self.gvectors[2,1],self.gvectors[2,2]))
writer.write('# special_kpoints_number = %s\n' %(len(self.special_kpoints_indices)))
writer.write('# list of special kpoints : (given in reduced coordinates, value_path is in Bohr^-1, value_red_path has its total path normalized to 1)\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_index %5s : %20.17f %20.17f %20.17f (value_path = %20.17f | value_red_path = %20.17f)\n' %(ispkpt,spkpt[0],spkpt[1],spkpt[2],self.kpoint_path_values[ispkpt],self.kpoint_reduced_path_values[ispkpt]))
writer.write('# special_kpoints_names :\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_name %3s : "%s" : %20.17f %20.17f %20.17f\n' %(ii+1,self.special_kpoints_names[ii],spkpt[0],spkpt[1],spkpt[2]))
writer.write('# kpoint_path_length = %20.17f \n' %(self.kpoint_path_length))
writer.write('# kpoint_path_number = %s \n' %(self.nkpt))
if self.inputgvectors:
writer.write('# kpoint_path_units = %s\n' %(option_kpts))
else:
writer.write('# kpoint_path_units = %s (!!! CONSIDERING UNITARY GVECTORS MATRIX !!!)\n' %(option_kpts))
writer.write('#BEGIN\n')
if option_kpts == 'bohrm1_units':
values_path = self.kpoint_path_values
elif option_kpts == 'reduced':
values_path = self.kpoint_reduced_path_values
elif option_kpts == 'bohrm1_units_normalized':
values_path = self.normalized_kpoint_path_values
elif option_kpts == 'reduced_normalized':
values_path = self.normalized_kpoint_reduced_path_values
else:
print 'ERROR: wrong option_kpts ... exit'
writer.write('... CANCELLED (wrong option_kpts)')
writer.close()
sys.exit()
for isppol in range(self.nsppol):
writer.write('#isppol %s\n' %isppol)
for iband in range(self.mband):
writer.write('#iband %5s (band number %s)\n' %(iband,iband+1))
for ikpt in range(self.nkpt):
writer.write('%20.17f %20.17f\n' %(values_path[ikpt],self.eigenvalues[isppol,ikpt,iband]))
writer.write('\n')
writer.write('#END\n')
writer.write('\n#KPT_LIST\n')
for ikpt in range(self.nkpt):
writer.write('# %6d : %20.17f %20.17f %20.17f\n' %(ikpt,self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.close()
def read_bandstructure_from_file(self,filename):
reader = open(filename,'r')
bs_data = reader.readlines()
reader.close()
self.gvectors = N.identity(3,N.float)
self.kpoint_sampling_type = 'Bandstructure'
self.special_kpoints_indices = list()
self.special_kpoints = list()
for ii in range(len(bs_data)):
if bs_data[ii] == '#BEGIN\n':
ibegin = ii
break
elif bs_data[ii][:10] == '# nsppol =':
self.nsppol = N.int(bs_data[ii][10:])
elif bs_data[ii][:9] == '# nband =':
self.mband = N.int(bs_data[ii][9:])
elif bs_data[ii][:19] == '# eigenvalue_type =':
self.eigenvalue_type = bs_data[ii][19:].strip()
elif bs_data[ii][:17] == '# inputgvectors =':
tt = N.int(bs_data[ii][18])
if tt == 1:
self.inputgvectors = True
elif tt == 0:
self.inputgvectors = False
else:
print 'ERROR: reading inputgvectors ... exit'
sys.exit()
elif bs_data[ii][:15] == '# gvectors(1) =':
sp = bs_data[ii][15:].split()
self.gvectors[0,0] = N.float(sp[0])
self.gvectors[0,1] = N.float(sp[1])
self.gvectors[0,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(2) =':
sp = bs_data[ii][15:].split()
self.gvectors[1,0] = N.float(sp[0])
self.gvectors[1,1] = N.float(sp[1])
self.gvectors[1,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(3) =':
sp = bs_data[ii][15:].split()
self.gvectors[2,0] = N.float(sp[0])
self.gvectors[2,1] = N.float(sp[1])
self.gvectors[2,2] = N.float(sp[2])
elif bs_data[ii][:26] == '# special_kpoints_number =':
special_kpoints_number = N.int(bs_data[ii][26:])
self.special_kpoints_names = ['']*special_kpoints_number
elif bs_data[ii][:22] == '# special_kpt_index':
sp = bs_data[ii][22:].split()
self.special_kpoints_indices.append(N.int(sp[0]))
self.special_kpoints.append(N.array([sp[2],sp[3],sp[4]]))
elif bs_data[ii][:21] == '# special_kpt_name':
sp = bs_data[ii][21:].split()
ispkpt = N.int(sp[0])-1
self.special_kpoints_names[ispkpt] = sp[2][1:-1]
elif bs_data[ii][:22] == '# kpoint_path_length =':
self.kpoint_path_length = N.float(bs_data[ii][22:])
elif bs_data[ii][:22] == '# kpoint_path_number =':
self.nkpt = N.int(bs_data[ii][22:])
elif bs_data[ii][:21] == '# kpoint_path_units =':
kpoint_path_units = bs_data[ii][21:].strip()
self.special_kpoints_indices = N.array(self.special_kpoints_indices,N.int)
self.special_kpoints = N.array(self.special_kpoints,N.float)
if len(self.special_kpoints_indices) != special_kpoints_number or len(self.special_kpoints) != special_kpoints_number:
print 'ERROR: reading the special kpoints ... exit'
sys.exit()
self.kpoint_path_values = N.zeros([self.nkpt],N.float)
self.kpoint_reduced_path_values = N.zeros([self.nkpt],N.float)
if kpoint_path_units == 'bohrm1_units':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
if kpoint_path_units == 'bohrm1_units_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.kpoint_path_values = self.normalized_kpoint_path_values*self.kpoint_path_length
elif kpoint_path_units == 'reduced_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.kpoint_reduced_path_values = self.normalized_kpoint_reduced_path_values*self.kpoint_reduced_path_length
elif kpoint_path_units == 'reduced':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
check_nband = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol':
isppol = N.int(bs_data[ii][7:])
elif bs_data[ii][:6] == '#iband':
iband = N.int(bs_data[ii][6:].split()[0])
ikpt = 0
elif bs_data[ii][:4] == '#END':
break
elif bs_data[ii] == '\n':
check_nband = check_nband + 1
else:
self.eigenvalues[isppol,ikpt,iband] = N.float(bs_data[ii].split()[1])
ikpt = ikpt + 1
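def _eigenvalue_container_usage_sketch():
    '''Hedged usage sketch (never called; the file name below is a placeholder, not a
    file shipped with this script): file_open above dispatches on the suffix, so a
    DFT bandstructure _EIG.nc file, a _GW file and a Wannier90 .dat file are all
    read through the same constructor.'''
    dft_bs = EigenvalueContainer(directory='.', filename='my_bandstructure_EIG.nc')
    dft_bs.set_kpoint_sampling_type('Bandstructure')
    dft_bs.find_special_kpoints()
    return dft_bs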
def check_gw_vs_dft_parameters(dftec,gwec):
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue files do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
for ikpt in range(dftec.nkpt):
if N.absolute(dftec.kpoints[ikpt,0]-gwec.kpoints[ikpt,0]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,1]-gwec.kpoints[ikpt,1]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,2]-gwec.kpoints[ikpt,2]) > csts.TOLKPTS:
print 'ERROR: the kpoints are not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
def plot_gw_vs_dft_eig(dftec,gwec,vbm_index,energy_pivots=None,polyfit_degrees=None):
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue containers do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT containers ... exiting now'
sys.exit()
valdftarray = N.array([],N.float)
conddftarray = N.array([],N.float)
valgwarray = N.array([],N.float)
condgwarray = N.array([],N.float)
for isppol in range(dftec.nsppol):
for ikpt in range(dftec.nkpt):
ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1
ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1
valdftarray = N.append(valdftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
valgwarray = N.append(valgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
conddftarray = N.append(conddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
condgwarray = N.append(condgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
if energy_pivots == None:
if plot_figures == 1:
P.figure(1)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray,'bx')
P.plot(conddftarray,condgwarray,'rx')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW eigenvalues (in eV)')
P.figure(2)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx')
P.plot(conddftarray,condgwarray-conddftarray,'rx')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.show()
return
polyfitlist = list()
if len(polyfit_degrees) == 1:
print 'ERROR: making a fit with only one interval is not allowed ... exiting now'
sys.exit()
dftarray = N.append(valdftarray,conddftarray)
gwarray = N.append(valgwarray,condgwarray)
dftarray_list = list()
gwarray_list = list()
for iinterval in range(len(polyfit_degrees)):
tmpdftarray = N.array([],N.float)
tmpgwarray = N.array([],N.float)
if iinterval == 0:
emin = None
emax = energy_pivots[0]
for ii in range(len(dftarray)):
if dftarray[ii] <= emax:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
elif iinterval == len(polyfit_degrees)-1:
emin = energy_pivots[-1]
emax = None
for ii in range(len(dftarray)):
if dftarray[ii] >= emin:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
else:
emin = energy_pivots[iinterval-1]
emax = energy_pivots[iinterval]
for ii in range(len(dftarray)):
if dftarray[ii] >= emin and dftarray[ii] <= emax:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
dftarray_list.append(tmpdftarray)
gwarray_list.append(tmpgwarray)
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees[iinterval])
polyfitlist.append(pfit)
if plot_figures == 1:
linspace_npoints = 200
valpoly_x = N.linspace(N.min(valdftarray),N.max(valdftarray),linspace_npoints)
condpoly_x = N.linspace(N.min(conddftarray),N.max(conddftarray),linspace_npoints)
P.figure(3)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx')
P.plot(conddftarray,condgwarray-conddftarray,'rx')
[x_min,x_max] = P.xlim()
for iinterval in range(len(polyfit_degrees)):
if iinterval == 0:
tmppoly_x = N.linspace(x_min,energy_pivots[iinterval],linspace_npoints)
elif iinterval == len(polyfit_degrees)-1:
tmppoly_x = N.linspace(energy_pivots[iinterval-1],x_max,linspace_npoints)
else:
tmppoly_x = N.linspace(energy_pivots[iinterval-1],energy_pivots[iinterval],linspace_npoints)
P.plot(tmppoly_x,N.polyval(polyfitlist[iinterval],tmppoly_x),'k')
for ipivot in range(len(energy_pivots)):
en = energy_pivots[ipivot]
P.plot([en,en],[N.polyval(polyfitlist[ipivot],en),N.polyval(polyfitlist[ipivot+1],en)],'k-.')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.figure(4)
P.hold(True)
P.grid(True)
for iinterval in range(len(polyfit_degrees)):
P.plot(dftarray_list[iinterval],gwarray_list[iinterval]-dftarray_list[iinterval]-N.polyval(polyfitlist[iinterval],dftarray_list[iinterval]),'bx')
[x_min,x_max] = P.xlim()
P.plot([x_min,x_max],[0,0],'k-')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('Error in the fit (in eV)')
P.show()
return polyfitlist
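def _apply_pfit_correction_sketch(eig_ev, polyfitlist, energy_pivots):
    '''Hedged sketch (not used by the script; it only restates, for a single
    eigenvalue in eV, the interval selection coded in pfit_gw_file_write and
    pfit_dft_to_gw_bs_write above): the correction comes from the polynomial of
    the first interval whose upper pivot bounds the eigenvalue, and from the last
    polynomial otherwise.'''
    delta = N.polyval(polyfitlist[-1], eig_ev)
    for ipivot in range(len(energy_pivots)):
        if eig_ev <= energy_pivots[ipivot]:
            delta = N.polyval(polyfitlist[ipivot], eig_ev)
            break
    return eig_ev + delta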
def compare_bandstructures(ec_ref,ec_test):
nspkpt_ref = len(ec_ref.special_kpoints)
nspkpt_test = len(ec_test.special_kpoints)
if nspkpt_ref != nspkpt_test:
print 'ERROR: The number of special kpoints is different in the two files ... exit'
sys.exit()
eig_type_ref = ec_ref.eigenvalue_type
eig_type_test = ec_test.eigenvalue_type
print eig_type_ref,eig_type_test
if eig_type_ref == 'DFT' and eig_type_test == 'W90':
TOL_KPTS = N.float(1.0e-4)
else:
TOL_KPTS = N.float(1.0e-6)
print TOL_KPTS
for ispkpt in range(nspkpt_ref):
print 'difference between the two :',ec_ref.special_kpoints[ispkpt,:]-ec_test.special_kpoints[ispkpt,:]
if not N.allclose(ec_ref.special_kpoints[ispkpt,:],ec_test.special_kpoints[ispkpt,:],atol=TOL_KPTS):
print 'ERROR: The kpoints are not the same :'
print ' Kpt #%s ' %ispkpt
print ' Reference => %20.17f %20.17f %20.17f' %(ec_ref.special_kpoints[ispkpt,0],ec_ref.special_kpoints[ispkpt,1],ec_ref.special_kpoints[ispkpt,2])
print ' Compared => %20.17f %20.17f %20.17f' %(ec_test.special_kpoints[ispkpt,0],ec_test.special_kpoints[ispkpt,1],ec_test.special_kpoints[ispkpt,2])
print ' ... exit'
sys.exit()
mband_comparison = N.min([ec_ref.mband,ec_test.mband])
if mband_comparison < ec_ref.mband:
print 'Number of bands in the test bandstructure is lower than the number of bands in the reference (%s)' %ec_ref.mband
print ' => Comparison will proceed with %s bands' %ec_test.mband
elif mband_comparison < ec_test.mband:
print 'Number of bands in the reference bandstructure is lower than the number of bands in the test bandstructure (%s)' %ec_test.mband
print ' => Comparison will only proceed with %s bands of the test bandstructure' %ec_ref.mband
else:
print 'Number of bands in the reference and test bandstructure is the same'
print ' => Comparison will proceed with %s bands' %mband_comparison
# eig_test_ref_path = ec_ref.eigenvalues[:,:,:mband_comparison]
rmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
nrmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
mae_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
for isppol in range(ec_ref.nsppol):
for iband in range(mband_comparison):
interp = N.interp(ec_ref.normalized_kpoint_path_values,ec_test.normalized_kpoint_path_values,ec_test.eigenvalues[isppol,:,iband])
rmsd_per_band[isppol,iband] = N.sqrt(N.sum((csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband])**2)/ec_ref.nkpt)
mae_per_band[isppol,iband] = N.sum(N.abs(csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband]))/ec_ref.nkpt
P.figure(1)
P.plot(mae_per_band[0,:])
P.figure(2)
P.plot(rmsd_per_band[0,:])
P.show()
def get_gvectors():
if os.path.isfile('.gvectors.bsinfo'):
print 'File ".gvectors.bsinfo found with the following gvectors information :"'
try:
gvectors_reader = open('.gvectors.bsinfo','r')
gvectors_data = gvectors_reader.readlines()
gvectors_reader.close()
trial_gvectors = N.identity(3,N.float)
trial_gvectors[0,0] = N.float(gvectors_data[0].split()[0])
trial_gvectors[0,1] = N.float(gvectors_data[0].split()[1])
trial_gvectors[0,2] = N.float(gvectors_data[0].split()[2])
trial_gvectors[1,0] = N.float(gvectors_data[1].split()[0])
trial_gvectors[1,1] = N.float(gvectors_data[1].split()[1])
trial_gvectors[1,2] = N.float(gvectors_data[1].split()[2])
trial_gvectors[2,0] = N.float(gvectors_data[2].split()[0])
trial_gvectors[2,1] = N.float(gvectors_data[2].split()[1])
trial_gvectors[2,2] = N.float(gvectors_data[2].split()[2])
print ' gvectors(1) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2])
print ' gvectors(2) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2])
print ' gvectors(3) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2])
except:
print 'ERROR: file ".gvectors.bsinfo" might be corrupted (empty or not formatted correctly ...)'
print ' you should remove the file and start again or check the file ... exit'
sys.exit()
test = raw_input('Press <ENTER> to use these gvectors (any other character to enter manually other gvectors)\n')
if test == '':
gvectors = trial_gvectors
else:
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : \n')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : \n')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : \n')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to overwrite the gvectors contained in the file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
        test = raw_input('Do you want to enter the reciprocal space primitive vectors (y/n)\n')
if test == 'y':
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : ')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : ')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : ')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to write the gvectors to file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
gvectors = None
return gvectors
# Parse the command line options
parser = argparse.ArgumentParser(description='Tool for plotting dft bandstructures')
parser.add_argument('files',help='files to be opened',nargs=1)
args = parser.parse_args()
args_dict = vars(args)
if args_dict['files']:
print 'will open the file'
else:
print 'ERROR: you should provide some bandstructure file ! exiting now ...'
sys.exit()
dft_file = args_dict['files'][0]
gvectors = get_gvectors()
ec_dft = EigenvalueContainer(directory='.',filename=dft_file)
ec_dft.set_kpoint_sampling_type('Bandstructure')
ec_dft.find_special_kpoints(gvectors)
print 'Number of bands in the file : %s' %(N.shape(ec_dft.eigenvalues)[2])
test = raw_input('Enter the number of bands to be plotted (<ENTER> : %s) : \n' %(N.shape(ec_dft.eigenvalues)[2]))
if test == '':
nbd_plot = N.shape(ec_dft.eigenvalues)[2]
else:
nbd_plot = N.int(test)
if nbd_plot > N.shape(ec_dft.eigenvalues)[2]:
print 'ERROR: the number of bands to be plotted is larger than the number available ... exit'
sys.exit()
ec_dft.special_kpoints_names = ['']*len(ec_dft.special_kpoints_indices)
for ii in range(len(ec_dft.special_kpoints_indices)):
ec_dft.special_kpoints_names[ii] = 'k%s' %(ii+1)
print 'List of special kpoints :'
for ii in range(len(ec_dft.special_kpoints_indices)):
spkpt = ec_dft.kpoints[ec_dft.special_kpoints_indices[ii]]
print ' Kpoint %s : %s %s %s' %(ii+1,spkpt[0],spkpt[1],spkpt[2])
print 'Enter the name of the %s special k-points :' %(len(ec_dft.special_kpoints_indices))
test = raw_input('')
if len(test.split()) == len(ec_dft.special_kpoints_indices):
for ii in range(len(ec_dft.special_kpoints_indices)):
ec_dft.special_kpoints_names[ii] = test.split()[ii]
test = raw_input('Enter base name for bandstructure file : \n')
ec_dft.write_bandstructure_to_file('%s.bandstructure' %test)
P.figure(1,figsize=(3.464,5))
P.hold('on')
P.grid('on')
P.xticks(N.take(ec_dft.kpoint_reduced_path_values,N.array(ec_dft.special_kpoints_indices,N.int)),ec_dft.special_kpoints_names)
if ec_dft.nsppol == 1:
for iband in range(nbd_plot):
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
elif ec_dft.nsppol == 2:
for iband in range(nbd_plot):
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[1,:,iband]*csts.hartree2ev,'r-',linewidth=2)
P.show()
| gpl-3.0 |
robios/PyTES | pytes/Util.py | 1 | 32573 | import warnings
import numpy as np
import time
from struct import unpack
from scipy.stats import norm
from scipy.signal import tukey
from Filter import median_filter
import Analysis, Filter, Constants
def savefits(data, filename, vmax=1.0, sps=1e6, bits=14, noise=False, clobber=True):
"""
Save pulse/noise to FITS file
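
    Example (a sketch; the pulse array and parameters are hypothetical):

    >>> pulses = np.random.randn(100, 1024) * 0.01
    >>> savefits(pulses, 'pulse.fits', vmax=1.0, sps=1e6, bits=14)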
"""
import pyfits as pf
# Prepare data
data = (np.asarray(data)/vmax*2**(bits-1)).round()
# Column Name
if noise:
colname = 'NoiseRec'
else:
colname = 'PulseRec'
# Columns
col_t = pf.Column(name='TIME', format='1D', unit='s', array=np.zeros(data.shape[0], dtype=int))
col_data = pf.Column(name=colname, format='%dI' % data.shape[1], unit='V', array=data)
cols = pf.ColDefs([col_t, col_data])
tbhdu = pf.BinTableHDU.from_columns(cols)
# Name of extension
exthdr = tbhdu.header
exthdr['EXTNAME'] = ('Record', 'name of this binary table extension')
exthdr['EXTVER'] = (1, 'extension version number')
# Add more attributes
exthdr['TSCAL2'] = (vmax/2**(bits-1), '[V/ch]')
exthdr['TZERO2'] = (0., '[V]')
exthdr['THSCL2'] = (sps**-1, '[s/bin] horizontal resolution of record')
exthdr['THZER2'] = (0, '[s] horizontal offset of record')
exthdr['THSAM2'] = (data.shape[1], 'sample number of record')
exthdr['THUNI2'] = ('s', 'physical unit of sampling step of record')
exthdr['TRMIN2'] = (-2**(bits-1)+1, '[channel] minimum number of each sample')
exthdr['TRMAX2'] = (2**(bits-1)-1, '[channel] maximum number of each sample')
exthdr['TRBIN2'] = (1, '[channel] default bin number of each sample')
# More attributes
exthdr['TSTART'] = (0, 'start time of experiment in total second')
exthdr['TSTOP'] = (0, 'end time of experiment in total second')
exthdr['TEND'] = (0, 'end time of experiment (obsolete)')
exthdr['DATE'] = (time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()), 'file creation date (UT)')
# We anyway need Primary HDU
hdu = pf.PrimaryHDU()
# Write to FITS
thdulist = pf.HDUList([hdu, tbhdu])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
thdulist.writeto(filename, clobber=clobber)
def fopen(filename):
"""
Read FITS file
Parameters
==========
filename: file number to read
Returns
=======
t: time array
wave: waveform array
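
    Example (hypothetical filename):

    >>> t, wave = fopen('pulse.fits')
    >>> wave.shape    # (number of records, samples per record)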
"""
import pyfits as pf
# Open fits file and get pulse/noise data
header = pf.open(filename)
wave = header[1].data.field(1).copy()
dt = header[1].header['THSCL2']
t = np.arange(wave.shape[-1]) * dt
header.close()
return t, wave
def yopen(filenumber, summary=False, nf=None, tmin=None, tmax=None, raw=False):
"""
Read Yokogawa WVF file
Parameters
==========
filenumber: file number to read
summary: to summary waves (default: False)
nf: sigmas for valid data using median noise filter, None to disable noise filter (default: None)
tmin: lower boundary of time for partial extraction, scaler or list (Default: None)
tmax: upper boundary of time for partial extraction, scaler or list (Default: None)
raw: returns raw data without scaling/offsetting if True (Default: False)
Returns
=======
if summary is False:
[ t1, d1, t2, d2, t3, d3, ... ]
if summary is True:
[ t1, d1, err1, t2, d2, err2, ... ]
if raw is True:
t1 is a tuple of (hres1, hofs1, vres1, vofs1)
where t1 is timing for 1st ch, d1 is data for 1st ch, err1 is error (1sigma) for 1st ch, and so on.
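
    Example (a sketch; file number and trace layout are hypothetical):

    >>> (t1, d1), (t2, d2) = yopen(123)                      # raw traces from 123.HDR/123.WVF
    >>> t1, avg1, err1 = yopen(123, summary=True, nf=3)[0]   # noise-filtered summary of 1st trace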
"""
# Read header (HDR)
h = open(str(filenumber) + ".HDR")
lines = h.readlines()
h.close()
# Parse $PublicInfo
for line in lines:
token = line.split()
if len(token) > 0:
# Check endian
if token[0] == "Endian":
endian = '>' if token[1] == "Big" else '<'
# Check data format
if token[0] == "DataFormat":
format = token[1]
assert format == "Block"
# Check # of groups
if token[0] == "GroupNumber":
groups = int(token[1])
# Check # of total traces
if token[0] == "TraceTotalNumber":
ttraces = int(token[1])
# Check data offset
if token[0] == "DataOffset":
offset = int(token[1])
# Initialize containers
traces = [None] * groups # Number of traces for each group
blocks = [None] * ttraces # Number of blocks for each trace
bsizes = [None] * ttraces # Block size for each trace
vres = [None] * ttraces # VResolution for each trace
voffset = [None] * ttraces # VOffset for each trace
hres = [None] * ttraces # HResolution for each trace
hoffset = [None] * ttraces # HOffset for each trace
# Parse $Group
for line in lines:
token = line.split()
if len(token) > 0:
# Read current group number
if token[0][:6] == "$Group":
cgn = int(token[0][6:]) - 1 # Current group number (minus 1)
# Check # of traces in this group
if token[0] == "TraceNumber":
traces[cgn] = int(token[1])
traceofs = np.sum(traces[:cgn], dtype=int)
# Check # of Blocks
if token[0] == "BlockNumber":
blocks[traceofs:traceofs+traces[cgn]] = [ int(token[1]) ] * traces[cgn]
# Check Block Size
if token[0] == "BlockSize":
bsizes[traceofs:traceofs+traces[cgn]] = [ int(s) for s in token[1:] ]
            # Check VResolution
if token[0] == "VResolution":
vres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
# Check VOffset
if token[0] == "VOffset":
voffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
# Check VDataType
if token[0] == "VDataType":
assert token[1] == "IS2"
# Check HResolution
if token[0] == "HResolution":
hres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
# Check HOffset
if token[0] == "HOffset":
hoffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
# Data Initialization
time = [ np.array(range(bsizes[t])) * hres[t] + hoffset[t] for t in range(ttraces) ]
data = [ [None] * blocks[t] for t in range(ttraces) ]
# Open WVF
f = open(str(filenumber) + ".WVF", 'rb')
f.seek(offset)
# Read WVF
if format == "Block":
# Block format (assuming block size is the same for all the traces in Block format)
for b in range(blocks[0]):
for t in range(ttraces):
if raw:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')
else:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]
else:
# Trace format
for t in range(ttraces):
for b in range(blocks[t]):
if raw:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')
else:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]
# Array conversion
for t in range(ttraces):
if raw:
data[t] = np.array(data[t], dtype='int64')
else:
data[t] = np.array(data[t])
# Tmin/Tmax filtering
for t in range(ttraces):
if type(tmin) == list or type(tmax) == list:
if not (type(tmin) == list and type(tmax) == list and len(tmin) == len(tmax)):
raise ValueError("tmin and tmax both have to be list and have to have the same length.")
mask = np.add.reduce([ (time[t] >= _tmin) & (time[t] < _tmax) for (_tmax, _tmin) in zip(tmax, tmin)], dtype=bool)
else:
_tmin = np.min(time[t]) if tmin is None else tmin
_tmax = np.max(time[t]) + 1 if tmax is None else tmax
mask = (time[t] >= _tmin) & (time[t] < _tmax)
data[t] = data[t][:, mask]
time[t] = time[t][mask]
f.close()
if summary is False:
# Return wave data as is
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), data[t] ] for t in range(ttraces) ]
else:
return [ [ time[t], data[t] ] for t in range(ttraces) ]
else:
if nf is None:
# Noise filter is off
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), np.mean(data[t].astype(dtype='float64'), axis=0), np.std(data[t].astype(dtype='float64'), axis=0, ddof=1) ]
for t in range(ttraces) ]
else:
return [ [ time[t], np.mean(data[t], axis=0), np.std(data[t], axis=0, ddof=1) ]
for t in range(ttraces) ]
else:
# Noise filter is on
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]),
np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t].astype(dtype='float64')),
np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t].astype(dtype='float64')) ]
for t in range(ttraces) ]
else:
return [ [ time[t],
np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t]),
np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t]) ]
for t in range(ttraces) ]
def popen(filename, ch=None, raw=False):
"""
Read pls file
Parameters
==========
filename: file name to read
ch: returns data only for the given channel if given (Default: None)
raw: returns raw data without scaling/offsetting if True (Default: False)
Returns
=======
if raw is True:
[ header, vres, vofs, hres, hofs, tick, num, data, edata ]
else:
[ header, t, tick, num, data, edata ]
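
    Example (hypothetical file and channel):

    >>> header, t, tick, num, data, edata = popen('run001.pls', ch=1)
    >>> data.shape    # (number of pulses, header['SAMPLES'])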
"""
# Initialize
header = {'COMMENT': []}
vres = {}
vofs = {}
hres = {}
hofs = {}
tick = {}
num = {}
data = {}
edata = {}
# Parser
def parser():
"""
PLS Data Parser (generator)
"""
# Initialization
samples = -1
extra = 0
chunk = ''
isHeader = True
while True:
while len(chunk) < 2:
chunk += yield
# Get the magic character
magic = chunk[0]
if isHeader and magic == 'C':
# Comment
while len(chunk) < 80:
chunk += yield
header['COMMENT'].append(chunk[2:80])
chunk = chunk[80:]
elif isHeader and magic == 'V':
# Version
while len(chunk) < 80:
chunk += yield
header['VERSION'] = chunk[2:80]
chunk = chunk[80:]
elif isHeader and magic == 'O':
# Date
while len(chunk) < 10:
chunk += yield
_m, _d, _y = map(int, chunk[2:10].split())
header['DATE'] = "%d/%d/%d" % (_y, _m, _d)
chunk = chunk[10:]
elif isHeader and magic == 'S':
# Number of Samples
while len(chunk) < 7:
chunk += yield
header['SAMPLES'] = samples = int(chunk[2:7])
chunk = chunk[7:]
elif isHeader and magic == 'E':
# Extra Bytes
while len(chunk) < 7:
chunk += yield
header['EXTRA'] = extra = int(chunk[2:7])
chunk = chunk[7:]
elif isHeader and magic == 'P':
# Discriminator
while len(chunk) < 78:
chunk += yield
_dis = chunk[2:78].split()
if _dis[0] == '01':
header['ULD'] = eval(_dis[1])
elif _dis[0] == '02':
header['LLD'] = eval(_dis[1])
chunk = chunk[78:]
elif isHeader and magic == 'N':
# Normalization
while len(chunk) < 47:
chunk += yield
_ch, _hofs, _hres, _vofs, _vres = chunk[2:47].split()
_ch = int(_ch)
vres[_ch] = eval(_vres)
vofs[_ch] = eval(_vofs)
hres[_ch] = eval(_hres)
hofs[_ch] = eval(_hofs)
chunk = chunk[47:]
elif magic == 'D':
# Data
isHeader = False
if samples < 0:
raise ValueError("Invalid number of samples.")
while len(chunk) < (11 + samples*2):
chunk += yield
_ch, _tick, _num = unpack('<BII', chunk[2:11])
if not data.has_key(_ch):
data[_ch] = bytearray()
tick[_ch] = []
num[_ch] = []
edata[_ch] = bytearray()
data[_ch] += chunk[11:11 + samples*2]
tick[_ch].append(_tick)
num[_ch].append(_num)
edata[_ch] += chunk[11 + samples*2:11 + samples*2 + extra]
chunk = chunk[11 + samples*2 + extra:]
else:
# Skip unknown magic
chunk = chunk[1:]
# Start parser
p = parser()
p.next()
# Read by chunk and parse it
with open(filename, 'rb') as f:
while True:
chunk = f.read(1024*1024) # read 1 MB
if not chunk:
break
p.send(chunk)
# Convert buffer to numpy array
for k in ([ch] if ch else data.keys()):
data[k] = np.frombuffer(data[k], dtype='>i2').reshape(-1, header['SAMPLES'])
edata[k] = np.frombuffer(edata[k], dtype='>u1').reshape(-1, header['SAMPLES'])
if raw:
if ch:
return header, vres[ch], vofs[ch], hres[ch], hofs[ch], tick[ch], num[ch], data[ch], edata[ch]
else:
return header, vres, vofs, hres, hofs, tick, num, data, edata
else:
t = {}
for k in ([ch] if ch else data.keys()):
# Normalize data using res/ofs
t[k] = (np.arange(header['SAMPLES']) + hofs[k]) * hres[k]
data[k] = (np.asarray(data[k]) + vofs[k]) * vres[k]
if ch:
return header, t[ch], tick[ch], num[ch], data[ch], edata[ch]
else:
return header, t, tick, num, data, edata
def tesana(t, p, n, lpfc=None, hpfc=None, binsize=1, max_shift=10,
thre=0.4, filt=None, nulldc=False, offset=False, center=False, sigma=3,
gain=None, dsr=None, shift=False, ocmethod="ols", flip=False, atom="Mn",
kbfit=False, ignorekb=False, method="mle",
rshunt=None, tbias=None, ites=None, ka_min=80, kb_min=40,
tex=False, plotting=True, savedat=False, session="Unnamed"):
"""
Perform TES Analysis
Parameters (and their default values):
t: time data (array-like)
p: pulse data (array-like)
n: noise data (array-like)
lpfc: low-pass filter cut-off frequency in bins (Default: None)
hpfc: high-pass filter cut-off frequency in bins (Default: None)
        binsize: energy bin size for histograms and fittings (only for ls and cs) in eV (Default: 1)
max_shift: maximum allowed shifts to calculate maximum cross correlation (Default: 10)
thre: correlation threshold for offset correction (Default: 0.4)
filt: window function (hanning/hamming/blackman/tukey) (Default: None)
nulldc: nullify the DC bin when template generation (Default: False)
offset: subtract DC offset (Default: False)
center: centering pulse rise (Default: False)
sigma: sigmas for median filter (Default: 3)
gain: feedback gain for current-space conversion (Default: None)
dsr: down-sampling rate (Default: None)
shift: treat dE as energy shift instead of scaling (Default: False)
ocmethod: offset correction fitting method (ols/odr) (Default: ols)
flip: flip x and y when offset correction fitting (Default: False)
atom: atom to fit (Default: Mn)
kbfit: fit Kb line (Default: False)
ignorekb: ignore Kb line when linearity correction (Default: False)
method: fitting method (mle/ls/cs) (Default: mle)
rshunt: shunt resistance value for r-space conversion (Default: None)
tbias: TES bias current for r-space conversion (Default: None)
ites: TES current for r-space conversion (Default: None)
ka_min: minimum counts to group bins for Ka line (valid only for ls/cs fittings) (Default: 80)
        kb_min: minimum counts to group bins for Kb line (valid only for ls/cs fittings) (Default: 40)
tex: use TeX for plots (Default: False)
plotting: generate and save plots (Default: True)
savedat: save data to files (Default: False)
session: session name for plots and data files (Default: Unnamed)
Note:
- Use offset option when using filt option
- Consider using center option when using filt option
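
    Example (a minimal sketch; filenames are hypothetical):
        >>> t, p = fopen('pulse.fits')
        >>> _, n = fopen('noise.fits')
        >>> tesana(t, p, n, filt='hamming', offset=True, center=True, session='Run1')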
"""
if plotting:
# Import matplotlib
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['text.usetex'] = str(tex)
from pylab import figure, plot, errorbar, hist, axvline, xlim, ylim, loglog, xlabel, ylabel, legend, tight_layout, savefig
print "Session: %s" % session
# Preparation
p = np.asarray(p)
n = np.asarray(n)
t = np.asarray(t)
dt = np.diff(t)[0]
df = (dt * t.shape[-1])**-1
# Subtract offset
if offset:
ofs = np.median(n)
p -= ofs
n -= ofs
# Convert to current-space if needed
if gain:
print "Converting to current-space"
p /= gain
n /= gain
# Convert to resistance-space
Rspace = False
if gain and rshunt and tbias and ites:
print "Converting to resistance-space"
ofs = np.median(n)
p += (ites - ofs)
n += (ites - ofs)
# Convert to resistance
p = (tbias - p) * rshunt / p
n = (tbias - n) * rshunt / n
Rspace = True
# Down-sample
if dsr > 1:
p = p[:,:p.shape[-1]/dsr*dsr].reshape(p.shape[0], -1, dsr).mean(axis=-1)
n = n[:,:n.shape[-1]/dsr*dsr].reshape(n.shape[0], -1, dsr).mean(axis=-1)
dt *= dsr
t = t[::dsr]
# Pulse centering (for filtering)
if center:
# Roll pulse to the center
r = p.shape[-1] / 2 - np.median(abs(p - Filter.offset(p)[:, np.newaxis]).argmax(axis=-1))
p = np.hstack((p[...,-r:], p[...,:-r]))
# Calculate offset (needs to be done before applying filter)
if p.size > 0:
offset = Filter.offset(p)
# Generate Filter
if filt is None:
pass
else:
if filt.lower() == "hanning":
f = np.hanning(p.shape[-1])
elif filt.lower() == "hamming":
f = np.hamming(p.shape[-1])
elif filt.lower() == "blackman":
f = np.blackman(p.shape[-1])
elif filt.lower() == "tukey":
f = tukey(p.shape[-1])
else:
raise ValueError('Unsupported filter: %s' % filt.lower())
print "Window filter function: %s" % filt.lower()
# Amplitude correction
cf = f.sum() / len(f)
p *= (f / cf)
n *= (f / cf)
# Equivalent noise bandwidth correction
enb = len(f)*(f**2).sum()/f.sum()**2
df *= enb
if p.size > 0:
# Calculate averaged pulse
avgp = Filter.average_pulse(p, max_shift=max_shift)
if savedat:
np.savetxt('%s-averagepulse.dat' % session, np.vstack((t, avgp)).T,
header='Time (s), Averaged Pulse (%s)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
figure()
plot(t, avgp)
xlabel('Time$\quad$(s)')
ylabel('Averaged Pulse$\quad$(%s)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-averagepulse.pdf' % session)
# Calculate averaged pulse spectrum
avgps = np.sqrt(Filter.power(avgp)) / df
if savedat:
np.savetxt('%s-avgpulse-power.dat' % session, np.vstack((np.arange(len(avgps))*df, avgps)).T,
header='Frequency (Hz), Average Pulse Power (%s/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
avgps[0] = 0 # for better plot
figure()
plot(np.arange(len(avgps))*df, avgps)
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('Average Pulse Power$\quad$(%s/Hz)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-avgpulse-power.pdf' % session)
if n.size > 0:
# Plot noise spectrum
avgns = np.sqrt(Filter.average_noise(n) / df)
if savedat:
np.savetxt('%s-noise.dat' % session, np.vstack((np.arange(len(avgns))*df, avgns)).T,
header='Frequency (Hz), Noise (%s/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
avgns[0] = 0 # for better plot
figure()
plot(np.arange(len(avgns))*df, avgns)
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('Noise$\quad$(%s/$\sqrt{\mathrm{Hz}}$)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-noise.pdf' % session)
if p.size > 0 and n.size > 0:
# Generate template
tmpl, sn = Filter.generate_template(p, n, lpfc=lpfc, hpfc=hpfc, nulldc=nulldc, max_shift=max_shift)
if savedat:
np.savetxt('%s-template.dat' % session, np.vstack((t, tmpl)).T,
header='Time (s), Template (A.U.)', delimiter='\t')
np.savetxt('%s-sn.dat' % session, np.vstack((np.arange(len(sn))*df, sn/np.sqrt(df))).T,
header='Frequency (Hz), S/N (/srHz)', delimiter='\t')
if plotting:
# Plot template
figure()
plot(t, tmpl)
xlabel('Time$\quad$(s)')
ylabel('Template$\quad$(A.U.)')
tight_layout()
savefig('%s-template.pdf' % session)
# Plot SNR
figure()
plot(np.arange(len(sn))*df, sn/np.sqrt(df))
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('S/N$\quad$(/$\sqrt{\mathrm{Hz}}$)')
tight_layout()
savefig('%s-sn.pdf' % session)
# Calculate baseline resolution
print "Resolving power: %.2f (%.2f eV @ 5.9 keV)" % (np.sqrt((sn**2).sum()*2), Analysis.baseline(sn))
# Perform optimal filtering
pha_p = Filter.optimal_filter(p, tmpl, max_shift=max_shift)
pha_n = Filter.optimal_filter(n, tmpl, max_shift=0)
# Offset correction
(a, b), coef = Analysis.fit_offset(pha_p, offset, sigma=sigma, method=ocmethod, flip=flip)
if coef > thre:
oc_pha_p = Analysis.offset_correction(pha_p, offset, b)
oc_pha_n = Analysis.offset_correction(pha_n, offset, b)
print "Offset correction with: PHA = %f * (1 + %f * Offset)" % (a, b)
if plotting:
figure()
ka = Analysis.ka(np.vstack((pha_p, offset)).T, sigma=sigma)
plot(ka.T[1], ka.T[0], '.', c='k')
x_min, x_max = xlim()
ofs = np.linspace(x_min, x_max)
label = '$\mathrm{PHA}=%.2f\\times(1+%.2f\\times\mathrm{Offset})$' % (a, b)
plot(ofs, a*(1+b*ofs), 'r-', label=label)
xlabel('Offset$\quad$(V)')
ylabel('PHA$\quad$(V)')
legend(frameon=False)
tight_layout()
savefig('%s-offset.pdf' % session)
else:
oc_pha_p = pha_p
oc_pha_n = pha_n
print "Skipped offset correction: correlation coefficient (%f) is too small" % coef
# Check line database
if "%sKa" % atom not in Constants.LE.keys() or "%sKb" % atom not in Constants.LE.keys():
raise ValueError('Unsupported atom: %s' % atom)
# Linearity correction
pha_line_center = np.asarray([ np.median(Analysis.ka(oc_pha_p, sigma=sigma)), np.median(Analysis.kb(oc_pha_p, sigma=sigma)) ])
line_energy = np.asarray([ Constants.LE['%sKa' % atom], Constants.LE['%sKb' % atom] ])
if ignorekb:
a, b = Analysis.fit_linearity([pha_line_center[0]], [line_energy[0]], deg=1)
print "Linearity correction with: PHA = %e * E" % (b)
else:
a, b = Analysis.fit_linearity(pha_line_center, line_energy, deg=2)
print "Linearity correction with: PHA = %e * E^2 + %e * E" % (a, b)
print "MnKb saturation ratio: %.2f %%" % ((pha_line_center[1]/pha_line_center[0])/(line_energy[1]/line_energy[0])*100)
lc_pha_p = Analysis.linearity_correction(oc_pha_p, a, b)
lc_pha_n = Analysis.linearity_correction(oc_pha_n, a, b)
if savedat:
        np.savetxt('%s-linearity.dat' % session, np.array([pha_line_center[0]]) if ignorekb else pha_line_center[np.newaxis,:],
header='%sKa PHA' % atom if ignorekb else '%sKa PHA, %sKb PHA' % (atom, atom), delimiter='\t')
if plotting:
figure()
x = np.linspace(0, 7e3)
if ignorekb:
plot(line_energy[0]/1e3, pha_line_center[0], '+', color='b')
plot(x/1e3, x*b, 'r--')
else:
plot(line_energy/1e3, pha_line_center, '+', color='b')
plot(x/1e3, x**2*a+x*b, 'r--')
xlim((0, 7))
xlabel('Energy$\quad$(keV)')
ylabel('PHA$\quad$(a.u.)')
tight_layout()
savefig('%s-linearity.pdf' % session)
# Energy Spectrum
if plotting:
figure()
hcount, hbin, hpatch = hist(lc_pha_p[lc_pha_p==lc_pha_p]/1e3, bins=7000/binsize, histtype='stepfilled', color='y')
xlim(0, 7)
xlabel('Energy$\quad$(keV)')
ylabel('Count')
tight_layout()
savefig('%s-spec.pdf' % session)
if savedat:
hcount, hbin = np.histogram(lc_pha_p[lc_pha_p==lc_pha_p]/1e3, bins=7000/binsize)
np.savetxt('%s-spec.dat' % session, np.vstack(((hbin[1:]+hbin[:-1])/2, hcount)).T,
header='Energy (keV), Count', delimiter='\t')
# Line fitting
def _line_fit(data, min, line):
# Fit
(dE, width), (dE_error, width_error), e = Analysis.fit(data, binsize=binsize, min=min, line=line, shift=shift, method=method)
if method == "cs":
chi_squared, dof = e
if method in ("mle", "ls"):
print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV" \
% (line, width, width_error, dE)
elif method == "cs":
print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV (Red. chi^2 = %.1f/%d = %.2f)" \
% (line, width, width_error, dE, chi_squared, dof, chi_squared/dof)
return dE, width, width_error
def _line_spectrum(data, min, line, dE, width, width_error):
# Draw histogram
n, bins = Analysis.histogram(data, binsize=binsize)
        if method == "cs":
gn, gbins = Analysis.group_bin(n, bins, min=min)
else:
# No grouping in mle and ls
gn, gbins = n, bins
ngn = gn/(np.diff(gbins))
ngn_sigma = np.sqrt(gn)/(np.diff(gbins))
cbins = (gbins[1:]+gbins[:-1])/2
if plotting:
figure()
if width_error is not None:
label = 'FWHM$=%.2f\pm %.2f$ eV' % (width, width_error)
else:
label = 'FWHM$=%.2f$ eV (Fixed)' % width
if method == "cs":
errorbar(cbins, ngn, yerr=ngn_sigma, xerr=np.diff(gbins)/2, capsize=0, ecolor='k', fmt=None, label=label)
else:
hist(data, bins=gbins, weights=np.ones(len(data))/binsize, histtype='step', ec='k', label=label)
E = np.linspace(bins.min(), bins.max(), 1000)
model = Analysis.normalization(ngn, gbins, dE, width, line=line, shift=shift) \
* Analysis.line_model(E, dE, width, line=line, shift=shift, full=True)
# Plot theoretical model
plot(E, model[0], 'r-')
# Plot fine structures
for m in model[1:]:
plot(E, m, 'b--')
xlabel('Energy$\quad$(eV)')
ylabel('Normalized Count$\quad$(count/eV)')
legend(frameon=False)
ymin, ymax = ylim()
ylim(ymin, ymax*1.1)
tight_layout()
savefig("%s-%s.pdf" % (session, line))
if savedat:
np.savetxt('%s-%s.dat' % (session, line), np.vstack((cbins, gn)).T,
header='Energy (keV), Count', delimiter='\t')
## Ka
ka = Analysis.ka(lc_pha_p, sigma=sigma)
dE, width, width_error = _line_fit(ka, ka_min, "%sKa" % atom)
_line_spectrum(ka, ka_min, "%sKa" % atom, dE, width, width_error)
## Kb
kb = Analysis.kb(lc_pha_p, sigma=sigma)
if kbfit:
dE, width, width_error = _line_fit(kb, kb_min, "%sKb" % atom)
else:
width_error = None
_line_spectrum(kb, kb_min, "%sKb" % atom, dE, width, width_error)
## Baseline
f_pha_n = lc_pha_n[Filter.median_filter(lc_pha_n, sigma=sigma)]
baseline = Analysis.sigma2fwhm(np.std(f_pha_n))
print "Baseline resolution: %.2f eV" % baseline
n, bins = Analysis.histogram(f_pha_n, binsize=binsize)
if savedat:
np.savetxt('%s-baseline.dat' % session, np.vstack(((bins[1:]+bins[:-1])/2, n)).T,
header='Energy (keV), Count', delimiter='\t')
if plotting:
figure()
label = 'FWHM$=%.2f$ eV' % baseline
hist(f_pha_n, bins=bins, weights=np.ones(len(f_pha_n))/binsize, histtype='step', ec='k', label=label)
mu, sigma = norm.fit(f_pha_n)
E = np.linspace(bins.min(), bins.max(), 1000)
plot(E, norm.pdf(E, loc=mu, scale=sigma)*len(f_pha_n), 'r-')
xlabel('Energy$\quad$(eV)')
ylabel('Normalized Count$\quad$(count/eV)')
legend(frameon=False)
tight_layout()
savefig('%s-baseline.pdf' % session) | mit |
mwv/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
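    # Margin-based form of the modified Huber loss: with margin z = y_true * y_pred,
    # the loss is (1 - z)^2 for -1 <= z < 1, 0 for z >= 1, and the linear
    # continuation -4*z for z < -1.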
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/plot_legislation/plot_ticpe_taux_implicite.py | 4 | 2264 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:06:45 2015
@author: thomas.douenne
TICPE: Taxe intérieure sur la consommation des produits énergétiques
(domestic consumption tax on energy products)
"""
# The goal of this script is to plot the evolution of the implicit TICPE rate since 1993.
# This rate is studied for diesel and for unleaded petrol.
# Import general-purpose modules
from pandas import concat
# Import Openfisca-specific modules
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar_list
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_accises import get_accises_carburants
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_tva import get_tva_taux_plein
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_prix_carburants import \
get_prix_carburants
# Fetch the legislation parameters and the prices
ticpe = ['ticpe_gazole', 'ticpe_super9598']
accise_diesel = get_accises_carburants(ticpe)
prix_ttc = ['diesel_ttc', 'super_95_ttc']
prix_carburants = get_prix_carburants(prix_ttc)
tva_taux_plein = get_tva_taux_plein()
# Build a dataframe containing these parameters
df_taux_implicite = concat([accise_diesel, prix_carburants, tva_taux_plein], axis = 1)
df_taux_implicite.rename(columns = {'value': 'taux plein tva'}, inplace = True)
# From these parameters, compute the implicit tax rates
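# The implicit rate is the excise duty (TICPE) grossed up by the full VAT rate,
# divided by the pre-tax share of the pump price:
#   taux_implicite = accise * (1 + TVA) / (prix_TTC - accise * (1 + TVA))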
df_taux_implicite['taux_implicite_diesel'] = (
df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva']) /
(df_taux_implicite['prix diesel ttc'] -
(df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva'])))
)
df_taux_implicite['taux_implicite_sp95'] = (
df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva']) /
(df_taux_implicite['prix super 95 ttc'] -
(df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva'])))
)
df_taux_implicite = df_taux_implicite.dropna()
# Produce the plots
graph_builder_bar_list(df_taux_implicite['taux_implicite_diesel'], 1, 1)
graph_builder_bar_list(df_taux_implicite['taux_implicite_sp95'], 1, 1)
| agpl-3.0 |
jmontgom10/Mimir_pyPol | oldCode/04b_avgBAABditherHWPimages.py | 1 | 17054 | # -*- coding: utf-8 -*-
"""
Combines all the images for a given (TARGET, FILTER, HWP) combination to
produce a single, average image.
Estimates the sky background level of the on-target position at the time of the
on-target observation using a bracketing pair of off-target observations through
the same HWP polaroid rotation value. Subtracts this background level from
each on-target image to produce background free images. Applies an airmass
correction to each image, and combines these final images to produce a background
free, airmass corrected, average image.
"""
# Core imports
import os
import sys
import copy
import warnings
# Import scipy/numpy packages
import numpy as np
from scipy import ndimage
# Import astropy packages
from astropy.table import Table
import astropy.units as u
from astropy.convolution import Gaussian2DKernel
from astropy.modeling import models, fitting
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import (make_source_mask,
MedianBackground, SigmaClip, Background2D)
# Import plotting utilities
from matplotlib import pyplot as plt
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is a list of targets for which to process each subgroup (observational
# group... never spanning multiple nights, etc...) instead of combining into a
# single "metagroup" for all observations of that target. The default behavior
# is to go ahead and combine everything into a single, large "metagroup". The
# calibration data should probably not be processed as a metagroup though.
processSubGroupList = []
processSubGroupList = [t.upper() for t in processSubGroupList]
# Define the location of the PPOL reduced data to be read and worked on
PPOL_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611\\'
S3_dir = os.path.join(PPOL_data, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611'
# This is the location of the previously generated masks (step 4)
maskDir = os.path.join(pyPol_data, 'Masks')
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
HWPDir = os.path.join(polarimetryDir, 'HWPImgs')
if (not os.path.isdir(HWPDir)):
os.mkdir(HWPDir, 0o755)
bkgPlotDir = os.path.join(HWPDir, 'bkgPlots')
if (not os.path.isdir(bkgPlotDir)):
os.mkdir(bkgPlotDir, 0o755)
# # Setup PRISM detector properties
# read_noise = 13.0 # electrons
# effective_gain = 3.3 # electrons/ADU
#########
### Establish the atmospheric extinction (magnitudes/airmass)
#########
# Following table from Hu (2011)
# Data from Gaomeigu Observational Station
# Passband | K'(lambda) [mag/airmass] | K'' [mag/(color*airmass)]
# U 0.560 +/- 0.023 0.061 +/- 0.004
# B 0.336 +/- 0.021 0.012 +/- 0.003
# V 0.198 +/- 0.024 -0.015 +/- 0.004
# R 0.142 +/- 0.021 -0.067 +/- 0.005
# I 0.093 +/- 0.020 0.023 +/- 0.006
# Following table from Schmude (1994)
# Data from Texas A & M University Observatory
# Passband | K(lambda) [mag/airmass] | dispersion on K(lambda)
# U 0.60 +/- 0.05 0.120
# B 0.40 +/- 0.06 0.165
# V 0.26 +/- 0.03 0.084
# R 0.19 +/- 0.03 0.068
# I 0.16 +/- 0.02 0.055
# TODO: Ask Dan about atmospheric extinction from airmass at NIR
kappa = dict(zip(['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K' ],
[0.60, 0.40, 0.26, 0.19, 0.16, 0.05, 0.01, 0.005]))
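# The J/H/K coefficients above are rough placeholders (see the TODO); each
# on-target image is later corrected with tmpImg.correct_airmass(thisKappa),
# which is assumed to rescale the flux by 10**(0.4 * kappa * airmass).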
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Determine which parts of the fileIndex pertain to HEX dither science images
useFiles = np.logical_and(
fileIndex['USE'] == 1,
fileIndex['DITHER_TYPE'] == 'ABBA'
)
useFileRows = np.where(useFiles)
# Cull the file index to only include files selected for use
fileIndex = fileIndex[useFileRows]
# Define an approximate pixel scale
pixScale = 0.5789*(u.arcsec/u.pixel)
# TODO: implement a FWHM seeing cut... not yet working because PSF getter seems
# to be malfunctioning in step 2
#
#
# # Loop through each unique GROUP_ID and test for bad seeing conditions.
# groupByID = fileIndex.group_by(['GROUP_ID'])
# for subGroup in groupByID.groups:
# # Grab the FWHM values for this subGroup
# thisFWHMs = subGroup['FWHM']*u.pixel
#
# # Grab the median and standard deviation of the seeing for this subgroup
# medianSeeing = np.median(thisFWHMs)
# stdSeeing = np.std(thisFWHMs)
#
# # Find bad FWHM values
# badFWHMs = np.logical_not(np.isfinite(subGroup['FWHM']))
# badFWHMs = np.logical_or(
# badFWHMs,
# thisFWHMs <= 0
# )
# badFWHM = np.logical_and(
# badFWHM,
# thisFWHMs > 2.0*u.arcsec
# )
# import pdb; pdb.set_trace()
# Group the fileIndex by...
# 1. Target
# 2. Waveband
fileIndexByTarget = fileIndex.group_by(['TARGET', 'FILTER'])
# Loop through each group
for group in fileIndexByTarget.groups:
# Grab the current group information
thisTarget = str(np.unique(group['TARGET'].data)[0])
thisFilter = str(np.unique(group['FILTER'].data)[0])
# # Skip the Merope nebula for now... not of primary scientific importance
# if thisTarget == 'MEROPE': continue
# Update the user on processing status
print('\nProcessing images for')
print('Target : {0}'.format(thisTarget))
print('Filter : {0}'.format(thisFilter))
# Grab the atmospheric extinction coefficient for this wavelength
thisKappa = kappa[thisFilter]
# Further divide this group by its constituent HWP values
indexByPolAng = group.group_by(['IPPA'])
# Loop over each of the HWP values, as these are independent from
# eachother and should be treated entirely separately from eachother.
for IPPAgroup in indexByPolAng.groups:
# Grab the current HWP information
thisIPPA = np.unique(IPPAgroup['IPPA'].data)[0]
# Update the user on processing status
print('\tIPPA : {0}'.format(thisIPPA))
# For ABBA dithers, we need to compute the background levels on a
# sub-group basis. If this target has not been selected for subGroup
# averaging, then simply append the background subtracted images to a
# cumulative list of images to align and average.
        # Initialize an image list to store all the images for this
# (target, filter, pol-ang) combination
imgList = []
indexByGroupID = IPPAgroup.group_by(['GROUP_ID'])
for subGroup in indexByGroupID.groups:
            # Grab the name of this subGroup
thisSubGroup = str(np.unique(subGroup['OBJECT'])[0])
# if (thisSubGroup != 'NGC2023_R1') and (thisSubGroup != 'NGC2023_R2'): continue
# Construct the output file name and test if it alread exsits.
if thisTarget in processSubGroupList:
outFile = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])
outFile = os.path.join(HWPDir, outFile) + '.fits'
elif thisTarget not in processSubGroupList:
outFile = '_'.join([thisTarget, thisFilter, str(thisIPPA)])
outFile = os.path.join(HWPDir, outFile) + '.fits'
# Test if this file has already been constructed and either skip
# this subgroup or break out of the subgroup loop.
if os.path.isfile(outFile):
print('\t\tFile {0} already exists...'.format(os.path.basename(outFile)))
if thisTarget in processSubGroupList:
continue
elif thisTarget not in processSubGroupList:
break
# Update the user on the current execution status
print('\t\tProcessing images for subgroup {0}'.format(thisSubGroup))
            # Initialize lists to store the A and B images.
            AimgList = []
            BimgList = []
            # Initialize a list to store the off-target sky background levels
            BbkgList = []
            # Initialize lists to store the times of observation
AdatetimeList = []
BdatetimeList = []
# Read in all the images for this subgroup
progressString = '\t\tNumber of Images : {0}'
for iFile, filename in enumerate(subGroup['FILENAME']):
# Update the user on processing status
print(progressString.format(iFile+1), end='\r')
                # Read in a temporary copy of this image
PPOL_file = os.path.join(S3_dir, filename)
tmpImg = ai.reduced.ReducedScience.read(PPOL_file)
# Crop the edges of this image
ny, nx = tmpImg.shape
binningArray = np.array(tmpImg.binning)
# Compute the amount to crop to get a 1000 x 1000 image
cy, cx = (ny - 1000, nx - 1000)
# Compute the crop boundaries and apply them
lf = np.int(np.round(0.5*cx))
rt = lf + 1000
bt = np.int(np.round(0.5*cy))
tp = bt + 1000
tmpImg = tmpImg[bt:tp, lf:rt]
# Grab the on-off target value for this image
thisAB = subGroup['AB'][iFile]
# Place the image in a list and store required background values
if thisAB == 'B':
# Place B images in the BimgList
BimgList.append(tmpImg)
# Place the median value of this off-target image in list
mask = make_source_mask(
tmpImg.data, snr=2, npixels=5, dilate_size=11
)
mean, median, std = sigma_clipped_stats(
tmpImg.data, sigma=3.0, mask=mask
)
BbkgList.append(median)
# Place the time of this image in a list of time values
BdatetimeList.append(tmpImg.julianDate)
if thisAB == 'A':
# Read in any associated masks and store them.
maskFile = os.path.join(maskDir, os.path.basename(filename))
# If there is a mask for this file, then apply it!
if os.path.isfile(maskFile):
# Read in the mask file
tmpMask = ai.reduced.ReducedScience.read(maskFile)
# Crop the mask to match the shape of the original image
                        tmpMask = tmpMask[bt:tp, lf:rt]
# Grab the data to be masked
tmpData = tmpImg.data
# Mask the data and put it back into the tmpImg
maskInds = np.where(tmpMask.data)
tmpData[maskInds] = np.NaN
tmpImg.data = tmpData
# Place B images in the BimgList
AimgList.append(tmpImg)
# Place the time of this image in a list of time values
AdatetimeList.append(tmpImg.julianDate)
# Create a new line for shell output
print('')
# Construct an image stack of the off-target images
BimageStack = ai.utilitywrappers.ImageStack(BimgList)
# Build a supersky image from these off-target images
superskyImage = BimageStack.produce_supersky()
            # import pdb; pdb.set_trace()
# Locate regions outside of a 5% deviation
tmpSuperskyData = superskyImage.data
maskedPix = np.abs(tmpSuperskyData - 1.0) > 0.05
# Get rid of the small stuff and expand the big stuff
maskedPix = ndimage.binary_opening(maskedPix, iterations=2)
maskedPix = ndimage.binary_closing(maskedPix, iterations=2)
maskedPix = ndimage.binary_dilation(maskedPix, iterations=4)
# TODO: Make the box_size and filter_size sensitive to binning.
binningArray = np.array(superskyImage.binning)
box_size = tuple((100/binningArray).astype(int))
filter_size = tuple((10/binningArray).astype(int))
# Setup the sigma clipping and median background estimators
sigma_clip = SigmaClip(sigma=3., iters=10)
bkg_estimator = MedianBackground()
# Compute a smoothed background image
bkgData = Background2D(superskyImage.data,
box_size=box_size, filter_size=filter_size, mask=maskedPix,
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
# Construct a smoothed supersky image object
smoothedSuperskyImage = ai.reduced.ReducedScience(
bkgData.background/bkgData.background_median,
uncertainty = bkgData.background_rms,
properties={'unit':u.dimensionless_unscaled}
)
# Interpolate background values to A times
AbkgList = np.interp(
AdatetimeList,
BdatetimeList,
BbkgList,
left=-1e6,
right=-1e6
)
# Cut out any extrapolated data (and corresponding images)
goodInds = np.where(AbkgList > -1e5)
AimgList = np.array(AimgList)[goodInds]
AdatetimeList = np.array(AdatetimeList)[goodInds]
AbkgList = AbkgList[goodInds]
AsubtractedList = []
# Loop through the on-target images and subtract background values
for Aimg, Abkg in zip(AimgList, AbkgList):
# Subtract the interpolated background values from the A images
tmpImg = Aimg - smoothedSuperskyImage*(Abkg*Aimg.unit)
# Apply an airmass correction
tmpImg = tmpImg.correct_airmass(thisKappa)
# Append the subtracted and masked image to the list.
AsubtractedList.append(tmpImg)
# Now that the images have been fully processed, pause to generate
# a plot to store in the "background plots" folder. These plots
# constitute a good sanity check on background subtraction.
plt.plot(BdatetimeList, BbkgList, '-ob')
plt.scatter(AdatetimeList, AbkgList, marker='o', facecolor='r')
plt.xlabel('Julian Date')
plt.ylabel('Background Value [ADU]')
figName = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])
figName = os.path.join(bkgPlotDir, figName) + '.png'
plt.savefig(figName, dpi=300)
plt.close('all')
            # Decide whether this subgroup should be combined on its own or
            # accumulated into the single "metagroup" combination below.
if thisTarget.upper() in processSubGroupList:
# Construct an image combiner for the A images
AimgStack = ai.utilitywrappers.ImageStack(AsubtractedList)
# Align the images
AimgStack.align_images_with_wcs(
subPixel=False,
padding=np.NaN
)
# Combine the images
                AoutImg = AimgStack.combine_images()
# Save the image
AoutImg.write(outFile, dtype=np.float64)
else:
# Extend the imgList variable with background corrected images
imgList.extend(AsubtractedList)
if len(imgList) > 0:
# At the exit of the loop, process ALL the files from ALL the groups
# Construct an image combiner for the A images
imgStack = ai.utilitywrappers.ImageStack(imgList)
# Align the images
imgStack.align_images_with_wcs(
subPixel=False,
padding=np.NaN
)
# Combine the images
outImg = imgStack.combine_images()
# Save the image
outImg.write(outFile, dtype=np.float64)
print('\nDone computing average images!')
| mit |
jldbc/pybaseball | pybaseball/standings.py | 1 | 3820 | from typing import List, Optional
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment, PageElement, ResultSet
from . import cache
from .utils import most_recent_season
def get_soup(year: int) -> BeautifulSoup:
url = f'http://www.baseball-reference.com/leagues/MLB/{year}-standings.shtml'
s = requests.get(url).content
return BeautifulSoup(s, "lxml")
def get_tables(soup: BeautifulSoup, season: int) -> List[pd.DataFrame]:
datasets = []
if season >= 1969:
tables: List[PageElement] = soup.find_all('table')
if season == 1981:
# For some reason BRef has 1981 broken down by halves and overall
# https://www.baseball-reference.com/leagues/MLB/1981-standings.shtml
tables = [x for x in tables if 'overall' in x.get('id', '')]
for table in tables:
data = []
headings: List[PageElement] = [th.get_text() for th in table.find("tr").find_all("th")]
data.append(headings)
table_body: PageElement = table.find('tbody')
rows: List[PageElement] = table_body.find_all('tr')
for row in rows:
cols: List[PageElement] = row.find_all('td')
cols_text: List[str] = [ele.text.strip() for ele in cols]
cols_text.insert(0, row.find_all('a')[0].text.strip()) # team name
data.append([ele for ele in cols_text if ele])
datasets.append(data)
else:
data = []
table = soup.find('table')
headings = [th.get_text() for th in table.find("tr").find_all("th")]
headings[0] = "Name"
if season >= 1930:
for _ in range(15):
headings.pop()
elif season >= 1876:
for _ in range(14):
headings.pop()
else:
for _ in range(16):
headings.pop()
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
if row.find_all('a') == []:
continue
cols = row.find_all('td')
if season >= 1930:
for _ in range(15):
cols.pop()
elif season >= 1876:
for _ in range(14):
cols.pop()
else:
for _ in range(16):
cols.pop()
cols = [ele.text.strip() for ele in cols]
cols.insert(0,row.find_all('a')[0].text.strip()) # team name
data.append([ele for ele in cols if ele])
datasets.append(data)
#convert list-of-lists to dataframes
for idx in range(len(datasets)):
datasets[idx] = pd.DataFrame(datasets[idx])
return datasets #returns a list of dataframes
@cache.df_cache()
def standings(season:Optional[int] = None) -> pd.DataFrame:
# get most recent standings if date not specified
if season is None:
season = most_recent_season()
if season < 1876:
raise ValueError(
"This query currently only returns standings until the 1876 season. "
"Try looking at years from 1876 to present."
)
# retrieve html from baseball reference
soup = get_soup(season)
if season >= 1969:
raw_tables = get_tables(soup, season)
else:
t = [x for x in soup.find_all(string=lambda text:isinstance(text,Comment)) if 'expanded_standings_overall' in x]
code = BeautifulSoup(t[0], "lxml")
raw_tables = get_tables(code, season)
tables = [pd.DataFrame(table) for table in raw_tables]
for idx in range(len(tables)):
tables[idx] = tables[idx].rename(columns=tables[idx].iloc[0])
tables[idx] = tables[idx].reindex(tables[idx].index.drop(0))
return tables
| mit |
BrainTech/openbci | obci/analysis/csp/MLogit.py | 1 | 11792 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""This is a class for Multinomial Logit Regression
The class uses the scipy.optimize package for minimization of a cost function.
The gradient of the cost function is passed to the minimizer.
Piotr Milanowski, November 2011, Warsaw
"""
from scipy.optimize import fmin_ncg, fmin_bfgs, fmin
import numpy as np
import matplotlib.pyplot as plt
def mix(x1, x2, deg=6):
out = np.zeros([len(x1), sum(range(deg+2))])
k = 0
for i in xrange(deg+1):
for j in range(i+1):
out[:,k] = x1**(i-j)*x2**(j)
k += 1
return out
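# mix() builds the full bivariate polynomial feature expansion of x1 and x2 up
# to total degree `deg`: the columns run over x1**(i-j) * x2**j, giving
# sum(range(deg + 2)) = (deg + 1)*(deg + 2)/2 features (including the constant).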
class logit(object):
"""This is a class for a normal two-class logistic regression
The hypothesis of this regression is a sigmoid (logistic, logit) function.
It returns the probability of the data belonging to the first class.
    The minimization of the cost function uses the BFGS routine (fmin_bfgs) from the scipy.optimize package.
    The regression can account for a regularization term.
"""
def __init__(self, data, classes, labels=None):
"""Initialization of data
A column of ones is added to the data array.
Parameters:
===========
data : 2darray
NxM array. Rows of this array represent data points, columns represent features.
classes : 1darray
            an N dimensional vector of classes. Each class is represented by either 0 or 1.
        labels [= None] : list
            a 2 element list that maps classes to their names.
Example:
=========
>>>X = np.random.rand(20, 4) #data
>>>Y = np.random.randint(0,2,20) #classes
>>>labels = ['class 1','class 2']
>>>MLogit.logit(X, Y, labels)
"""
self.dataNo, self.featureNo = data.shape
if len(classes) != self.dataNo:
            raise ValueError, 'Not every data point has its target label!'
        #Adding a column of 1s and normalizing data - NO NORMALIZATION NEEDED
self.X = np.concatenate((np.ones([self.dataNo, 1]), data), axis = 1)
self.Y = classes
def _sigmoid(self, z):
"""This returns the value of a sigmoid function.
        The sigmoid (logistic, logit) function is:
        f(z) = 1 / (1 + exp(-z))
Parameters:
===========
z : ndarray
the parameter of the function
Returns:
sig : ndarray
values of sigmoid function at z
"""
return 1/(1 + np.exp(-z))
def cost_function(self, theta, reg = 0):
"""The cost function of logit regression model
It looks like this:
        J(theta) = -(1/M)*sum_{i=1}^{M}(y_i*log(h(theta;x_i)) + (1-y_i)*log(1-h(theta;x_i))) +
        + (reg/(2*M))*sum_{j=1}^{N}(theta_j)^2
Parameters:
===========
theta : 1darray
the array of parameters. It's a (N+1) dimensional vector
reg [= 0] : float
the regularization parameter. This parameter penalizes theta being too big (overfitting)
Returns:
========
J : float
the value of cost function for given theta
"""
z = self._sigmoid(np.dot(self.X, theta))
regular = (reg/(2.0*self.dataNo))*sum(theta[1:]*theta[1:])
J = self.Y * np.log(z) + (1 - self.Y)*np.log(1 - z)
J = -(1.0 / self.dataNo) * sum(J)
return regular + J
def gradient_function(self, theta, reg = 0):
"""The gradient of cost function
The gradient looks like this:
        g[0] = 1/M * sum_{i=1}^{M}(h(theta;x_i) - y_i)*x_i^0
        g[j] = 1/M * sum_{i=1}^{M}(h(theta;x_i) - y_i)*x_i^j + theta[j]*reg/M    (for j > 0)
Parameters:
===========
theta : 1darray
the vector of parameters
reg : float
the regularization parameter
Returns:
========
fprime : 1darray
the gradient of cost function.
"""
gradient = np.zeros(self.featureNo + 1)
N = 1.0 / self.dataNo
z = np.dot(self.X, theta)
cost = self._sigmoid(z) - self.Y
# gradient[0] = N * sum(cost * self.X[:, 0])
# for j in xrange(self.featureNo):
# gradient[j] = N * sum(cost * self.X[:, j]) - reg * N * theta[j]
gradient = N * np.dot(cost, self.X)
gradient[1:] += reg * N * theta[1:]
return gradient
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Minimizing function
Based on the BFGS routine (fmin_bfgs) from the scipy.optimize package
Parameters:
===========
maxiter : int
maximal number of iterations
reg [= 0] : float
regularization parameter
initial_gues [= None] : 1darray
a vector of size #features + 1. If None, zeros will be assumed.
Returns:
========
theta : 1darray
optimal model parameters
"""
if initial_gues is None:
initial_gues = np.zeros(self.featureNo + 1)
out = fmin_bfgs(self.cost_function, initial_gues, \
self.gradient_function, args = ([reg]))
self.theta = out
return out
def predict(self, x, val=0.9):
"""For prediction of x
Returns predicted probability of x being in class 1
"""
x = np.insert(x, 0, 1) #inserting one at the beginning
z = np.dot(x, self.theta)
#if self._sigmoid(z) >=val:
#return 1
#else:
#return 0
return self._sigmoid(z)
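# Illustrative end-to-end sketch (random data and settings assumed, not from the project):
#   X = np.random.rand(100, 2); Y = np.random.randint(0, 2, 100)
#   clf = logit(X, Y)
#   theta = clf.fit(maxiter=200, reg=0.5)   # BFGS fit of the regularized cost
#   p = clf.predict(X[0])                   # probability of sample 0 belonging to class 1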
def plot_features(self, show=True):
y = self.Y
idx = np.argsort(y)
x = self.X[idx, :]
y = y[idx]
N, feats = x.shape
if feats == 3:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
plt.plot(x1[:,1],x1[:,2],'ro',x2[:,1],x2[:,2],'go')
for x in np.arange(-5, 5, 0.5):
for y in np.arange(-3, 3, 0.5):
if self.predict(np.array([x,y])) <=0.5:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.legend(('Class 0','Class 1'))
if show:
plt.show()
elif feats == 2:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
for x in np.arange(x1.min(), x1.max(), 0.1):
for y in np.arange(x2.min(), x2.max(), 0.1):
if self.predict(np.array([x,y])) <=0.01:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.plot(x1[:,1],'ro',x2[:,1],'go')
if show:
plt.show()
else:
print "More than 2 dimmensions",x.shape
# def plot_fitted(self):
# N, feats = self.X.shape
# if feats == 3:
# x1 = se
def __normalization(self, data):
"""Function normalizes the data
Normalization is done by subtracting the mean of each column from each column member
and dividing by the column standard deviation.
Parameters:
===========
data : 2darray
the data array
Returns:
========
norms : 2darray
normalized values
"""
mean = data.mean(axis = 0)
variance = data.std(axis = 0)
return (data - mean) / variance
class mlogit(logit):
"""This is a multivariate variation of logit model
"""
def __init__(self, data, classes, labels=None):
"""See logit description"""
super(mlogit, self).__init__(data, classes, labels)
self.classesNo, classesIdx = np.unique(classes, return_inverse = True)
self.count_table = np.zeros([len(classes), len(self.classesNo)])
self.count_table[range(len(classes)), classesIdx] = 1.0
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Fitting logit model for multiclass case"""
theta = np.zeros([self.featureNo + 1, len(self.classesNo)])
for i in range(len(self.classesNo)):
self.Y = self.count_table[:,i]
theta[:, i] = super(mlogit, self).fit(maxiter, reg = reg, initial_gues = initial_gues)
self.theta = theta
return theta
def predict(self, x, val=0.9):
"""Class prediction"""
x = np.insert(x, 0, 1)
z = np.dot(x, self.theta)
probs = super(mlogit, self)._sigmoid(z)
idx = np.argmax(probs)
if probs[idx] >= val:
return self.classesNo[idx]
else:
return None
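# Illustrative sketch (shapes and values assumed): one-vs-rest fit on 3 classes, 2 features.
#   X = np.random.rand(90, 2)
#   Y = np.repeat([0, 1, 2], 30)
#   clf = mlogit(X, Y)
#   theta = clf.fit(maxiter=200, reg=0.1)   # theta has shape (n_features + 1, n_classes) = (3, 3)
#   clf.predict(X[0])                       # a class label, or None if the max probability < val (0.9)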
def plot_features(self):
cn = len(self.classesNo)
idx = np.argsort(self.Y)
y = self.Y[idx]
x = self.X[idx,:]
classes = []
if x.shape[1] == 3:
for i in range(cn):
beg, end = np.where(y==i)[0][[0,-1]]
plt.plot(x[beg:end+1, 1], x[beg:end +1, 2],'o')
classes.append('Class'+str(i))
plt.legend(classes)
plt.show()
else:
print "More than 2 dimmesions"
#class diagnostics(object):
# def __init__(self, classifier_obj, division=[0.6, 0.2, 0.2]):
# self.obj = classifier_obj
# self.div = division
# self.N, self.ft = self.obj.dataNo, self.obj.featureNo
# self.cvNo = self.N * division[1]
# self.testNo = self.N * division[2]
# self.trainNo = self.N * division[0]
# def diagnose(self, iters, reg, odrer=1, val=0.9):
# idx = np.linspace(0, self.N-1, self.N)
# TP, FP, TN, FN
# train_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# cv_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# test_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# X = self.obj.X
# Y = self.obj.Y
# for i in xrange(iters):
# np.random.shuffle(idx)
# train_set = X[idx[:self.trainNo], :]
# cv_set = X[idx[self.trainNo:self.trainNo+self.cvNo], :]
# test_set = X[idx[self.trainNo+self.cvNo:], :]
# classes_train = Y[idx[:self.trainNo], :]
# classes_cv = Y[idx[self.trainNo:self.trainNo+self.cvNo], :]
# classes_test = Y[idx[self.trainNo+self.cvNo:], :]
# Training
# self.obj.X = train_set
# self.obj.Y = classes_train
# self.obj.fit(100)
# for j, row in enumerate(train_set):
# cl = self.obj.predict(row, val)
# if cl == classes_train[j]:
# train_ok['tp'] += 1
# elif cl is None:
# train_ok['fn'] += 1
# else:
# train_ok['fp'] += 1
# Crossvalidation
# for j, row in enumerate(cv_set):
# cl = self.obj.predict(row, val)
# if cl == classes_cv[j]:
# cv_ok['tp'] += 1
# elif cl in None:
# cv_ok['fn'] += 1
# else:
# cv_ok['fp'] += 1
# Test set
# for j, row in enumerate(test_set):
# cl = self.obj.predict(row, val)
# if cl == classes_test[j]:
# test_ok['tp'] += 1
# elif cl is None:
# test_ok['fn'] += 1
# else:
# test_ok['fp'] += 1
# def power_set(self, lst, l):
# """Create a powerset of a list for given length"""
# r = [[]]
# for e in lst:
# r.extend([s + [e] for s in r])
# return set([j for j in r if len(j) <= l])
# def next_order(self, kernel, next_o):
# def make_order(self, p):
# init_featsNo = self.featNo
| gpl-3.0 |
vortex-exoplanet/VIP | vip_hci/negfc/utils_negfc.py | 2 | 8821 | #! /usr/bin/env python
"""
Module with post-processing related functions called from within the NegFC
(negative fake companion) algorithm.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['cube_planet_free']
import numpy as np
from ..metrics import cube_inject_companions
import math
from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show
def cube_planet_free(planet_parameter, cube, angs, psfn, plsc, imlib='opencv',
interpolation='lanczos4',transmission=None):
"""
Return a cube in which we have injected negative fake companion at the
position/flux given by planet_parameter.
Parameters
----------
planet_parameter: numpy.array or list
The (r, theta, flux) for all known companions. For a 4d cube r,
theta and flux must all be 1d arrays with length equal to cube.shape[0];
i.e. planet_parameter should have shape: (n_pl,3,n_ch).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psfn: numpy.array
The scaled psf expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
Returns
-------
cpf : numpy.array
The cube with negative companions injected at the position given in
planet_parameter.
"""
cpf = np.zeros_like(cube)
planet_parameter = np.array(planet_parameter)
if cube.ndim == 4:
if planet_parameter.shape[2] != cube.shape[0]:
raise TypeError("Input planet parameter with wrong dimensions.")
for i in range(planet_parameter.shape[0]):
if i == 0:
cube_temp = cube
else:
cube_temp = cpf
if cube.ndim == 4:
for j in range(cube.shape[0]):
cpf[j] = cube_inject_companions(cube_temp[j], psfn[j], angs,
flevel=-planet_parameter[i, 2, j],
plsc=plsc,
rad_dists=[planet_parameter[i, 0, j]],
n_branches=1,
theta=planet_parameter[i, 1, j],
imlib=imlib,
interpolation=interpolation,
verbose=False,
transmission=transmission)
else:
cpf = cube_inject_companions(cube_temp, psfn, angs,
flevel=-planet_parameter[i, 2], plsc=plsc,
rad_dists=[planet_parameter[i, 0]],
n_branches=1, theta=planet_parameter[i, 1],
imlib=imlib, interpolation=interpolation,
verbose=False, transmission=transmission)
return cpf
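# Shape sketch for the 3-d (ADI) case -- values are illustrative only, not taken from the package:
#   cube: (n_frames, ny, nx), angs: (n_frames,), psfn: normalized psf frame
#   planet_parameter = [(r_pix, theta_deg, flux)] per known companion, e.g. [(30.5, 120.0, 450.0)]
#   cube_empty = cube_planet_free(planet_parameter, cube, angs, psfn, plsc=0.01225)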
def radial_to_eq(r=1, t=0, rError=0, tError=0, display=False):
"""
Convert the position given in (r,t) into \delta RA and \delta DEC, as
well as the corresponding uncertainties.
t = 0 deg (resp. 90 deg) points toward North (resp. East).
Parameters
----------
r: float
The radial coordinate.
t: float
The angular coordinate.
rError: float
The error bar related to r.
tError: float
The error bar related to t.
display: boolean, optional
If True, a figure illustrating the error ellipse is displayed.
Returns
-------
out : tuple
((RA, RA error), (DEC, DEC error))
"""
ra = (r * np.sin(math.radians(t)))
dec = (r * np.cos(math.radians(t)))
u, v = (ra, dec)
nu = np.mod(np.pi/2-math.radians(t), 2*np.pi)
a, b = (rError,r*np.sin(math.radians(tError)))
beta = np.linspace(0, 2*np.pi, 5000)
x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),
v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))
raErrorInf = u - np.amin(x)
raErrorSup = np.amax(x) - u
decErrorInf = v - np.amin(y)
decErrorSup = np.amax(y) - v
if display:
plot(u,v,'ks',x,y,'r')
plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu),'ob',
(r-rError) * np.cos(nu), (r-rError) * np.sin(nu),'ob')
plot(r * np.cos(nu+math.radians(tError)),
r*np.sin(nu+math.radians(tError)),'ok')
plot(r * np.cos(nu-math.radians(tError)),
r*np.sin(nu-math.radians(tError)),'ok')
plot(0,0,'og',np.cos(np.linspace(0,2*np.pi,10000)) * r,
np.sin(np.linspace(0,2*np.pi,10000)) * r,'y')
plot([0,r*np.cos(nu+math.radians(tError*0))],
[0,r*np.sin(nu+math.radians(tError*0))],'k')
axes().set_aspect('equal')
lim = np.amax([a,b]) * 2.
xlim([ra-lim,ra+lim])
ylim([dec-lim,dec+lim])
gca().invert_xaxis()
show()
return ((ra,np.mean([raErrorInf,raErrorSup])),
(dec,np.mean([decErrorInf,decErrorSup])))
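# Quick convention check (illustrative): t = 0 deg points toward North, t = 90 deg toward East.
#   radial_to_eq(r=1, t=0)   # -> ((0.0, 0.0), (1.0, 0.0)), i.e. dRA = 0, dDEC = +1
#   radial_to_eq(r=1, t=90)  # -> dRA = +1, dDEC ~ 0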
def cart_to_polar(y, x, ceny=0, cenx=0):
"""
Convert cartesian into polar coordinates (r,theta) with
respect to a given center (cenx,ceny).
Parameters
----------
x,y: float
The cartesian coordinates.
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-ceny)**2 + (x-cenx)**2)
theta = np.degrees(np.arctan2(y-ceny, x-cenx))
return r, np.mod(theta,360)
def polar_to_cart(r, theta, ceny=0, cenx=0):
"""
Convert polar coordinates with respect to the center (cenx,ceny) into
cartesian coordinates (x,y) with respect to the bottom left corner of the
image.
Parameters
----------
r,theta: float
The polar coordinates.
Returns
-------
out : tuple
The cartesian coordinates (x,y) with respect to the bottom left corner
of the image.
"""
x = r*np.cos(np.deg2rad(theta)) + cenx
y = r*np.sin(np.deg2rad(theta)) + ceny
return x,y
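# Round-trip sketch with the center at the origin (illustrative):
#   cart_to_polar(y=1, x=0)           # -> (1.0, 90.0); theta measured from +x, counter-clockwise
#   polar_to_cart(r=1.0, theta=90.0)  # -> (~0.0, 1.0)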
def ds9index_to_polar(y, x, ceny=0, cenx=0):
"""
Convert pixel index read on image displayed with DS9 into polar coordinates
(r,theta) with respect to a given center (cenx,ceny).
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
center of the coordinate system. As a conclusion, when you read
(x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
x,y: float
The pixel index in DS9
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-0.5-ceny)**2 + (x-0.5-cenx)**2)
theta = np.degrees(np.arctan2(y-0.5-ceny, x-0.5-cenx))
return r, np.mod(theta,360)
def polar_to_ds9index(r, theta, ceny=0, cenx=0):
"""
Convert position (r,theta) in an image with respect to a given center
(cenx,ceny) into position in the image displayed with DS9.
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
center of the coordinate system. As a conclusion, when you read
(x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
r,theta: float
The polar coordinates with respect to the center (cenx,ceny); theta in degrees.
Returns
-------
out : tuple
The (x,y) pixel position in the DS9 coordinate system.
"""
x_ds9 = r*np.cos(np.deg2rad(theta)) + 0.5 + cenx
y_ds9 = r*np.sin(np.deg2rad(theta)) + 0.5 + ceny
return x_ds9, y_ds9
| mit |
chaluemwut/smcdemo | demo_filter.py | 1 | 2602 | import pickle
from feature_process import FeatureMapping
import feature_process
from text_processing import TextProcessing
from sklearn.cross_validation import train_test_split
is_not_important = {9:0,
13:0,
18:0,
19:0,
23:0,
28:0,
29:0,
33:0,
34:0,
37:0,
40:0,
44:0,
46:0,
50:0,
55:0,
59:0,
61:0,
62:0,
63:0,
72:0,
73:0,
78:0,
84:0,
86:0,
88:0,
97:0,
98:0,
103:0
}
def create_training_data():
data_lst = pickle.load(open('data/harvest.data', 'rb'))
feature_process.feature_map['source'] = {'Google':1, 'Twitter for iPad':2, 'Echofon':3,
'Bitly':4, 'twitterfeed':5, 'Twitter for iPhone':6,
'Foursquare':7, 'Facebook':8, 'Twitter for Android':9,
'TweetDeck':10, 'Twitter Web Client':11}
feature_process.feature_map['geo'] = ['None']
feature_process.feature_map['place'] = ['None']
feature_process.feature_map['verified'] = ['False']
feature_process.feature_map['geo_enabled'] = ['False']
y = []
x = []
for i in range(0, len(data_lst)):
try:
label = is_not_important[i]
except Exception as e:
label = 1
data = data_lst[i]
text = TextProcessing.process(data[0])
source = FeatureMapping.mapping('source', data[1])
re_tweet = data[2]
geo = FeatureMapping.mapping_other('geo', data[3])
place = FeatureMapping.mapping_other('place', data[4])
hash_tag = data[5]
media = data[6]
verified = FeatureMapping.mapping_other('verified', data[7])
follower = data[8]
statues = data[9]
desc = TextProcessing.process(data[10])
friend = data[11]
location = TextProcessing.process(data[12])
geo_enabled = FeatureMapping.mapping_other('geo_enabled', data[13])
y.append(label)
x.append([text, source, re_tweet, geo, place, hash_tag, media, verified, follower, statues, desc, friend, location, geo_enabled])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
clf = RandomForestClassifier()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
fsc = f1_score(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
print 'f1-score : ',fsc
print 'accuracy : ',acc
print y_pred
print y_test
if __name__ == '__main__':
create_training_data()
| apache-2.0 |
ChanChiChoi/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float64)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar argument.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| gpl-3.0 |
ShujiaHuang/AsmVar | src/AsmvarGenotype/GMM/GMM2D.py | 2 | 18363 | """
================================================
My own Gaussian Mixture Model for SV genotyping.
Learned from scikit-learn
================================================
Author : Shujia Huang
Date : 2014-01-06 14:33:45
"""
import sys
import numpy as np
from scipy import linalg
from sklearn import cluster
from sklearn.base import BaseEstimator
from sklearn.utils.extmath import logsumexp
EPS = np.finfo(float).eps
class GMM ( BaseEstimator ) :
"""
Copy from scikit-learn
"""
def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=10, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.init_means = []
self.init_covars = []
self.category = [] # For genotype
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError( 'Invalid value for covariance_type: %s' % covariance_type )
if n_init < 1: raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def predict(self, X):
"""
Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""
Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def fit(self, X):
"""
Copy form scikit-learn: gmm.py
Estimate model parameters with the expectation-maximization
algorithm.
A initialization step is performed before entering the em
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
lowest_bias = np.infty
c1,c2,c3 = '1/1', '0/1', '0/0'
m1,m2,m3 = 0.001 , 0.5 , 1.0
v1,v2,v3 = 0.002, 0.002, 0.002
category = np.array([ [c1,c2,c3],
[c1,c2], [c1,c3], [c2,c3] ,
[c1] , [c2] , [c3] ])
init_means = np.array([ [[ m1],[ m2] , [ m3]],
[[ m1],[ m2]], [[m1],[m3]], [[m2],[m3]],
[[m1]] , [[m2]] , [[m3]] ])
init_covars = np.array([ [[[ v1]],[[ v2]],[[ v3]]],
[[[ v1]],[[ v2]]], [[[ v1]],[[ v3]]], [[[ v2]],[[ v3]]],
[[[ v1]]] , [[[ v2]]] , [[[ v3]]] ])
bestCovars, bestMeans, bestWeights, bestConverged, bestCategory = [], [], [], [], []
for i, (m,v,c) in enumerate( zip(init_means, init_covars, category) ) :
if i == 0 and self.n_components != 3 : continue
if i < 4 and self.n_components == 1 : continue
self.init_means = np.array(m)
self.init_covars = np.array(v)
self.category = np.array(c)
best_params,bias = self.training(X)
if lowest_bias > bias :
lowest_bias = bias
bestCovars = best_params['covars']
bestMeans = best_params['means']
bestWeights = best_params['weights']
bestConverged = best_params['converged']
bestCategory = best_params['category']
if self.n_components == 3 : break
if self.n_components == 2 and i == 3 : break
bestWeights = np.tile(1.0 / self.n_components, self.n_components)
self.covars_ = bestCovars
self.means_ = bestMeans
self.weights_ = bestWeights
self.converged_ = bestConverged
self.category = bestCategory
return self
####
def training(self, X):
max_log_prob = -np.infty
lowest_bias = np.infty
wmin, wmax = 0.8, 1.2 # Factor interval [wmin, wmax]
for w in np.linspace(wmin, wmax, self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
#self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_
self.means_ = w * self.init_means
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_= np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
"""
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape :
cv.shape = (1, 1)
self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv, self.covariance_type, self.n_components)
"""
self.covars_ = self.init_covars
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_= False
for i in range(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < self.thresh:
self.converged_ = True
break
#Maximization step
self._do_mstep(X, responsibilities, self.params, self.min_covar)
if self.n_components == 3:
curr_bias =(self.means_[0][0]-self.init_means[0][0])+np.abs(self.means_[1][0]-self.init_means[1][0])+(self.init_means[2][0]-self.means_[2][0])
elif self.n_components == 2:
curr_bias =np.abs(self.means_[0][0] - self.init_means[0][0]) + np.abs(self.init_means[1][0] - self.means_[1][0])
elif self.n_components == 1:
curr_bias =np.abs (self.means_[0][0] - self.init_means[0][0])
else :
print >> sys.stderr, '[ERROR] The companent could only between [1,3]. But yours is ', self.n_components
sys.exit(1)
self.Label2Genotype()
if w == wmin:
max_log_prob = log_likelihood[-1]
best_params = {'weights':self.weights_,
'means':self.means_,
'covars':self.covars_,
'converged':self.converged_,
'category':self.category}
if self.converged_:
lowest_bias = curr_bias
if self.converged_ and lowest_bias > curr_bias:
max_log_prob = log_likelihood[-1]
lowest_bias = curr_bias
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_,
'converged': self.converged_,
'category':self.category}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data." )
# if neendshift :
# self.covars_ = tmp_params['covars']
# self.means_ = tmp_params['means']
# self.weights_ = tmp_params['weights']
# self.converged_ = tmp_params['converged']
# self.category = tmp_params['category']
return best_params, lowest_bias
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""
Perform the M-step of the EM algorithm and return the class weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(self, X, responsibilities, weighted_X_sum, inverse_weights,min_covar)
return weights
"""
The methods below are specific to the genotyping process
"""
# Decide which Gaussian mean (mu) corresponds to which genotype
def Label2Genotype(self):
label2genotype = {}
if self.converged_:
if len(self.means_) > 3 :
print >> sys.stderr, 'Do not allow more than 3 components. But you set', len(self.means_)
sys.exit(1)
for label,mu in enumerate(self.means_[:,0]):
best_distance, bestIndx = np.infty, 0
for i,m in enumerate(self.init_means[:,0]):
distance = np.abs(mu - m)
if distance < best_distance:
bestIndx = i
best_distance = distance
label2genotype[label] = self.category[bestIndx]
# Mark as not converged if more than one 'label' maps to the same 'genotype'
g2c = {v:k for k,v in label2genotype.items()}
if len(label2genotype) != len(g2c): self.converged_ = False
else :
label2genotype = { label: './.' for label in range( self.n_components ) }
return label2genotype
def Mendel(self, genotype, sample2col, family):
ngIndx = []
m,n,num = 0.0,0.0,0 # m is match; n is not match
for k,v in family.items():
#if v[0] not in sample2col or v[1] not in sample2col : continue
if k not in sample2col or v[0] not in sample2col or v[1] not in sample2col: continue
if k not in sample2col :
print >> sys.stderr, 'The sample name is not in vcf file! ', k
sys.exit(1)
# c1 is son; c2 and c3 are the parents
c1,c2,c3 = genotype[ sample2col[k] ], genotype[ sample2col[v[0]] ], genotype[ sample2col[v[1]] ]
if c1 == './.' or c2 == './.' or c3 == './.': continue
num += 1;
ng = False
if c2 == c3 :
if c2 == '0/0' or c2 == '1/1' :
if c1 == c2 : m += 1
else :
n += 1
ng = True
else : # c2 == '0/1' and c3 == '0/1'
m += 1
elif c2 == '0/1' and c3 == '1/1' :
if c1 == '0/0' :
n += 1
ng = True
else : m += 1
elif c2 == '0/1' and c3 == '0/0' :
if c1 == '1/1' :
n += 1
ng = True
else : m += 1
elif c2 == '1/1' and c3 == '0/1' :
if c1 == '0/0' :
n += 1
ng = True
else : m += 1
elif c2 == '1/1' and c3 == '0/0' :
if c1 == '1/1' or c1 == '0/0':
n += 1
ng = True
else : m += 1
elif c2 == '0/0' and c3 == '0/1' :
if c1 == '1/1' :
n += 1
ng = True
else : m += 1
elif c2 == '0/0' and c3 == '1/1' :
if c1 == '0/0' or c1 == '1/1' :
n += 1
ng = True
else : m += 1
if ng :
ngIndx.append(sample2col[k])
ngIndx.append(sample2col[v[0]])
ngIndx.append(sample2col[v[1]])
return m,n,num,set(ngIndx)
###
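# Illustrative usage sketch (data shapes and options assumed, not from the original pipeline):
#   ratios = np.random.rand(200, 1)                # one feature per sample, e.g. an allele-ratio value
#   gmm = GMM(n_components=3, covariance_type='full', n_iter=100)   # only 'full' has a density routine below
#   gmm.fit(ratios)                                # EM fit against the three genotype templates
#   label2gt = gmm.Label2Genotype()                # maps component labels to '0/0', '0/1', '1/1'
#   genotypes = [label2gt[l] for l in gmm.predict(ratios)]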
def log_multivariate_normal_density(X, means, covars, covariance_type='full'):
"""
Log probability for full covariance matrices.
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([])
if X.shape[1] != means.shape[1]:
raise ValueError('The shape of X is not compatible with self')
log_multivariate_normal_density_dict = {
'full' : _log_multivariate_normal_density_full
}
return log_multivariate_normal_density_dict[covariance_type]( X, means, covars )
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""
Log probability for full covariance matrices.
"""
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probabily stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components) :
"""
Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
_covar_mstep_funcs = { 'full': _covar_mstep_full }
| mit |
kcyu1993/ML_course_kyu | projects/project1/scripts/model.py | 1 | 19450 | from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import copy
from data_utils import build_k_indices
from learning_model import *
from regularizer import *
from helpers import save_numpy_array
import numpy as np
class Model(object):
"""
Author: Kaicheng Yu
Machine learning model engine
Implement the optimizers
sgd
normal equations
cross-validation of given parameters
Abstract method:
__call__ produce the raw prediction, use the latest weight obtained by training
predict produce prediction values, could take weight as input
get_gradient define gradient here, including the gradient for regularizer
normalequ define normal equations
Support:
L1, L2 normalization
Due to the distribution of work, only LogisticRegression is fully tested for
fitting data and cross-validation.
LinearRegression model should also work but not fully tested.
The goal of this class is not specific to this learning project; it is meant to be
reusable and scalable to other problems and models.
"""
def __init__(self, train_data, validation=None, initial_weight=None,
loss_function_name='mse', cal_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Initializer of all learning models.
:param train_data: training data.
:param validation: validation data as a tuple (y, x), optional
"""
self.train_x = train_data[1]
self.train_y = train_data[0]
self.set_valid(validation)
''' Define the progress of history here '''
self.losses = []
self.iterations = 0
self.weights = []
self.misclass_rate = []
''' Define loss, weight calculation, regularizer '''
self.loss_function = get_loss_function(loss_function_name)
self.loss_function_name = loss_function_name
self.calculate_weight = cal_weight
self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p)
self.regularizer_p = regularizer_p
# Asserting degree
if len(self.train_x.shape) > 1:
degree = self.train_x.shape[1]
else:
degree = 1
# Initialize the weight for linear model.
if initial_weight is not None:
self.weights.append(initial_weight)
else:
self.weights.append(np.random.rand(degree))
def set_valid(self, validation):
# Set validation here.
self.validation = False
self.valid_x = None
self.valid_y = None
self.valid_losses = None
self.valid_misclass_rate = None
if validation is not None:
(valid_y, valid_x) = validation
self.valid_x = valid_x
self.valid_y = valid_y
self.validation = True
self.valid_losses = []
self.valid_misclass_rate = []
@abstractmethod
def __call__(self, **kwargs):
"""Define the fit function and get prediction"""
raise NotImplementedError
@abstractmethod
def get_gradient(self, y, x, weight):
raise NotImplementedError
@abstractmethod
def predict(self, x, weight):
raise NotImplementedError
@abstractmethod
def normalequ(self, **kwargs):
''' define normal equation method to calculate optimal weights'''
raise NotImplementedError
def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):
""" Return weight under given parameter """
model = copy.copy(self)
model.__setattr__('train_y', y)
model.__setattr__('train_x', x)
if test_x is not None and test_y is not None:
model.set_valid((test_y, test_x))
_kwargs = []
for name, value in kwargs.items():
# Recognize the regularizer parameter by name
if name == "regularizer_p":
model.__setattr__(name, value)
model.regularizer.set_parameter(value)
else:
_kwargs.append((name, value))
_kwargs = dict(_kwargs)
if model.calculate_weight == 'gradient':
return model.sgd(**_kwargs)
# elif model.calculate_weight is 'newton':
# return model.newton(**_kwargs)
elif model.calculate_weight == 'normalequ':
return model.normalequ(**_kwargs)
def get_history(self):
"""
Get the training history of current model
:return: list as [iterations, [losses], [weights], [mis_class]]
"""
if self.validation:
return self.iterations, (self.losses, self.valid_losses), \
(self.weights), (self.misclass_rate, self.valid_misclass_rate)
return self.iterations, self.losses, self.weights, self.misclass_rate
def train(self, optimizer='sgd', loss_function='mse', **kwargs):
"""
Train function to perform one round of training.
The optimization routine is selected by the `optimizer` argument.
TODO: 'newton' could be added in the future.
:param optimizer: only 'sgd' is supported
:param loss_function: loss_function name {mse, mae, logistic}
:param kwargs: passed into sgd
:return: best weight
"""
self.loss_function = get_loss_function(loss_function)
self.loss_function_name = loss_function
if optimizer == 'sgd':
self.sgd(**kwargs)
return self.weights[-1]
"""===================================="""
""" Beginning of the optimize Routines """
"""===================================="""
def sgd(self, lr=0.01, decay=0.5, max_iters=1000,
batch_size=128, early_stop=150, decay_intval=50, decay_lim=9):
"""
Define the SGD algorithm here
Implementing weight decay, early stop.
:param lr: learning rate
:param decay: weight decay after fix iterations
:param max_iters: maximum iterations
:param batch_size: batch_size
:param early_stop: early_stop after no improvement
:return: final weight vector
"""
np.set_printoptions(precision=4)
w = self.weights[0]
loss = self.compute_loss(self.train_y, self.train_x, w)
best_loss = loss
best_counter = 0
decay_counter = 0
# print("initial loss is {} ".format(loss))
for epoch in range(max_iters):
for batch_y, batch_x in batch_iter(self.train_y, self.train_x, batch_size):
grad = self.get_gradient(batch_y, batch_x, w)
w = w - lr * grad
loss = self.compute_loss(self.train_y, self.train_x, w)
mis_class = self.compute_metrics(self.train_y, self.train_x, w)
self.weights.append(w)
self.losses.append(loss)
self.misclass_rate.append(mis_class)
if self.validation is True:
valid_loss = self.compute_loss(self.valid_y, self.valid_x, w)
valid_mis_class = self.compute_metrics(self.valid_y, self.valid_x, w)
self.valid_losses.append(valid_loss)
self.valid_misclass_rate.append(valid_mis_class)
# Display every 25 epoch
if (epoch + 1) % 25 == 0:
print('Epoch {e} in {m}'.format(e=epoch + 1, m=max_iters), end="\t")
if self.validation is True:
# print('\tTrain Loss {0:0.4f}, \tTrain mis-class {0:0.4f}, '
# '\tvalid loss {0:0.4f}, \tvalid mis-class {0:0.4f}'.
# format(loss, mis_class, valid_loss, valid_mis_class))
print('\tTrain Loss {}, \tTrain mis-class {}, '
'\tvalid loss {}, \tvalid mis-class {}'.
format(loss, mis_class, valid_loss, valid_mis_class))
else:
print('\tTrain Loss {}, \tTrain mis-class {}'.
format(loss, mis_class))
# judge the performance
if best_loss - loss > 0.000001:
best_loss = loss
best_counter = 0
else:
best_counter += 1
if best_counter > early_stop:
print("Learning early stop since loss not improving for {} epoch.".format(best_counter))
break
if best_counter % decay_intval == 0:
print("weight decay by {}".format(decay))
lr *= decay
decay_counter += 1
if decay_counter > decay_lim:
print("decay {} times, stop".format(decay_lim))
break
return self.weights[-1]
def newton(self, lr=0.01, max_iters=100):
# TODO: implement newton method later
raise NotImplementedError
def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs):
"""
Cross validation method to acquire the best prediction parameters.
It will use the train_x y as data and do K-fold cross validation.
:param cv: cross validation times
:param lambdas: array of lambdas to be validated
:param lambda_name: the lambda name tag
:param seed: random seed
:param skip: skip the cross validation, only valid 1 time
:param plot plot cross-validation plot, if machine does not
support matplotlib.pyplot, set to false.
:param kwargs: other parameters could pass into compute_weight
:return: best weights, best_lambda, (training error, valid error)
"""
np.set_printoptions(precision=4)
k_indices = build_k_indices(self.train_y, cv, seed)
# define lists to store the loss of training data and test data
err_tr = []
err_te = []
weights = []
print("K-fold ({}) cross validation to examine [{}]".
format(cv, lambdas))
for lamb in lambdas:
print("For lambda: {}".format(lamb))
_mse_tr = []
_mse_te = []
_weight = []
for k in range(cv):
print('Cross valid iteration {}'.format(k))
weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x,
k_indices, k,
lamb, lambda_name, **kwargs)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
_weight.append(weight)
if skip:
break
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
err_tr += [avg_tr]
err_te += [avg_te]
weights.append(_weight)
print("\t train error {}, \t valid error {}".
format(avg_tr, avg_te))
# Select the best parameter during the cross validations.
print('K-fold cross validation result: \n {} \n {}'.
format(err_tr, err_te))
# Select the best based on least err_te
min_err_te = np.argmin(err_te)
print('Best err_te result {}, lambda {}'.
format(err_te[min_err_te], lambdas[min_err_te]))
if plot:
from plots import cross_validation_visualization
cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name,
error_name=self.loss_function_name)
else:
save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name)
return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te)
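# Illustrative call sketch (parameter values assumed, not from the project):
#   best_w, best_lambda, (err_tr, err_te) = model.cross_validation(
#       cv=5, lambdas=np.logspace(-4, 0, 10), lambda_name='regularizer_p',
#       lr=0.1, max_iters=500)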
def _loop_cross_validation(self, y, x, k_indices, k, lamb, lambda_name, **kwargs):
"""
Single loop of cross validation
:param y: train labels
:param x: train data
:param k_indices: indices array
:param k: number of cross validations
:param lamb: lambda to use
:param lambda_name: lambda_name to pass into compute weight
:return: weight, mis_tr, mis_te
"""
train_ind = np.concatenate((k_indices[:k], k_indices[k + 1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# Insert one more kwargs item
kwargs[lambda_name] = lamb
weight = self.compute_weight(train_y, train_x, test_x, test_y, **kwargs)
# Compute the metrics and return
loss_tr = self.compute_metrics(train_y, train_x, weight)
loss_te = self.compute_metrics(test_y, test_x, weight)
return weight, loss_tr, loss_te
def compute_metrics(self, target, data, weight):
"""
Compute the following metrics
Misclassification rate
"""
pred = self.predict(data, weight)
assert len(pred) == len(target)
# Calculate the mis-classification rate:
N = len(pred)
pred = np.reshape(pred, (N,))
target = np.reshape(target, (N,))
nb_misclass = np.count_nonzero(target - pred)
return nb_misclass / N
def compute_loss(self, y, x, weight):
return self.loss_function(y, x, weight)
class LogisticRegression(Model):
""" Logistic regression """
def __init__(self, train, validation=None, initial_weight=None,
loss_function_name='logistic',
calculate_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Constructor of Logistic Regression model
:param train: tuple (y, x)
:param validation: tuple (y, x)
:param initial_weight: weight vector, dim align x
:param loss_function: f(x, y, weight)
:param regularizer: "Ridge" || "Lasso"
:param regularizer_p: parameter
"""
# Initialize the super class with given data.
# Transform the y into {0,1}
y, tx = train
y[np.where(y < 0)] = 0
train = (y, tx)
if validation:
val_y, val_tx = validation
val_y[np.where(val_y < 0)] = 0
validation = (val_y, val_tx)
super(LogisticRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
# Set predicted label
self.pred_label = [-1, 1]
def __call__(self, x, weight=None):
"""
Define the fit function and get prediction,
generate probability of occurrence
"""
if weight is None:
weight = self.weights[-1]
return sigmoid(np.dot(x, weight))
def get_gradient(self, y, x, weight):
""" calculate gradient given data and weight """
y = np.reshape(y, (len(y),))
return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \
+ self.regularizer.get_gradient(weight)
def get_hessian(self, y, x, weight):
# TODO: implement hessian for newton method
raise NotImplementedError
def predict(self, x, weight=None, cutting=0.5):
""" Prediction of event {0,1} """
if weight is None: weight = self.weights[-1]
pred = sigmoid(np.dot(x, weight))
pred[np.where(pred <= cutting)] = 0
pred[np.where(pred > cutting)] = 1
return pred
def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):
""" Prediction result with labels """
if predict_label is None:
predict_label = self.pred_label
if weight is None: weight = self.weights[-1]
pred = self.predict(x, weight, cutting)
pred[np.where(pred == 0)] = predict_label[0]
pred[np.where(pred == 1)] = predict_label[1]
return pred
def train(self, loss_function='logistic',
lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):
""" Make the default loss logistic, set default parameters """
return super(LogisticRegression, self).train('sgd', loss_function,
lr=lr,
decay=decay, max_iters=max_iters,
batch_size=batch_size, **kwargs)
def normalequ(self, **kwargs):
""" Should never call """
raise NotImplementedError
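# Illustrative usage sketch (array shapes and hyper-parameters assumed, not from the project):
#   y = np.random.choice([-1, 1], size=1000)     # labels in {-1, 1}; mapped to {0, 1} internally
#   tx = np.random.rand(1000, 30)                # 1000 samples, 30 features
#   model = LogisticRegression((y, tx), regularizer='Ridge', regularizer_p=0.01)
#   weight = model.train(lr=0.1, max_iters=500, batch_size=128)
#   pred = model.predict_label(tx)               # predictions back in {-1, 1}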
class LinearRegression(Model):
""" Linear regression model
This is not fully tested, especially the cross-validation; please refer
to implementations.py for the linear model.
"""
def __init__(self, train, validation=None, initial_weight=None,
regularizer=None, regularizer_p=None,
loss_function_name='mse', calculate_weight='normalequ'):
# Initialize the super class with given data.
super(LinearRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
def __call__(self, x):
""" calulate prediction based on latest result """
return np.dot(x, self.weights[-1])
def get_gradient(self, batch_y, batch_x, weight):
""" return gradient of linear model, including the regularizer """
N = batch_y.shape[0]
grad = np.empty(len(weight))
for index in range(N):
_y = batch_y[index]
_x = batch_x[index]
grad = grad + gradient_least_square(_y, _x, weight, self.loss_function_name)
grad /= N
grad += self.regularizer.get_gradient(weight)
return grad
def predict(self, x, weight):
""" Prediction function, predicting final result """
pred = np.dot(x, weight)
pred[np.where(pred <= 0)] = -1
pred[np.where(pred > 0)] = 1
return pred
def normalequ(self):
""" Normal equation to get parameters """
tx = self.train_x
y = self.train_y
if self.regularizer is None:
return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y))
elif self.regularizer.name == 'Ridge':
G = np.eye(tx.shape[1])
G[0, 0] = 0
hes = np.dot(tx.T, tx) + self.regularizer_p * G
return np.linalg.solve(hes, np.dot(tx.T, y))
else:
raise NotImplementedError
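# Notation sketch of the ridge branch above (assuming the first coefficient is the intercept):
#   w* = (X^T X + lambda * G)^(-1) X^T y,  with G = identity except G[0, 0] = 0,
# so that the intercept term is left unpenalized.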
| mit |
jmetzen/scikit-learn | sklearn/base.py | 22 | 18131 | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as ChangedBehaviorWarning_
class ChangedBehaviorWarning(ChangedBehaviorWarning_):
pass
ChangedBehaviorWarning = deprecated("ChangedBehaviorWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(ChangedBehaviorWarning)
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to themselves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
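# Hedged usage sketch (illustrative addition, not part of scikit-learn):
# clone() copies constructor parameters but never any fitted state, so the
# returned object is a fresh, unfitted estimator.
def _clone_usage_sketch():
    from sklearn.linear_model import LogisticRegression  # any estimator works
    est = LogisticRegression(C=2.0)
    cloned = clone(est)
    assert cloned is not est
    assert cloned.get_params()['C'] == 2.0
    return cloned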
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the beginning of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
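# Hedged illustration: for a small parameter dict the pretty-printer returns a
# single sorted line, e.g. _pprint({'C': 1.0, 'kernel': 'rbf'}) gives
# "C=1.0, kernel='rbf'" (floats go through str(), everything else through repr()).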
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
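# Hedged usage sketch (illustrative addition): the <component>__<parameter>
# syntax documented in set_params() above reaches into nested estimators such
# as a Pipeline step.
def _set_params_usage_sketch():
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
    pipe.set_params(clf__C=10.0)            # forwarded to the 'clf' step
    return pipe.get_params()['clf__C']      # -> 10.0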
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
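# Hedged numeric illustration of the definition above, worked by hand:
# y_true = [1, 2, 3], y_pred = [1, 2, 2] gives u = 1.0, v = 2.0, so R^2 = 0.5.
# Relies on the module-level numpy import.
def _r2_by_hand_sketch():
    y_true = np.array([1.0, 2.0, 3.0])
    y_pred = np.array([1.0, 2.0, 2.0])
    u = ((y_true - y_pred) ** 2).sum()          # residual sum of squares = 1.0
    v = ((y_true - y_true.mean()) ** 2).sum()   # total sum of squares = 2.0
    return 1 - u / v                            # 0.5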
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
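# Hedged usage sketch: by default fit_transform(X) is just fit(X).transform(X)
# spelled as a single call, as the implementation above shows.
def _fit_transform_usage_sketch():
    from sklearn.preprocessing import StandardScaler
    X = np.array([[0.0], [2.0], [4.0]])
    chained = StandardScaler().fit(X).transform(X)
    combined = StandardScaler().fit_transform(X)
    return np.allclose(chained, combined)   # True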
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
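# Hedged illustration: both helpers only inspect the _estimator_type tag set
# by the mixin classes above.
def _estimator_type_sketch():
    from sklearn.linear_model import LogisticRegression, LinearRegression
    return (is_classifier(LogisticRegression()),   # True
            is_regressor(LinearRegression()))      # True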
| bsd-3-clause |
htygithub/bokeh | bokeh/sampledata/gapminder.py | 41 | 2655 | from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides a pandas DataFrame instance of four
of the datasets from gapminder.org.
These are read in from csvs that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
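# Hedged usage sketch (illustrative): each name listed in __all__ becomes a
# pandas DataFrame attribute of this module, indexed by country, e.g.
#
#     from bokeh.sampledata.gapminder import fertility
#     fertility.head()   # rows indexed by 'Country', columns are years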
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1962 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
| bsd-3-clause |
huongttlan/bokeh | bokeh/compat/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
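# Hedged sketch (not part of mplexporter): a minimal concrete Renderer that
# merely records the drawing commands it receives. Only draw_text, draw_path
# and draw_image raise NotImplementedError above, so those are the methods a
# subclass strictly has to provide.
class _RecordingRenderer(Renderer):
    def __init__(self):
        self.calls = []
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        self.calls.append(('path', coordinates, len(pathcodes)))
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        self.calls.append(('text', text, text_type))
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        self.calls.append(('image', extent))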
| bsd-3-clause |
joelfrederico/SciSalt | scisalt/qt/mplwidget.py | 1 | 13557 | from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar
import matplotlib as _mpl
import numpy as _np
from .Rectangle import Rectangle
import pdb
import traceback
import logging
loggerlevel = logging.DEBUG
logger = logging.getLogger(__name__)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Slider_and_Text(QtGui.QWidget):
valueChanged = QtCore.pyqtSignal(int)
sliderReleased = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self)
self.setMaximumHeight(40)
# Enable tracking by default
self._tracking = True
self.hLayout = QtGui.QHBoxLayout()
self.slider = QtGui.QSlider()
self.leftbutton = QtGui.QPushButton()
self.leftbutton.setText("<")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth())
# self.leftbutton.setSizePolicy(sizePolicy)
self.leftbutton.clicked.connect(self._subone)
self.rightbutton = QtGui.QPushButton()
self.rightbutton.setText(">")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth())
# self.rightbutton.setSizePolicy(sizePolicy)
self.rightbutton.clicked.connect(self._addone)
self.v = QtGui.QIntValidator()
self.box = QtGui.QLineEdit()
self.box.setValidator(self.v)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth())
# self.box.setSizePolicy(sizePolicy)
self.hLayout.addWidget(self.leftbutton)
self.hLayout.addWidget(self.slider)
self.hLayout.addWidget(self.box)
self.hLayout.addWidget(self.rightbutton)
self.setLayout(self.hLayout)
self.slider.valueChanged.connect(self._sliderChanged)
self.box.editingFinished.connect(self._textChanged)
self.setOrientation(QtCore.Qt.Horizontal)
# Connect release so tracking works as expected
self.slider.sliderReleased.connect(self._sliderReleased)
def _addone(self):
self.value = self.value + 1
self.valueChanged.emit(self.value)
def _subone(self):
self.value = self.value - 1
self.valueChanged.emit(self.value)
def _sliderReleased(self):
print('Released')
self.sliderReleased.emit(self.slider.value())
def setTracking(self, val):
print('Tracking set to {}'.format(val))
self._tracking = val
def setMaximum(self, val):
self.slider.setMaximum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def setMinimum(self, val):
self.slider.setMinimum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def _sliderChanged(self, val):
self.box.setText(str(val))
if self._tracking:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.valueChanged.emit(val)
else:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.slider.sliderReleased.connect(self._sliderChanged_notracking)
def _sliderChanged_notracking(self):
val = self.slider.value()
# print('Value to be emitted is {}'.format(val))
self.valueChanged.emit(val)
def _textChanged(self):
val = self.box.text()
self.slider.setValue(int(val))
self._sliderChanged_notracking()
def setOrientation(self, *args, **kwargs):
self.slider.setOrientation(*args, **kwargs)
def _getValue(self):
return self.slider.value()
def _setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
value = property(_getValue, _setValue)
def setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
# self.valueChanged.emit(val)
class Mpl_Plot(_FigureCanvas):
def __init__(self, parent=None):
# Initialize things
self.fig = _mpl.figure.Figure()
_FigureCanvas.__init__(self, self.fig)
_FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self)
# Create axes
self.ax = self.fig.add_subplot(111)
def plot(self, *args, **kwargs):
self.ax.clear()
self.ax.plot(*args, **kwargs)
self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
self.ax.figure.canvas.draw()
class Mpl_Image(QtGui.QWidget):
# Signal for when the rectangle is changed
rectChanged = QtCore.pyqtSignal(Rectangle)
def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None):
# Initialize things
QtGui.QWidget.__init__(self)
self.rectbool = rectbool
self._clim_min = 0
self._clim_max = 3600
self._pressed = False
# Add a vertical layout
self.vLayout = QtGui.QVBoxLayout()
# Add a figure
self.fig = _mpl.figure.Figure()
# Add a canvas containing the fig
self.canvas = _FigureCanvas(self.fig)
_FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self.canvas)
# Setup the layout
if toolbarbool:
self.toolbar = _NavigationToolbar(self.canvas, self)
self.toolbar.setMaximumHeight(20)
self.vLayout.addWidget(self.toolbar)
self.vLayout.addWidget(self.canvas)
self.setLayout(self.vLayout)
# Create axes
self.ax = self.fig.add_subplot(111)
# Include rectangle functionality
if rectbool:
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.Rectangle = Rectangle(
x = -10 ,
y = 0 ,
width = 0 ,
height = 3 ,
axes = self.ax
)
# Add image
self.image = image
def _get_img(self):
return self._image
def _set_img(self, image):
self.ax.clear()
self._image = image
if image is not None:
self._imgplot = self.ax.imshow(image, interpolation='none')
if self.rectbool:
self.ax.add_patch(self.Rectangle.get_rect())
# imagemax = _np.max(_np.max(image))
self.set_clim(self._clim_min, self._clim_max)
image = property(_get_img, _set_img)
def set_clim(self, clim_min, clim_max):
if self.image is not None:
self._clim_min = clim_min
self._clim_max = clim_max
self._imgplot.set_clim(clim_min, clim_max)
self.ax.figure.canvas.draw()
def on_press(self, event):
if self.toolbar._active is None:
self._pressed = True
self.x0 = event.xdata
self.y0 = event.ydata
logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0))
def on_release(self, event):
if self._pressed:
self._pressed = False
print('release')
self.x1 = event.xdata
self.y1 = event.ydata
width = self.x1 - self.x0
height = self.y1 - self.y0
logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format(
self.x0 ,
self.y0 ,
self.x1 ,
self.y1 ,
width ,
height
)
)
self.Rectangle.set_xy((self.x0, self.y0))
self.Rectangle.set_width(width)
self.Rectangle.set_height(height)
self.ax.figure.canvas.draw()
self.rectChanged.emit(self.Rectangle)
# print(self.rect)
def zoom_rect(self, border=None, border_px=None):
# ======================================
# Get x coordinates
# ======================================
x0 = self.Rectangle.get_x()
width = self.Rectangle.get_width()
x1 = x0+width
# ======================================
# Get y coordinates
# ======================================
y0 = self.Rectangle.get_y()
height = self.Rectangle.get_height()
y1 = y0+height
# ======================================
# Validate borders
# ======================================
if (border_px is None) and (border is not None):
xborder = border[0]*width
yborder = border[1]*height
elif (border_px is not None) and (border is None):
xborder = border_px[0]
yborder = border_px[1]
elif (border_px is None) and (border is None):
raise IOError('No border info specified!')
elif (border_px is not None) and (border is not None):
raise IOError('Too much border info specified, both border_px and border!')
else:
raise IOError('End of the line!')
# ======================================
# Add borders
# ======================================
x0 = x0 - xborder
x1 = x1 + xborder
y0 = y0 - yborder
y1 = y1 + yborder
# ======================================
# Validate coordinates to prevent
# unPythonic crash
# ======================================
if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])):
print('X issue')
print('Requested: x=({}, {})'.format(x0, x1))
x0 = 0
x1 = self.image.shape[1]
if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])):
print('y issue')
print('Requested: y=({}, {})'.format(y0, y1))
y0 = 0
y1 = self.image.shape[0]
# ======================================
# Set viewable area
# ======================================
self.ax.set_xlim(x0, x1)
self.ax.set_ylim(y0, y1)
# ======================================
# Redraw canvas to show updates
# ======================================
self.ax.figure.canvas.draw()
class Mpl_Image_Plus_Slider(QtGui.QWidget):
# def __init__(self, parent=None, **kwargs):
def __init__(self, parent=None, **kwargs):
# Initialize self as a widget
QtGui.QWidget.__init__(self, parent)
# Add a vertical layout with parent self
self.vLayout = QtGui.QVBoxLayout(self)
self.vLayout.setObjectName(_fromUtf8("vLayout"))
# Add an Mpl_Image widget to vLayout,
# save it to self._img
# Pass arguments through to Mpl_Image.
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs)
self._img.setObjectName(_fromUtf8("_img"))
self.vLayout.addWidget(self._img)
# Add a slider to vLayout,
# save it to self.max_slider
# self.max_slider = QtGui.QSlider(self)
self.max_slider = Slider_and_Text(self)
self.max_slider.setObjectName(_fromUtf8("max_slider"))
self.max_slider.setOrientation(QtCore.Qt.Horizontal)
self.vLayout.addWidget(self.max_slider)
# Setup slider to work with _img's clims
self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val))
def _get_image(self):
return self._img.image
def _set_image(self, image):
self._img.image = image
maximage = _np.max(_np.max(image))
self.max_slider.setMaximum(maximage)
image = property(_get_image, _set_image)
def _get_ax(self):
return self._img.ax
ax = property(_get_ax)
def _get_Rectangle(self):
return self._img.Rectangle
# def _set_rect(self, rect):
# self._img.rect(rect)
Rectangle = property(_get_Rectangle)
def zoom_rect(self, border=None, border_px=None):
self._img.zoom_rect(border, border_px)
def set_clim(self, *args, **kwargs):
self._img.set_clim(*args, **kwargs)
def setSliderValue(self, val):
self.max_slider.setValue(val)
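# Hedged usage sketch (illustrative): embedding the widget in a bare PyQt4
# application; `img` is assumed to be a 2-D numpy array.
def _viewer_demo(img):
    app = QtGui.QApplication([])
    viewer = Mpl_Image_Plus_Slider()
    viewer.image = img       # the property setter also rescales the max slider
    viewer.show()
    app.exec_()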
| mit |
mathhun/scipy_2015_sklearn_tutorial | notebooks/figures/plot_kneighbors_regularization.py | 25 | 1363 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, n_samples)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
return x, y
def plot_regression_datasets():
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for n_samples, ax in zip([10, 100, 1000], axes):
x, y = make_dataset(n_samples)
ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
X = x[:, np.newaxis]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
x_test = np.linspace(-3, 3, 1000)
for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
kneighbor_regression.fit(X, y)
ax.plot(x, y_no_noise, label="true function")
ax.plot(x, y, "o", label="data")
ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
label="prediction")
ax.legend()
ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
| cc0-1.0 |
qifeigit/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
sonyahanson/assaytools | examples/ipynbs/data-analysis/spectra/2015-12-18/xml2png4scans-spectra.py | 8 | 5636 | # This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader
# and makes quick and dirty images of the raw data.
# But with scans and not just singlet reads.
# This script specifically combines four spectrum scripts (AB, CD, EF, GH) into a single dataframe and plot.
# The same procedure can be used to make matrices suitable for analysis using
# matrix = dataframe.values
# Made by Sonya Hanson, with some help from things that worked in xml2png.py and xml2png4scans.py
# Friday, November 18, 2015
# Usage: python xml2png4scans-spectra.py *.xml
############ For future to combine with xml2png.py
#
# for i, sect in enumerate(Sections):
# reads = sect.xpath("*/Well")
# parameters = root.xpath(path)[0]
# if reads[0].attrib['Type'] == "Scan":
#
##############
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import matplotlib.cm as cm
import seaborn
import sys
import os
### Define xml files.
xml_files = sys.argv[1:]
so_many = len(xml_files)
print "****This script is about to make png files for %s xml files. ****" % so_many
### Define extract function that extracts parameters
def extract(taglist):
result = []
for p in taglist:
print "Attempting to extract tag '%s'..." % p
try:
param = parameters.xpath("*[@Name='" + p + "']")[0]
result.append( p + '=' + param.attrib['Value'])
except:
### tag not found
result.append(None)
return result
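# Hedged illustration: extract(["Mode", "Gain"]) returns strings such as
# ['Mode=Fluorescence Top Reading', 'Gain=100'] for tags present in the
# current section, and None entries for tags that are missing (the values
# shown here are made up for illustration).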
### Define an initial set of dataframes, one per each section
large_dataframe0 = pd.DataFrame()
large_dataframe1 = pd.DataFrame()
large_dataframe2 = pd.DataFrame()
for file in xml_files:
### Parse XML file.
root = etree.parse(file)
### Remove extension from xml filename.
file_name = os.path.splitext(file)[0]
### Extract plate type and barcode.
plate = root.xpath("/*/Header/Parameters/Parameter[@Name='Plate']")[0]
plate_type = plate.attrib['Value']
try:
bar = root.xpath("/*/Plate/BC")[0]
barcode = bar.text
except:
barcode = 'no barcode'
### Define Sections.
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
for i, sect in enumerate(Sections):
### Extract Parameters for this section.
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
### Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.
# Attach these to title1, title2, or title3, depending on section which will be the same for all 4 files.
if parameters[0].attrib['Value'] == "Absorbance":
result = extract(["Mode", "Wavelength Start", "Wavelength End", "Wavelength Step Size"])
globals()["title"+str(i)] = '%s, %s, %s, %s' % tuple(result)
else:
result = extract(["Gain", "Excitation Wavelength", "Emission Wavelength", "Part of Plate", "Mode"])
globals()["title"+str(i)] = '%s, %s, %s, \n %s, %s' % tuple(result)
print "****The %sth section has the parameters:****" %i
print globals()["title"+str(i)]
### Extract Reads for this section.
Sections = root.xpath("/*/Section")
reads = root.xpath("/*/Section[@Name='" + sect.attrib['Name'] + "']/*/Well")
wellIDs = [read.attrib['Pos'] for read in reads]
data = [(s.text, float(s.attrib['WL']), r.attrib['Pos'])
for r in reads
for s in r]
dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])
### dataframe_rep replaces 'OVER' (when fluorescence signal maxes out) with '3289277', an arbitrarily high number
dataframe_rep = dataframe.replace({'OVER':'3289277'})
dataframe_rep[['fluorescence']] = dataframe_rep[['fluorescence']].astype('float')
### Create large_dataframe1, large_dataframe2, and large_dataframe3 that collect data for each section
### as we run through cycle through sections and files.
globals()["dataframe_pivot"+str(i)] = pd.pivot_table(dataframe_rep, index = 'wavelength (nm)', columns= ['Well'])
print 'The max fluorescence value in this dataframe is %s'% globals()["dataframe_pivot"+str(i)].values.max()
globals()["large_dataframe"+str(i)] = pd.concat([globals()["large_dataframe"+str(i)],globals()["dataframe_pivot"+str(i)]])
### Plot, making a separate png for each section.
for i, sect in enumerate(Sections):
section_name = sect.attrib['Name']
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
if parameters[0].attrib['Value'] == "Absorbance":
section_ylim = [0,0.2]
else:
section_ylim = [0,40000]
Alphabet = ['A','B','C','D','E','F','G','H']
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))
for j,A in enumerate(Alphabet):
for k in range(1,12):
try:
globals()["large_dataframe"+str(i)].fluorescence.get(A + str(k)).plot(ax=axes[(j/3)%3,j%3], title=A, c=cm.hsv(k*15), ylim=section_ylim, xlim=[240,800])
except:
print "****No row %s.****" %A
fig.suptitle('%s \n %s \n Barcode = %s' % (globals()["title"+str(i)], plate_type, barcode), fontsize=14)
fig.subplots_adjust(hspace=0.3)
plt.savefig('%s_%s.png' % (file_name, section_name))
| lgpl-2.1 |
nikitasingh981/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
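# Hedged illustration of the two helpers above:
# token_freqs("To be or not to be") -> {'to': 2, 'be': 2, 'or': 1, 'not': 1}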
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/neighbors/graph.py | 36 | 6650 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self : bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self : bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
micahcochran/geopandas | geopandas/_version.py | 3 | 16750 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "geopandas-"
cfg.versionfile_source = "geopandas/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
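# Worked example (comment only; sample values assumed): a describe output of
# "v0.3.0-15-gabc1234-dirty" with tag_prefix "v" parses into
#   closest-tag="0.3.0", distance=15, short="abc1234", dirty=True,
# whereas a tagless repository yields a bare "abc1234", so closest-tag is
# None and distance falls back to the total count from 'git rev-list'.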
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause |
zycdragonball/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 58 | 71789 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
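# The slice above keeps only classes 0 and 1, so the returned Dataset has 100
# rows (50 per class) with binary targets -- which is why the logistic
# regression tests below use label shapes of [100] or [100, 1].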
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
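    # Worked example (comment only; sample values assumed): for predictions
    # [[0.3, 0.7], [0.9, 0.1]] the strided_slice keeps the second column,
    # [[0.7], [0.1]]; with labels [[1], [0]] the metric reduces to 0.7.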
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
    # input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures,
    # except that the two 1-dimensional dense features are replaced by a
    # single 2-dimensional feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
yqzhang/OpenANN | benchmarks/iris/benchmark.py | 5 | 3308 | ## \page IrisBenchmark Iris Flower Dataset
#
# The iris dataset is a standard machine learning dataset.
# See e.g. the <a href="http://en.wikipedia.org/wiki/Iris_flower_data_set"
# target=_blank>Wikipedia article</a> for more details.
#
# You can start the benchmark with the script:
# \verbatim
# python benchmark.py [run]
# \endverbatim
# Note that you need Scikit Learn to load the dataset.
#
# The result will look like
# \verbatim
# Iris data set has 4 inputs, 3 classes and 150 examples
# The data has been split up into training and validation sets.
# Correct predictions on training set: 120/120
# Confusion matrix:
# [[ 40. 0. 0.]
# [ 0. 40. 0.]
# [ 0. 0. 40.]]
# Correct predictions on test set: 30/30
# Confusion matrix:
# [[ 10. 0. 0.]
# [ 0. 10. 0.]
# [ 0. 0. 10.]]
# \endverbatim
import sys
import numpy  # used below for array handling; imported explicitly
try:
from sklearn import datasets
except:
print("scikit-learn is required to run this example.")
exit(1)
try:
from openann import *
except:
print("OpenANN Python bindings are not installed!")
exit(1)
def print_usage():
print("Usage:")
print(" python benchmark [run]")
def run_iris():
# Load IRIS dataset
iris = datasets.load_iris()
X = iris.data
Y = iris.target
D = X.shape[1]
F = len(numpy.unique(Y))
N = len(X)
# Preprocess data (normalization and 1-of-c encoding)
X = (X - X.mean(axis=0)) / X.std(axis=0)
T = numpy.zeros((N, F))
T[(range(N), Y)] = 1.0
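  # e.g. a sample with class label 1 gets the one-hot target row [0., 1., 0.]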
# Setup network
net = Net()
net.set_regularization(0.0, 0.01, 0.0)
net.input_layer(D)
net.fully_connected_layer(100, Activation.RECTIFIER)
net.fully_connected_layer(100, Activation.RECTIFIER)
net.output_layer(F, Activation.SOFTMAX)
net.set_error_function(Error.CE)
# Split dataset into training set and validation set and make sure that
# each class is equally distributed in the datasets
X1 = numpy.vstack((X[0:40], X[50:90], X[100:140]))
T1 = numpy.vstack((T[0:40], T[50:90], T[100:140]))
training_set = DataSet(X1, T1)
X2 = numpy.vstack((X[40:50], X[90:100], X[140:150]))
T2 = numpy.vstack((T[40:50], T[90:100], T[140:150]))
validation_set = DataSet(X2, T2)
# Train for 500 episodes (with tuned parameters for MBSGD)
optimizer = MBSGD({"maximal_iterations": 500}, learning_rate=0.7,
learning_rate_decay=0.999, min_learning_rate=0.001, momentum=0.5,
batch_size=16)
Log.set_info() # Deactivate debug output
optimizer.optimize(net, training_set)
print("Iris data set has %d inputs, %d classes and %d examples" % (D, F, N))
print("The data has been split up input training and validation set.")
print("Correct predictions on training set: %d/%d"
% (classification_hits(net, training_set), len(X1)))
print("Confusion matrix:")
print(confusion_matrix(net, training_set))
print("Correct predictions on test set: %d/%d"
% (classification_hits(net, validation_set), len(X2)))
print("Confusion matrix:")
print(confusion_matrix(net, validation_set))
if __name__ == "__main__":
if len(sys.argv) == 1:
print_usage()
for command in sys.argv[1:]:
if command == "run":
run_iris()
else:
print_usage()
exit(1)
| gpl-3.0 |
alexei-matveev/ase-local | doc/exercises/siesta1/answer1.py | 3 | 1197 | # -*- coding: utf-8 -*-
# creates: ener.png distance.png angle.png
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt
e_s = [0.01,0.1,0.2,0.3,0.4,0.5]
E = [-463.2160, -462.9633, -462.4891, -462.0551,
-461.5426, -461.1714]
d = [1.1131, 1.1046, 1.0960, 1.0901,
1.0857, 1.0810]
alpha = [100.832453365, 99.568214268, 99.1486065462,
98.873671379, 98.1726341945, 98.0535643778]
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.29, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, E, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'Energy [eV]')
plt.title('Total Energy vs Eshift')
plt.savefig('ener.png')
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.24, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, d, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'O-H distance [Å]')
limits = plt.axis('tight')
plt.title('O-H distance vs Eshift')
plt.savefig('distance.png')
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.26, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, alpha, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'H2O angle')
limits = plt.axis('tight')
plt.title('H2O angle vs Eshift')
plt.savefig('angle.png')
| gpl-2.0 |
mugwizaleon/PCRasterMapstacks | pcrastermapstackvisualisation.py | 1 | 17920 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
PcrasterMapstackVisualisation
A QGIS plugin
PCRaster Mapstack visualisation
-------------------
begin : 2014-06-28
copyright : (C) 2014 by Leon
email : mugwizal@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
import qgis.utils
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
from pcrastermapstackvisualisationdialog import PcrasterMapstackVisualisationDialog
from Animationdialog import AnimationDialog
from TSSvisualizationdialog import TSSVisualizationDialog
# Import modules
import os.path
import os, glob
import time
import sys
import string
class PcrasterMapstackVisualisation:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'pcrastermapstackvisualisation_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = PcrasterMapstackVisualisationDialog()
self.dlg2 = AnimationDialog()
self.dlg3 = TSSVisualizationDialog()
# Mapstack series visualization
QObject.connect( self.dlg.ui.pushButton_7, SIGNAL( "clicked()" ), self.DisplayTSSnames)
QObject.connect( self.dlg.ui.pushButton_6, SIGNAL( "clicked()" ), self.TSSgraphs)
QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( "clicked()" ), self.selectDir ) #link the button to the function of selecting the directory
QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( "clicked()" ), self.loadMapStackCoreName ) #link the button to the function of selecting the directory
QObject.connect( self.dlg.ui.pushButton_5, SIGNAL( "clicked()" ), self.actionStart)
QObject.connect( self.dlg2.ui.pushButton_2, SIGNAL( "clicked()" ), self.ActionAnim)
QObject.connect( self.dlg2.ui.pushButton_3, SIGNAL( "clicked()" ), self.actionNext)
QObject.connect( self.dlg2.ui.pushButton, SIGNAL( "clicked()" ), self.actionPrevious)
QObject.connect( self.dlg2.ui.pushButton_4, SIGNAL( "clicked()" ), self.actionStart)
QObject.connect( self.dlg2.ui.pushButton_5, SIGNAL( "clicked()" ), self.actionLast)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentIndexChanged (const QString&)"), self.changelist) #Change the list of mapstacks
#Close dialogs widgets
QObject.connect( self.dlg.ui.pushButton, SIGNAL( "clicked()" ), self.close1)
QObject.connect( self.dlg3.ui.pushButton, SIGNAL( "clicked()" ), self.close2)
QObject.connect( self.dlg2.ui.pushButton_6, SIGNAL( "clicked()" ), self.close3)
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/pcrastermapstackvisualisation/Myicon.png"),
u"Mapstacks_visualisation", self.iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&PCRaster Mapstacks Viewer", self.action)
self.iface.addPluginToRasterMenu(u"&PCRaster Mapstacks Viewer", self.action)
def unload(self):
# Remove the plugin menu item and icon
    self.iface.removePluginMenu(u"&PCRaster Mapstacks Viewer", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
def close1(self):
self.dlg.close()
def TSSview(self):
self.dlg3.move(10, 300)
self.dlg3.show()# show the dialog
def close2(self):
self.dlg3.close()
self.dlg.show()
def AnimationDlg (self):
self.dlg2.move(200, 200)
self.dlg2.show()# show the dialog
def close3(self):
self.dlg2.close()
self.dlg.show()
# Selecting the directory containg files
def selectDir( self ):
self.dlg.hide()
settings = QSettings()
path = QFileDialog.getExistingDirectory( self.iface.mainWindow(), "Select a directory")
if path: self.dlg.ui.txtBaseDir2_5.setText( path )
self.dlg.show()
def actionRemove(self):
layers = self.iface.legendInterface().layers()
layer = qgis.utils.iface.activeLayer()
self.PrincipalLayer = layer.name()
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
else : self.iface.legendInterface().moveLayer( layer, 0 )
self.iface.legendInterface().removeGroup(0)
def AddLayer(self, input):
layerPath = os.path.join(self.dataDir, input)
fileInfo = QFileInfo(layerPath)
baseName = fileInfo.baseName()
layer = QgsRasterLayer(layerPath, baseName)
uri = os.path.join(self.dataDir, 'MyFile.qml')
layer.loadNamedStyle(uri)
QgsMapLayerRegistry.instance().addMapLayer(layer)
def loadFiles(self, filename):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
file_list = glob.glob(filename)
for index in file_list:
list = index.split(".")
if (len(list) < 2) :
file_list.remove(index)
for index in file_list:
if index.endswith(".tss"):
file_list.remove(index)
for index in file_list:
if index.endswith(".xml") or index.endswith(".aux.xml") :
file_list.remove(index)
for index in file_list:
if index.endswith(".tss"):
file_list.remove(index)
file_list.sort()
return file_list
def loadMapStackCoreName(self):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
files= os.listdir(self.dataDir)
self.dlg.ui.comboBox.clear()
self.dlg.ui.comboBox_2.clear()
MyList=[]
MyList2 =[]
MyList3 = []
for index in files:
list = index.split(".")
if (len(list)==2) and (len(list[0])== 8) and (len(list[1])== 3) and (list[1].isdigit()):
MyList.append(index)
if index.endswith(".tss"):
MyList3.append(index)
for index in MyList:
list = index.split(".")
words = list[0].replace("0", "")
MyList2.append(words)
FinalList = []
for i in MyList2:
if i not in FinalList:
FinalList.append(i)
self.dlg.ui.comboBox.addItems(FinalList)
self.dlg.ui.comboBox_2.addItems(MyList3)
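    # Illustrative note (assumed PCRaster 8.3 naming): a dynamic map stack
    # such as "rain0000.001" ... "rain0000.120" passes the filter above, and
    # stripping the zero padding reduces it to the core name "rain", so the
    # combo box lists one entry per map stack.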
def DisplayTSSnames(self):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
if not self.dataDir : pass
else:
os.chdir(self.dataDir )
if not self.dlg.ui.comboBox.currentText(): pass
else:
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
self.dlg.ui.listWidget.clear()
for index, file in enumerate(file_list):
self.dlg.ui.listWidget.addItem(file)
def changelist(self):
self.dlg.ui.listWidget.clear()
def ActionAnim(self):
self.actionRemove()
Group = self.iface.legendInterface().addGroup("group_foo")
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
legend = self.iface.legendInterface()
self.dlg2.ui.pushButton_6.setEnabled(False)
for index, file in enumerate(file_list):
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file, os.path.basename(str(file))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
rlayer = qgis.utils.iface.activeLayer()
legend.moveLayer( rlayer, 0 )
time.sleep(float(self.dlg2.ui.txtBaseDir2_5.text()))
self.dlg2.ui.pushButton_6.setEnabled(True)
def actionStart(self):
import Styling
self.dlg.hide()
self.iface.messageBar().clearWidgets ()
layers = self.iface.legendInterface().layers()
for layer in layers :
if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
if not self.dataDir :
QMessageBox.information( self.iface.mainWindow(),"Info", "Please select a directory first")
self.dlg.show()
else :
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
if not self.dlg.ui.comboBox.currentText():
        QMessageBox.information( self.iface.mainWindow(),"Info", "There are no PCRaster mapstacks in this directory")
self.dlg.show()
# return
else:
self.AnimationDlg()
Styling.style1(filename, 'value', self.dataDir, file_list )
s = QSettings()
oldValidation = s.value( "/Projections/defaultBehaviour", "useGlobal" )
s.setValue( "/Projections/defaultBehaviour", "useGlobal" )
self.AddLayer(str(file_list[0]))
s.setValue( "/Projections/defaultBehaviour", oldValidation )
layer = qgis.utils.iface.activeLayer()
# self.PrincipalLayer = layer.name()
# print self.PrincipalLayer
self.iface.legendInterface().setLayerExpanded(layer, True)
def actionLast(self):
self.actionRemove()
self.dlg.hide()
self.AnimationDlg()
self.iface.messageBar().clearWidgets ()
layers = self.iface.legendInterface().layers()
for layer in layers :
if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
index = len(file_list) - 1
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
def actionNext(self):
self.actionRemove()
self.iface.messageBar().clearWidgets ()
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
layer = qgis.utils.iface.activeLayer()
self.PrincipalLayer = layer.name()
if layer is None :
index = 0
elif layer.name() not in file_list:
index = 0
else :
counter = file_list.index(layer.name())
index = counter + 1
if counter == len(file_list) - 1 :
layers = self.iface.legendInterface().layers()
self.iface.legendInterface().addGroup("group_foo")
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )
index = 0
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
def actionPrevious(self):
self.actionRemove()
self.iface.messageBar().clearWidgets ()
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
layer = qgis.utils.iface.activeLayer()
self.PrincipalLayer = layer.name()
if layer is None :
index = len(file_list) - 1
elif layer.name() not in file_list:
index = len(file_list) - 1
else :
counter = file_list.index(layer.name())
index = counter - 1
if counter == 0 :
layers = self.iface.legendInterface().layers()
self.iface.legendInterface().addGroup("group_foo")
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )
index = len(file_list) - 1
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
  def TSSgraphs(self):  # with matplotlib
self.dlg.hide()
filename = str(self.dlg.ui.comboBox_2.currentText())
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
file = os.path.join (self.dataDir, filename)
if os.path.isfile(file):
self.TSSview()
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
stripped = []
stripper = open(filename, 'r')
st_lines = stripper.readlines()[4:]
stripper.close()
for lines in st_lines:
stripped_line = " ".join(lines.split())
stripped.append(stripped_line)
data = "\n".join(stripped)
data = data.split('\n')
values = []
dates = []
years = 0
yl = []
for row in data:
x, y = row.split()
values.append(float(y))
year = (int(x.translate(string.maketrans("\n\t\r", " ")).strip()))
dates.append(year)
years = years +1
yl.append(years)
xlabels = yl
self.dlg3.ui.widget.canvas.ax.clear()
self.dlg3.ui.widget.canvas.ax.set_position([0.155,0.15,0.82,0.75])
self.dlg3.ui.widget.canvas.ax.set_title(filename)
self.dlg3.ui.widget.canvas.ax.set_xlabel ('Time step')
self.dlg3.ui.widget.canvas.ax.set_ylabel ('Values')
self.dlg3.ui.widget.canvas.ax.plot(dates, values)
self.dlg3.ui.widget.canvas.ax.set_xticks(dates)
self.dlg3.ui.widget.canvas.ax.set_xticklabels(xlabels, rotation=30, fontsize=10)
self.dlg3.ui.widget.canvas.draw()
else:
      QMessageBox.information( self.iface.mainWindow(),"Info", "There are no PCRaster time series in this directory")
self.dlg.show()
| apache-2.0 |
jreback/pandas | pandas/io/formats/latex.py | 2 | 25201 | """
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: Optional[Union[str, Tuple[str, str]]]
) -> Tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
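# Minimal usage sketch of the helper above (doctest-style, illustrative):
#
# >>> _split_into_full_short_caption(("A long caption", "Short"))
# ('A long caption', 'Short')
# >>> _split_into_full_short_caption("A long caption")
# ('A long caption', '')
# >>> _split_into_full_short_caption(None)
# ('', '')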
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows = list(zip(*self.strcols))
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
short_caption: str, optional
Table short caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
short_caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.short_caption = short_caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption.
With short caption:
\caption[short_caption]{caption_string}.
Without short caption:
\caption{caption_string}.
"""
if self.caption:
return "".join(
[
r"\caption",
f"[{self.short_caption}]" if self.short_caption else "",
f"{{{self.caption}}}",
]
)
return ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> Type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
longtable : bool, default False
Use longtable environment.
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
multicolumn : bool, default False
Use \multicolumn to enhance MultiIndex columns.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
multirow : bool, default False
Use \multirow to enhance MultiIndex rows.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in \caption[short_caption]{full_caption};
if a single string is passed, no short caption will be set.
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{}`` in the output.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[Union[str, Tuple[str, str]]] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption, self.short_caption = _split_into_full_short_caption(caption)
self.label = label
self.position = position
def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
short_caption=self.short_caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> Type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> Optional[str]:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: Optional[str]) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
Right alignment for numbers and left - for strings.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
def _escape_symbols(row: Sequence[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of string, that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
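# Illustrative behaviour of _escape_symbols above (doctest-style):
#
# >>> _escape_symbols(["50%", "a_b", ""])
# ['50\\%', 'a\\_b', '{}']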
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
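# Illustrative behaviour of _convert_to_bold above (doctest-style); only the
# first ``ilevels`` entries (the index cells) are wrapped in \textbf:
#
# >>> _convert_to_bold(["idx", "1.0"], 1)
# ['\\textbf{idx}', '1.0']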
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
trungnt13/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
michelp/pywt | util/refguide_check.py | 2 | 27051 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a PyWavelets submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
PyWavelets docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
import numpy as np
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc',
# 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "pywt"
PUBLIC_SUBMODULES = []
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = []
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
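# For example (illustrative, POSIX paths):
#
# >>> short_path("/usr/lib/python3/site-packages", cwd="/usr/lib")
# 'python3/site-packages'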
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
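    # For example (illustrative), a refguide line such as
    #    "   dwt   -- single-level Discrete Wavelet Transform"
    # matches the first pattern and registers the name "dwt".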
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
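    # Heuristic: call the object with a bogus keyword argument. With warnings
    # turned into errors, a deprecated callable raises DeprecationWarning
    # before it gets around to complaining about the unknown argument.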
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg": None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
        for name in sorted(dep_in_ref):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
    # strip the BASE_MODULE prefix (e.g. "pywt.") instead of assuming its length
    if module.__name__[len(BASE_MODULE) + 1:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf, }
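# Note: evaluating doctest output in CHECK_NAMESPACE lets the checker compare
# values numerically rather than textually; e.g. (hypothetical) an expected
# output string "array([1., 2.])" eval's to np.array([1., 2.]) and is then
# compared with np.allclose in Checker._do_check below.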
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(
self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
'set_xlim', '# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
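            # For example (hypothetical values):
            #   want = "(10, 0.1)"  -> a_want = (10, 0.1)
            #   got  = "MoodResult(statistic=10, pvalue=0.1)" -> the regex
            #   pulls out ("10", "0.1"), which is folded back into "(10, 0.1)".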
try:
num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)']*num) +
                         r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
    Returns: a (success, output) tuple, where output is a list of strings
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
    Mimic `check_doctests` above, differing mostly in test discovery
    (which is borrowed from stdlib's doctest.testfile,
    https://github.com/python-git/python/blob/master/Lib/doctest.py).
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
    We also try to weed out pseudocode:
    * We maintain a list of markers which signal pseudocode.
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains one of these markers, the whole block is assumed
      to be pseudocode and is not doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
            # `part` looks like good code; let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*',
help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-examples", action="store_true",
help="Skip running doctests in the examples.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_examples = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_examples:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_examples:
examples_path = os.path.join(
os.getcwd(), 'doc', 'source', 'regression', '*.rst')
print('\nChecking examples files at %s:' % examples_path)
for filename in sorted(glob.glob(examples_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
examples_results = check_doctests_testfile(
filename, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, examples_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| mit |
samklr/spark-timeseries | python/sparkts/test/test_timeseriesrdd.py | 6 | 5407 | from test_utils import PySparkTestCase
from sparkts.timeseriesrdd import *
from sparkts.timeseriesrdd import _TimeSeriesSerializer
from sparkts.datetimeindex import *
import pandas as pd
import numpy as np
from unittest import TestCase
from io import BytesIO
from pyspark.sql import SQLContext
class TimeSeriesSerializerTestCase(TestCase):
def test_times_series_serializer(self):
serializer = _TimeSeriesSerializer()
stream = BytesIO()
series = [('abc', np.array([4.0, 4.0, 5.0])), ('123', np.array([1.0, 2.0, 3.0]))]
serializer.dump_stream(iter(series), stream)
stream.seek(0)
reconstituted = list(serializer.load_stream(stream))
self.assertEquals(reconstituted[0][0], series[0][0])
self.assertEquals(reconstituted[1][0], series[1][0])
self.assertTrue((reconstituted[0][1] == series[0][1]).all())
self.assertTrue((reconstituted[1][1] == series[1][1]).all())
class TimeSeriesRDDTestCase(PySparkTestCase):
def test_time_series_rdd(self):
freq = DayFrequency(1, self.sc)
start = '2015-04-09'
dt_index = uniform(start, periods=10, freq=freq, sc=self.sc)
vecs = [np.arange(0, 10), np.arange(10, 20), np.arange(20, 30)]
rdd = self.sc.parallelize(vecs).map(lambda x: (str(x[0]), x))
tsrdd = TimeSeriesRDD(dt_index, rdd)
self.assertEquals(tsrdd.count(), 3)
contents = tsrdd.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(0, 10)).all())
self.assertTrue((contents["10"] == np.arange(10, 20)).all())
self.assertTrue((contents["20"] == np.arange(20, 30)).all())
subslice = tsrdd['2015-04-10':'2015-04-15']
self.assertEquals(subslice.index(), uniform('2015-04-10', periods=6, freq=freq, sc=self.sc))
contents = subslice.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(1, 7)).all())
self.assertTrue((contents["10"] == np.arange(11, 17)).all())
self.assertTrue((contents["20"] == np.arange(21, 27)).all())
def test_to_instants(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
samples = tsrdd.to_instants().collect()
target_dates = ['2015-4-9', '2015-4-10', '2015-4-11', '2015-4-12']
self.assertEquals([x[0] for x in samples], [pd.Timestamp(x) for x in target_dates])
self.assertTrue((samples[0][1] == np.arange(0, 20, 4)).all())
self.assertTrue((samples[1][1] == np.arange(1, 20, 4)).all())
self.assertTrue((samples[2][1] == np.arange(2, 20, 4)).all())
self.assertTrue((samples[3][1] == np.arange(3, 20, 4)).all())
def test_to_observations(self):
sql_ctx = SQLContext(self.sc)
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
print(dt_index._jdt_index.size())
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
obsdf = tsrdd.to_observations_dataframe(sql_ctx)
tsrdd_from_df = time_series_rdd_from_observations( \
dt_index, obsdf, 'timestamp', 'key', 'value')
ts1 = tsrdd.collect()
ts1.sort(key = lambda x: x[0])
ts2 = tsrdd_from_df.collect()
ts2.sort(key = lambda x: x[0])
self.assertTrue(all([pair[0][0] == pair[1][0] and (pair[0][1] == pair[1][1]).all() \
for pair in zip(ts1, ts2)]))
df1 = obsdf.collect()
df1.sort(key = lambda x: x.value)
df2 = tsrdd_from_df.to_observations_dataframe(sql_ctx).collect()
df2.sort(key = lambda x: x.value)
self.assertEquals(df1, df2)
def test_filter(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
filtered = tsrdd.filter(lambda x: x[0] == 'a' or x[0] == 'b')
self.assertEquals(filtered.count(), 2)
# assert it has TimeSeriesRDD functionality:
filtered['2015-04-10':'2015-04-15'].count()
def test_to_pandas_series_rdd(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
series_arr = tsrdd.to_pandas_series_rdd().collect()
pd_index = dt_index.to_pandas_index()
self.assertEquals(len(vecs), len(series_arr))
for i in xrange(len(vecs)):
self.assertEquals(series_arr[i][0], labels[i])
self.assertTrue(pd.Series(vecs[i], pd_index).equals(series_arr[i][1]))
| apache-2.0 |
jblackburne/scikit-learn | sklearn/tree/tests/test_tree.py | 7 | 55471 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
# verify leaf nodes without min_impurity_split less than
# impurity 1e-7
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure there is only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
    assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0])
    assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
               [0.6, 0.3, 0.1, 1.0, 0.3])
    assert_array_equal(dt_mae.tree_.impurity, [7.0 / 2.3, 3.0 / 0.7, 4.0 / 1.6])
    assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
| bsd-3-clause |
andaag/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
A comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates that Label Propagation can learn a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/tree/export.py | 30 | 4529 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_inst/padova_inst_6/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
rphlypo/parietalretreat | setup_data_path_salma.py | 1 | 6001 | import glob
import os.path
from pandas import DataFrame
import pandas
def get_all_paths(data_set=None, root_dir="/"):
# TODO
# if data_set ... collections.Sequence
# iterate over list
if data_set is None:
data_set = {"hcp", "henson2010faces", "ds105", "ds107"}
list_ = list()
head, tail_ = os.path.split(root_dir)
counter = 0
while tail_:
head, tail_ = os.path.split(head)
counter += 1
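    # `counter` ends up equal to the number of path components in root_dir
    # (0 for "/", 2 for "/home/user", and so on); the indices into the
    # reversed component list `tail` below are shifted by it so the parsing
    # works for any mount point.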
if hasattr(data_set, "__iter__"):
df_ = list()
for ds in data_set:
df_.append(get_all_paths(data_set=ds, root_dir=root_dir))
df = pandas.concat(df_, keys=data_set)
elif data_set.startswith("ds") or data_set == "henson2010faces":
base_path = os.path.join(root_dir,
"storage/workspace/brainpedia/preproc/",
data_set)
with open(os.path.join(base_path, "scan_key.txt")) as file_:
TR = file_.readline()[3:-1]
for fun_path in glob.iglob(os.path.join(base_path,
"sub*/model/model*/"
"BOLD/task*/bold.nii.gz")):
head, tail_ = os.path.split(fun_path)
tail = [tail_]
while tail_:
head, tail_ = os.path.split(head)
tail.append(tail_)
tail.reverse()
subj_id = tail[6 + counter][-3:]
model = tail[8 + counter][-3:]
task, run = tail[10 + counter].split("_")
tmp_base = os.path.split(os.path.split(fun_path)[0])[0]
anat = os.path.join(tmp_base,
"anatomy",
"highres{}.nii.gz".format(model[-3:]))
onsets = glob.glob(os.path.join(tmp_base, "onsets",
"task{}_run{}".format(task, run),
"cond*.txt"))
confds = os.path.join(os.path.split(fun_path)[0], "motion.txt")
list_.append({"subj_id": subj_id,
"model": model,
"task": task[-3:],
"run": run[-3:],
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": TR})
if onsets:
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
elif data_set == "hcp":
base_path = os.path.join(root_dir, "storage/data/HCP/Q2/")
for fun_path in glob.iglob(os.path.join(base_path,
"*/MNINonLinear/Results/",
"*/*.nii.gz")):
head, tail = os.path.split(fun_path)
if head[-2:] not in ["LR", "RL"]:
continue
tail = [tail]
while head != "/":
head, t = os.path.split(head)
tail.append(t)
if tail[0][:-7] != tail[1]:
continue
tail.reverse()
subj_id = tail[4 + counter]
task = tail[7 + counter][6:-3]
if tail[7 + counter].startswith("rfMRI"):
run = task[-1]
task = task[:-1]
mode = tail[7 + counter][-2:]
anat = os.path.join(base_path, subj_id, "MNINonLinear/T1w.nii.gz")
confds = os.path.join(os.path.split(fun_path)[0],
"Movement_Regressors.txt")
list_.append({"subj_id": subj_id,
"task": task,
"mode": mode,
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": 0.72})
if tail[8 + counter].startswith("rfMRI"):
list_[-1]["run"] = run
else:
onsets = [onset
for onset in glob.glob(os.path.join(
os.path.split(fun_path)[0], "EVs/*.txt"))
if os.path.split(onset)[1][0] != "S"]
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
return df
if __name__ == "__main__":
from nilearn.input_data import MultiNiftiMasker, NiftiMapsMasker
from joblib import Memory, Parallel, delayed
import joblib
from sklearn.base import clone
import nibabel
root_dir = "/media/Elements/volatile/new/salma"
mem = Memory(cachedir=os.path.join(root_dir,
("storage/workspace/brainpedia"
"/preproc/henson2010faces/dump/")))
print "Loading all paths and variables into memory"
df = get_all_paths(root_dir=root_dir,
data_set=["henson2010faces"])
target_affine_ = nibabel.load(df["func"][0]).get_affine()
target_shape_ = nibabel.load(df["func"][0]).shape[:-1]
print "preparing and running MultiNiftiMasker"
mnm = MultiNiftiMasker(mask_strategy="epi", memory=mem, n_jobs=1,
verbose=10, target_affine=target_affine_,
target_shape=target_shape_)
mask_img = mnm.fit(list(df["func"])).mask_img_
print "preparing and running NiftiMapsMasker"
nmm = NiftiMapsMasker(
maps_img=os.path.join("/usr/share/fsl/data/atlases/HarvardOxford/",
"HarvardOxford-cortl-prob-2mm.nii.gz"),
mask_img=mask_img, detrend=True, smoothing_fwhm=5, standardize=True,
low_pass=None, high_pass=None, memory=mem, verbose=10)
region_ts = [clone(nmm).fit_transform(niimg, n_hv_confounds=5)
for niimg in list(df["func"])]
joblib.dump(region_ts, "/home/storage/workspace/rphlypo/retreat/results/")
region_signals = DataFrame({"region_signals": region_ts}, index=df.index)
df.join(region_signals)
| bsd-2-clause |
debsankha/bedtime-programming | ls222/visual-lotka.py | 1 | 5120 | #!/usr/bin/env python
from math import *
import thread
import random
import time
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import commands
import matplotlib.pyplot
try:
    import visual  # VPython; used only by the optional animation path below
except ImportError:
    visual = None
class rodent:
def __init__(self):
self.time_from_last_childbirth=0
class felix:
def __init__(self):
self.size=0
self.is_virgin=1
self.reproduction_gap=0
self.time_from_last_childbirth=0
self.age=0
# print 'painted'
class gui_display:
def __init__(self):
self.gladefile='./lvshort.glade'
self.wTree = gtk.glade.XML(self.gladefile)
dic={"on_start_clicked":self.dynamics,"on_mainwin_destroy":gtk.main_quit}
self.wTree.signal_autoconnect(dic)
self.wTree.get_widget("mainwin").show()
self.wTree.get_widget("image").set_from_file("./start.png")
def visualize(self,catn,mousen):
# while True:
num=40
size=10
catno=catn*num**2/(catn+mousen)
cats=random.sample(range(num**2),catno)
for i in range(num**2):
if i in cats:
self.dic[i].color=visual.color.red
else :
self.dic[i].color=visual.color.green
def dynamics(self,*args,**kwargs):
self.wTree.get_widget("image").set_from_file("./wait.png")
print 'dynamics started'
mouse_size=20 #ind parameter
cat_mature_size=60 #ind parameter
# catch_rate=5*10**-4 #parameter
# cat_efficiency=0.8 #parameter
# a=0.2 #will get from slider
# c=0.2 #will get from slider
cat_catch_rate=self.wTree.get_widget("catchrate").get_value()*10**-4 #parameter
cat_efficiency=self.wTree.get_widget("efficiency").get_value() #parameter
a=self.wTree.get_widget("a").get_value() #parameter
c=self.wTree.get_widget("c").get_value() #parameter
mouse_no=1000
cat_no=1000
t=0
tmax=200
dt=1
timeli=[]
miceli=[]
catli=[]
mice=[rodent() for i in range(mouse_no)]
cats=[felix() for i in range(cat_no)]
catn=len(cats)
mousen=len(mice)
self.dic={}
num=40
size=10
catno=catn*num**2/(catn+mousen)
disp_cats=random.sample(range(num**2),catno)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
for i in range(num**2):
coords=((i%num)*size*2-num*size,(i/num)*size*2-num*size)
if i in disp_cats:
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red)
else :
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green)
print self.dic
catn=len(cats)
mousen=len(mice)
data=open('tempdata.dat','w')
timestart=time.time()
while (len(mice)>0 or len(cats)>0) and t<tmax and (time.time()-timestart)<60:
# print time.time()-timestart
catn=len(cats)
mousen=len(mice)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
# self.visualize(catn,mousen)
thread.start_new_thread(self.visualize,(catn,mousen))
for mouse in mice:
if mouse.time_from_last_childbirth>=1/a:
mouse.time_from_last_childbirth=0
mice.append(rodent())
mouse.time_from_last_childbirth+=dt
ind=0
while ind<len(cats):
cat=cats[ind]
cat.age+=dt
num=cat_catch_rate*dt*len(mice)
for i in range(int(num)):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if (num-int(num))>random.uniform(0,1):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if cat.size>cat_mature_size:
if cat.is_virgin:
cat.is_virgin=0
cat.reproduction_gap=cat.age
cats.append(felix())
else :
if cat.time_from_last_childbirth>cat.reproduction_gap:
cats.append(felix())
cat.time_from_last_childbirth=0
if cat.is_virgin==0:
cat.time_from_last_childbirth+=dt
if len(cats)>0:
if c*dt*2*atan(0.05*len(cats))/pi>random.uniform(0,1):
cats.pop(ind)
else :
ind+=1
else :
ind+=1
timeli.append(t)
miceli.append(len(mice))
catli.append(len(cats))
print t,'\t',len(mice),'\t',len(cats)
print >> data, t,'\t',len(mice),'\t',len(cats)
t+=dt
data.close()
upper_limit=1.2*len(mice)
pltfile=open('lv.plt','w')
print >> pltfile,"""se te png
se o "/tmp/lv.png"
unse ke
#se yrange [0:%f]
se xl "Time"
se yl "Number of Prey/Predator"
p 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l
"""%upper_limit
pltfile.close()
commands.getoutput('gnuplot lv.plt')
self.wTree.get_widget("image").set_from_file("/tmp/lv.png")
print 'dynamics ended'
reload(matplotlib.pyplot)
matplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-')
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel("Number of mice and cats")
matplotlib.pyplot.show()
gui=gui_display()
gtk.main()
#dynamics()
#import matplotlib.pyplot as plt
#plt.plot(timeli,miceli,'go',timeli,catli,'ro')
#plt.show()
| gpl-3.0 |
blaze/distributed | distributed/protocol/tests/test_collection_cuda.py | 1 | 2448 | import pytest
from distributed.protocol import serialize, deserialize
from dask.dataframe.utils import assert_eq
import pandas as pd
@pytest.mark.parametrize("collection", [tuple, dict])
@pytest.mark.parametrize("y,y_serializer", [(50, "cuda"), (None, "pickle")])
def test_serialize_cupy(collection, y, y_serializer):
cupy = pytest.importorskip("cupy")
x = cupy.arange(100)
if y is not None:
y = cupy.arange(y)
if issubclass(collection, dict):
header, frames = serialize(
{"x": x, "y": y}, serializers=("cuda", "dask", "pickle")
)
else:
header, frames = serialize((x, y), serializers=("cuda", "dask", "pickle"))
t = deserialize(header, frames, deserializers=("cuda", "dask", "pickle", "error"))
assert header["is-collection"] is True
sub_headers = header["sub-headers"]
assert sub_headers[0]["serializer"] == "cuda"
assert sub_headers[1]["serializer"] == y_serializer
assert isinstance(t, collection)
assert ((t["x"] if isinstance(t, dict) else t[0]) == x).all()
if y is None:
assert (t["y"] if isinstance(t, dict) else t[1]) is None
else:
assert ((t["y"] if isinstance(t, dict) else t[1]) == y).all()
@pytest.mark.parametrize("collection", [tuple, dict])
@pytest.mark.parametrize(
"df2,df2_serializer",
[(pd.DataFrame({"C": [3, 4, 5], "D": [2.5, 3.5, 4.5]}), "cuda"), (None, "pickle")],
)
def test_serialize_pandas_pandas(collection, df2, df2_serializer):
cudf = pytest.importorskip("cudf")
df1 = cudf.DataFrame({"A": [1, 2, None], "B": [1.0, 2.0, None]})
if df2 is not None:
df2 = cudf.from_pandas(df2)
if issubclass(collection, dict):
header, frames = serialize(
{"df1": df1, "df2": df2}, serializers=("cuda", "dask", "pickle")
)
else:
header, frames = serialize((df1, df2), serializers=("cuda", "dask", "pickle"))
t = deserialize(header, frames, deserializers=("cuda", "dask", "pickle"))
assert header["is-collection"] is True
sub_headers = header["sub-headers"]
assert sub_headers[0]["serializer"] == "cuda"
assert sub_headers[1]["serializer"] == df2_serializer
assert isinstance(t, collection)
assert_eq(t["df1"] if isinstance(t, dict) else t[0], df1)
if df2 is None:
assert (t["df2"] if isinstance(t, dict) else t[1]) is None
else:
assert_eq(t["df2"] if isinstance(t, dict) else t[1], df2)
| bsd-3-clause |
nicholaschris/landsatpy | utils.py | 1 | 2693 | import operator
import pandas as pd
import numpy as np
from numpy import ma
from scipy.misc import imresize
import scipy.ndimage as ndimage
from skimage.morphology import disk, dilation
def get_truth(input_one, input_two, comparison): # too much abstraction
ops = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq}
return ops[comparison](input_one, input_two)
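# Usage sketch (values are illustrative): the comparison string selects the
# operator from the ops table, e.g.
# >>> get_truth(5, 3, '>')
# True
# >>> get_truth(2.0, 2.0, '<=')
# True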
def convert_to_celsius(brightness_temp_input):
    # 0 degrees Celsius corresponds to 273.15 K
    return brightness_temp_input - 273.15
def calculate_percentile(input_masked_array, percentile):
flat_fill_input = input_masked_array.filled(np.nan).flatten()
df = pd.DataFrame(flat_fill_input)
percentile = df.quantile(percentile/100.0)
return percentile[0]
def save_object(obj, filename):
import pickle
with open(filename, 'wb') as output:
pickle.dump(obj, output)
def downsample(input_array, factor=4):
output_array = input_array[::2, ::2] / 4 + input_array[1::2, ::2] / 4 + input_array[::2, 1::2] / 4 + input_array[1::2, 1::2] / 4
return output_array
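# Illustrative sketch (made-up values): each output pixel is the mean of a
# 2x2 block of the input, so both dimensions are halved, e.g.
# >>> downsample(np.array([[1., 1., 3., 3.],
# ...                      [1., 1., 3., 3.]]))
# array([[ 1.,  3.]])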
def dilate_boolean_array(input_array, disk_size=3):
selem = disk(disk_size)
dilated = dilation(input_array, selem)
return dilated
def get_resized_array(img, size):
lena = imresize(img, (size, size))
return lena
def interp_and_resize(array, new_length):
orig_y_length, orig_x_length = array.shape
interp_factor_y = new_length / orig_y_length
interp_factor_x = new_length / orig_x_length
y = round(interp_factor_y * orig_y_length)
x = round(interp_factor_x * orig_x_length)
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.mgrid.html
new_indicies = np.mgrid[0:orig_y_length:y * 1j, 0:orig_x_length:x * 1j]
# order=1 indicates bilinear interpolation.
interp_array = ndimage.map_coordinates(array, new_indicies,
order=1, output=array.dtype)
interp_array = interp_array.reshape((y, x))
return interp_array
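# Usage sketch (the shape is what matters here): interp_and_resize(band, 512)
# returns a (512, 512) bilinear resample of `band`, whatever its original
# shape was.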
def parse_mtl(in_file):
awesome = True
f = open(in_file, 'r')
print(in_file)
mtl_dict = {}
with open(in_file, 'r') as f:
while awesome:
line = f.readline()
if line.strip() == '' or line.strip() == 'END':
return mtl_dict
elif 'END_GROUP' in line:
pass
elif 'GROUP' in line:
curr_group = line.split('=')[1].strip()
mtl_dict[curr_group] = {}
else:
attr, value = line.split('=')[0].strip(), line.split('=')[1].strip()
mtl_dict[curr_group][attr] = value
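# Rough shape of the returned value (group/attribute names below are
# illustrative, taken from a typical Landsat MTL file; values are kept as the
# raw strings found after the "=" sign):
# {'PRODUCT_METADATA': {'SPACECRAFT_ID': '"LANDSAT_8"', ...},
#  'IMAGE_ATTRIBUTES': {'CLOUD_COVER': '2.95', ...},
#  ...}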
| mit |
kwilliams-mo/iris | lib/iris/tests/test_plot.py | 1 | 32122 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as coords
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
import iris.tests.stock
import iris.tests.test_mapping as test_mapping
def simple_cube():
cube = iris.tests.stock.realistic_4d()
cube = cube[:, 0, 0, :]
cube.coord('time').guess_bounds()
return cube
class TestSimple(tests.GraphicsTest):
def test_points(self):
cube = simple_cube()
qplt.contourf(cube)
self.check_graphic()
def test_bounds(self):
cube = simple_cube()
qplt.pcolor(cube)
self.check_graphic()
class TestMissingCoord(tests.GraphicsTest):
def _check(self, cube):
qplt.contourf(cube)
self.check_graphic()
qplt.pcolor(cube)
self.check_graphic()
def test_no_u(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
self._check(cube)
def test_no_v(self):
cube = simple_cube()
cube.remove_coord('time')
self._check(cube)
def test_none(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
cube.remove_coord('time')
self._check(cube)
@iris.tests.skip_data
class TestMissingCS(tests.GraphicsTest):
@iris.tests.skip_data
def test_missing_cs(self):
cube = tests.stock.simple_pp()
cube.coord("latitude").coord_system = None
cube.coord("longitude").coord_system = None
qplt.contourf(cube)
qplt.plt.gca().coastlines()
self.check_graphic()
class TestHybridHeight(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
def _check(self, plt_method, test_altitude=True):
plt_method(self.cube)
self.check_graphic()
plt_method(self.cube, coords=['level_height', 'grid_longitude'])
self.check_graphic()
plt_method(self.cube, coords=['grid_longitude', 'level_height'])
self.check_graphic()
if test_altitude:
plt_method(self.cube, coords=['grid_longitude', 'altitude'])
self.check_graphic()
plt_method(self.cube, coords=['altitude', 'grid_longitude'])
self.check_graphic()
def test_points(self):
self._check(qplt.contourf)
def test_bounds(self):
self._check(qplt.pcolor, test_altitude=False)
def test_orography(self):
qplt.contourf(self.cube)
iplt.orography_at_points(self.cube)
iplt.points(self.cube)
self.check_graphic()
coords = ['altitude', 'grid_longitude']
qplt.contourf(self.cube, coords=coords)
iplt.orography_at_points(self.cube, coords=coords)
iplt.points(self.cube, coords=coords)
self.check_graphic()
# TODO: Test bounds once they are supported.
with self.assertRaises(NotImplementedError):
qplt.pcolor(self.cube)
iplt.orography_at_bounds(self.cube)
iplt.outline(self.cube)
self.check_graphic()
class Test1dPlotMultiArgs(tests.GraphicsTest):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = iplt.plot
def test_cube(self):
# just plot a cube against its dim coord
self.draw_method(self.cube1d) # altitude vs temp
self.check_graphic()
def test_coord(self):
# plot the altitude coordinate
self.draw_method(self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_cube(self):
# plot temperature against sigma
self.draw_method(self.cube1d.coord('sigma'), self.cube1d)
self.check_graphic()
def test_cube_coord(self):
# plot a vertical profile of temperature
self.draw_method(self.cube1d, self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord(self):
# plot two coordinates that are not mappable
self.draw_method(self.cube1d.coord('sigma'),
self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord_map(self):
# plot lat-lon aux coordinates of a trajectory, which draws a map
lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
standard_name='longitude',
units='degrees_north')
lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
standard_name='latitude',
units='degrees_north')
self.draw_method(lon, lat)
plt.gca().coastlines()
self.check_graphic()
def test_cube_cube(self):
# plot two phenomena against eachother, in this case just dummy data
cube1 = self.cube1d.copy()
cube2 = self.cube1d.copy()
cube1.rename('some phenomenon')
cube2.rename('some other phenomenon')
cube1.units = iris.unit.Unit('no_unit')
cube2.units = iris.unit.Unit('no_unit')
cube1.data[:] = np.linspace(0, 1, 7)
cube2.data[:] = np.exp(cube1.data)
self.draw_method(cube1, cube2)
self.check_graphic()
def test_incompatible_objects(self):
# incompatible objects (not the same length) should raise an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d.coord('time'), (self.cube1d))
def test_multimidmensional(self):
# multidimensional cubes are not allowed
cube = _load_4d_testcube()[0, :, :, 0]
with self.assertRaises(ValueError):
self.draw_method(cube)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates, otherwise an error should be
# raised
xdim = np.arange(self.cube1d.shape[0])
with self.assertRaises(TypeError):
self.draw_method(xdim, self.cube1d)
def test_coords_deprecated(self):
# ensure a warning is raised if the old coords keyword argument is
# used, and make sure the plot produced is consistent with the old
# interface
msg = 'Missing deprecation warning for coords keyword.'
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.draw_method(self.cube1d, coords=['sigma'])
self.assertEqual(len(w), 1, msg)
self.check_graphic()
def test_coords_deprecation_too_many(self):
# in deprecation mode, too many coords is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['sigma', 'sigma'])
def test_coords_deprecation_invalid_span(self):
# in deprecation mode, a coordinate that doesn't span data is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['time'])
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = qplt.plot
@tests.skip_data
class Test1dScatter(tests.GraphicsTest):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = iplt.scatter
def test_coord_coord(self):
x = self.cube.coord('longitude')
y = self.cube.coord('height')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_coord_coord_map(self):
x = self.cube.coord('longitude')
y = self.cube.coord('latitude')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
plt.gca().coastlines()
self.check_graphic()
def test_coord_cube(self):
x = self.cube.coord('latitude')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_coord(self):
x = self.cube
y = self.cube.coord('height')
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_cube(self):
x = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Rel Humidity')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_incompatible_objects(self):
# cubes/coordinates of different sizes cannot be plotted
x = self.cube
y = self.cube.coord('height')[:-1]
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_multidimensional(self):
# multidimensional cubes/coordinates are not allowed
x = _load_4d_testcube()[0, :, :, 0]
y = x.coord('model_level_number')
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates
x = np.arange(self.cube.shape[0])
y = self.cube
with self.assertRaises(TypeError):
self.draw_method(x, y)
@tests.skip_data
class Test1dQuickplotScatter(Test1dScatter):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = qplt.scatter
@iris.tests.skip_data
class TestAttributePositive(tests.GraphicsTest):
def test_1d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])
self.check_graphic()
def test_1d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
self.check_graphic()
def test_2d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'testing',
'small_theta_colpex.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
def test_2d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = fn.__name__
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
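# Note: the cache key above is only the wrapped function's name, so arguments
# are ignored after the first call. That is fine for the zero-argument
# loaders below, but it is not a general-purpose memoizer.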
@cache
def _load_4d_testcube():
# Load example 4d data (TZYX).
test_cube = iris.tests.stock.realistic_4d()
# Replace forecast_period coord with a multi-valued version.
time_coord = test_cube.coord('time')
n_times = len(time_coord.points)
forecast_dims = test_cube.coord_dims(time_coord)
test_cube.remove_coord('forecast_period')
# Make up values (including bounds), to roughly match older testdata.
point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
point_uppers = point_values + (point_values[1] - point_values[0])
bound_values = np.column_stack([point_values, point_uppers])
# NOTE: this must be a DimCoord
# - an equivalent AuxCoord produces different plots.
new_forecast_coord = iris.coords.DimCoord(
points=point_values,
bounds=bound_values,
standard_name='forecast_period',
units=iris.unit.Unit('hours')
)
test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
# Heavily reduce dimensions for faster testing.
# NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
test_cube = test_cube[:, ::10, ::10, ::10]
return test_cube
@cache
def _load_wind_no_bounds():
# Load the COLPEX data => TZYX
path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))
wind = iris.load_cube(path, 'eastward_wind')
# Remove bounds from all coords that have them.
wind.coord('grid_latitude').bounds = None
wind.coord('grid_longitude').bounds = None
wind.coord('level_height').bounds = None
wind.coord('sigma').bounds = None
return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('time')
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('forecast_period')
return cube
class SliceMixin(object):
"""Mixin class providing tests for each 2-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_yx(self):
cube = self.wind[0, 0, :, :]
self.draw_method(cube)
self.check_graphic()
def test_zx(self):
cube = self.wind[0, :, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_tx(self):
cube = _time_series(self.wind[:, 0, 0, :])
self.draw_method(cube)
self.check_graphic()
def test_zy(self):
cube = self.wind[0, :, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_ty(self):
cube = _time_series(self.wind[:, 0, :, 0])
self.draw_method(cube)
self.check_graphic()
def test_tz(self):
cube = _time_series(self.wind[:, :, 0, 0])
self.draw_method(cube)
self.check_graphic()
@iris.tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contour routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contour
@iris.tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contourf routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contourf
@iris.tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolor routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolormesh routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolormesh
def check_warnings(method):
"""
Decorator that adds a catch_warnings and filter to assert
the method being decorated issues a UserWarning.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
# Force reset of iris.coords warnings registry to avoid suppression of
# repeated warnings. warnings.resetwarnings() does not do this.
if hasattr(coords, '__warningregistry__'):
coords.__warningregistry__.clear()
# Check that method raises warning.
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(UserWarning):
return method(self, *args, **kwargs)
return decorated_method
def ignore_warnings(method):
"""
Decorator that adds a catch_warnings and filter to suppress
any warnings issues by the method being decorated.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return method(self, *args, **kwargs)
return decorated_method
class CheckForWarningsMetaclass(type):
"""
Metaclass that adds a further test for each base class test
that checks that each test raises a UserWarning. Each base
class test is then overriden to ignore warnings in order to
check the underlying functionality.
"""
def __new__(cls, name, bases, local):
def add_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
new_key = '_'.join((key, decorator.__name__))
if new_key not in target_dict:
wrapped = decorator(value)
wrapped.__name__ = new_key
target_dict[new_key] = wrapped
else:
                        raise RuntimeError('An attribute called {!r} '
                                           'already exists.'.format(new_key))
def override_with_decorated_methods(attr_dict, target_dict,
decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
target_dict[key] = decorator(value)
# Add decorated versions of base methods
# to check for warnings.
for base in bases:
add_decorated_methods(base.__dict__, local, check_warnings)
# Override base methods to ignore warnings.
for base in bases:
override_with_decorated_methods(base.__dict__, local,
ignore_warnings)
return type.__new__(cls, name, bases, local)
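# Illustrative effect (test names assumed): for an inherited test such as
# 'test_yx', the metaclass adds 'test_yx_check_warnings', which asserts that a
# UserWarning is raised, and overrides 'test_yx' itself to run with warnings
# suppressed so the underlying plotting is still exercised.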
@iris.tests.skip_data
class TestPcolorNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolor routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormeshNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolormesh routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolormesh
class Slice1dMixin(object):
"""Mixin class providing tests for each 1-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_x(self):
cube = self.wind[0, 0, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_y(self):
cube = self.wind[0, 0, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_z(self):
cube = self.wind[0, :, 0, 0]
self.draw_method(cube)
self.check_graphic()
def test_t(self):
cube = _time_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
self.check_graphic()
def test_t_dates(self):
cube = _date_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
plt.gcf().autofmt_xdate()
plt.xlabel('Phenomenon time')
self.check_graphic()
@iris.tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.plot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.plot
@iris.tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.quickplot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = qplt.plot
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
"""Same syntax as load_cube, but will only load a file once,
then cache the answer in a dictionary.
"""
global _load_cube_once_cache
key = (filename, str(constraint))
cube = _load_cube_once_cache.get(key, None)
if cube is None:
cube = iris.load_cube(filename, constraint)
_load_cube_once_cache[key] = cube
return cube
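# Usage sketch (path and constraint are illustrative):
# cube = load_cube_once('/path/to/theta.pp', 'air_potential_temperature')
# A second call with the same (filename, constraint) pair returns the cached
# cube instead of re-reading the file.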
class LambdaStr(object):
"""Provides a callable function which has a sensible __repr__."""
def __init__(self, repr, lambda_fn):
self.repr = repr
self.lambda_fn = lambda_fn
def __call__(self, *args, **kwargs):
return self.lambda_fn(*args, **kwargs)
def __repr__(self):
return self.repr
@iris.tests.skip_data
class TestPlotCoordinatesGiven(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('PP', 'COLPEX',
'theta_and_orog_subset.pp'))
self.cube = load_cube_once(filename, 'air_potential_temperature')
self.draw_module = iris.plot
self.contourf = LambdaStr('iris.plot.contourf',
lambda cube, *args, **kwargs:
iris.plot.contourf(cube, *args, **kwargs))
self.contour = LambdaStr('iris.plot.contour',
lambda cube, *args, **kwargs:
iris.plot.contour(cube, *args, **kwargs))
self.points = LambdaStr('iris.plot.points',
lambda cube, *args, **kwargs:
iris.plot.points(cube, c=cube.data,
*args, **kwargs))
self.plot = LambdaStr('iris.plot.plot',
lambda cube, *args, **kwargs:
iris.plot.plot(cube, *args, **kwargs))
self.results = {'yx': ([self.contourf, ['grid_latitude',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'grid_latitude']],
[self.contour, ['grid_latitude',
'grid_longitude']],
[self.contour, ['grid_longitude',
'grid_latitude']],
[self.points, ['grid_latitude',
'grid_longitude']],
[self.points, ['grid_longitude',
'grid_latitude']],),
'zx': ([self.contourf, ['model_level_number',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'model_level_number']],
[self.contour, ['model_level_number',
'grid_longitude']],
[self.contour, ['grid_longitude',
'model_level_number']],
[self.points, ['model_level_number',
'grid_longitude']],
[self.points, ['grid_longitude',
'model_level_number']],),
'tx': ([self.contourf, ['time', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'time']],
[self.contour, ['time', 'grid_longitude']],
[self.contour, ['grid_longitude', 'time']],
[self.points, ['time', 'grid_longitude']],
[self.points, ['grid_longitude', 'time']],),
'x': ([self.plot, ['grid_longitude']],),
'y': ([self.plot, ['grid_latitude']],)
}
def draw(self, draw_method, *args, **kwargs):
draw_fn = getattr(self.draw_module, draw_method)
draw_fn(*args, **kwargs)
self.check_graphic()
def run_tests(self, cube, results):
for draw_method, coords in results:
draw_method(cube, coords=coords)
try:
self.check_graphic()
            except AssertionError as err:
self.fail('Draw method %r failed with coords: %r. '
'Assertion message: %s' % (draw_method, coords, err))
def run_tests_1d(self, cube, results):
# there is a different calling convention for 1d plots
for draw_method, coords in results:
draw_method(cube.coord(coords[0]), cube)
try:
self.check_graphic()
except AssertionError as err:
msg = 'Draw method {!r} failed with coords: {!r}. ' \
'Assertion message: {!s}'
self.fail(msg.format(draw_method, coords, err))
def test_yx(self):
test_cube = self.cube[0, 0, :, :]
self.run_tests(test_cube, self.results['yx'])
def test_zx(self):
test_cube = self.cube[0, :15, 0, :]
self.run_tests(test_cube, self.results['zx'])
def test_tx(self):
test_cube = self.cube[:, 0, 0, :]
self.run_tests(test_cube, self.results['tx'])
def test_x(self):
test_cube = self.cube[0, 0, 0, :]
self.run_tests_1d(test_cube, self.results['x'])
def test_y(self):
test_cube = self.cube[0, 0, :, 0]
self.run_tests_1d(test_cube, self.results['y'])
def test_badcoords(self):
cube = self.cube[0, 0, :, :]
draw_fn = getattr(self.draw_module, 'contourf')
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude'])
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude',
'grid_latitude'])
self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,
cube, coords=['grid_longitude', 'wibble'])
self.assertRaises(ValueError, draw_fn, cube, coords=[])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
def test_non_cube_coordinate(self):
cube = self.cube[0, :, :, 0]
pts = -100 + np.arange(cube.shape[1]) * 13
x = coords.DimCoord(pts, standard_name='model_level_number',
attributes={'positive': 'up'})
self.draw('contourf', cube, coords=['grid_latitude', x])
@iris.tests.skip_data
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',
'rotPole_landAreaFraction.nc'))
self.cube = iris.load_cube(filename)
def test_default(self):
iplt.contourf(self.cube)
plt.gca().coastlines()
self.check_graphic()
def test_coords(self):
# Pass in dimension coords.
rlat = self.cube.coord('grid_latitude')
rlon = self.cube.coord('grid_longitude')
iplt.contourf(self.cube, coords=[rlon, rlat])
plt.gca().coastlines()
self.check_graphic()
# Pass in auxiliary coords.
lat = self.cube.coord('latitude')
lon = self.cube.coord('longitude')
iplt.contourf(self.cube, coords=[lon, lat])
plt.gca().coastlines()
self.check_graphic()
def test_coord_names(self):
# Pass in names of dimension coords.
iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])
plt.gca().coastlines()
self.check_graphic()
# Pass in names of auxiliary coords.
iplt.contourf(self.cube, coords=['longitude', 'latitude'])
plt.gca().coastlines()
self.check_graphic()
def test_yx_order(self):
# Do not attempt to draw coastlines as it is not a map.
iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])
self.check_graphic()
iplt.contourf(self.cube, coords=['latitude', 'longitude'])
self.check_graphic()
class TestSymbols(tests.GraphicsTest):
def test_cloud_cover(self):
iplt.symbols(range(10), [0] * 10, [iris.symbols.CLOUD_COVER[i]
for i in range(10)], 0.375)
self.check_graphic()
class TestPlottingExceptions(tests.IrisTest):
def setUp(self):
self.bounded_cube = tests.stock.lat_lon_cube()
self.bounded_cube.coord("latitude").guess_bounds()
self.bounded_cube.coord("longitude").guess_bounds()
def test_boundmode_multidim(self):
# Test exception translation.
# We can't get contiguous bounded grids from multi-d coords.
cube = self.bounded_cube
cube.remove_coord("latitude")
cube.add_aux_coord(coords.AuxCoord(points=cube.data,
standard_name='latitude',
units='degrees'), [0, 1])
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_boundmode_4bounds(self):
# Test exception translation.
# We can only get contiguous bounded grids with 2 bounds per point.
cube = self.bounded_cube
lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
lat.bounds = np.array([lat.points, lat.points + 1,
lat.points + 2, lat.points + 3]).transpose()
cube.remove_coord("latitude")
cube.add_aux_coord(lat, 0)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_different_coord_systems(self):
cube = self.bounded_cube
lat = cube.coord('latitude')
lon = cube.coord('longitude')
lat.coord_system = iris.coord_systems.GeogCS(7000000)
lon.coord_system = iris.coord_systems.GeogCS(7000001)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
@iris.tests.skip_data
class TestPlotOtherCoordSystems(tests.GraphicsTest):
def test_plot_tmerc(self):
filename = tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc'))
self.cube = iris.load_cube(filename)
iplt.pcolormesh(self.cube[0])
plt.gca().coastlines()
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
kaichogami/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
    - mix of label indicator matrix and anything else
      (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
    # Check for a float dtype containing non-integer values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
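# A minimal sketch of the intended calling pattern. ``MyEstimator`` and its
# attributes are hypothetical and only illustrate the protocol; a real
# estimator would derive from ``sklearn.base.BaseEstimator``:
#
#     class MyEstimator(BaseEstimator):
#         def partial_fit(self, X, y, classes=None):
#             if _check_partial_fit_first_call(self, classes):
#                 # First call: ``self.classes_`` has just been set, so any
#                 # per-class state can be allocated here.
#                 self.class_count_ = np.zeros(len(self.classes_))
#             # ... incremental update using X and y ...
#             return self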
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
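# Illustrative call for the dense case (not executed at import time; the
# values shown are approximate):
#
#     y = np.array([[1, 0], [2, 0], [2, 1]])
#     classes, n_classes, class_prior = class_distribution(y)
#     # classes     -> [array([1, 2]), array([0, 1])]
#     # n_classes   -> [2, 2]
#     # class_prior -> [array([0.333, 0.667]), array([0.667, 0.333])]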
| bsd-3-clause |
ONEcampaign/humanitarian-data-service | displacement_tracker_data.py | 1 | 27157 | import requests
import pandas as pd
import os.path
import resources.constants
import json
from pandas.io.json import json_normalize
from utils.data_utils import get_ordinal_number
"""
This script aggregates data from multiple endpoints and returns a single .json file containing all data
used in the displacement tracker project.
Scheduling this script would mean that the /displacement_tracker endpoint always returned the latest data
contained within the Humanitarian Data Service API.
"""
# For development
#ROOT = 'http://localhost:5000'
# For live
ROOT = 'http://ec2-34-200-18-111.compute-1.amazonaws.com'
# Set year for country-level funding data
FUNDING_YEAR = 2016
# Define all endpoints
URL_POPULATIONS_REFUGEELIKE_ASYLUM = '/populations/refugeelike/asylum/index'
URL_POPULATIONS_REFUGEELIKE_ORIGIN = '/populations/refugeelike/origin/index'
URL_INDICATORS_GNI = '/indicators/gni/index'
URL_PLANS_PROGRESS = '/funding/plans/progress/index'
URL_POPULATION = '/populations/totals/index'
URL_FRAGILE_STATE = '/fragility/fragile-state-index/index'
URL_NEEDS = '/needs/plans/index'
URL_FUNDING_DEST_COUNTRY = '/funding/countries/destination/index/{}'.format(FUNDING_YEAR)
URL_FUNDING_DEST_DONORS = '/funding/countries/donors/index'
# Define path for raw country names data
country_names_path = os.path.join(resources.constants.EXAMPLE_RAW_DATA_PATH, 'UNSD Methodology.csv')
# Define path for relatable geography populations data
relatable_population_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, '2017_relatable_population_rankings.csv')
# Define path for stories of displacement
displacement_stories_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'stories_of_displacement_links.csv')
# Create a blank dictionary to store metadata for each field
metadata_dict = {}
def merge_data(
funding_year = FUNDING_YEAR,
country_names_path=country_names_path,
relatable_population_path=relatable_population_path,
displacement_stories_path=displacement_stories_path,
url_populations_refugeelike_asylum=(ROOT + URL_POPULATIONS_REFUGEELIKE_ASYLUM),
url_populations_refugeelike_origin=(ROOT + URL_POPULATIONS_REFUGEELIKE_ORIGIN),
url_indicators_gni=(ROOT + URL_INDICATORS_GNI),
url_plans_progress=(ROOT + URL_PLANS_PROGRESS),
url_population=(ROOT + URL_POPULATION),
url_fragile_state=(ROOT + URL_FRAGILE_STATE),
url_needs=(ROOT + URL_NEEDS),
url_funding_dest_country=(ROOT + URL_FUNDING_DEST_COUNTRY),
url_funding_dest_donors=(ROOT + URL_FUNDING_DEST_DONORS)
):
#################### COUNTRY NAMES ####################
# Get the data from .csv
df_country_names = pd.read_csv(country_names_path, encoding='utf-8')
# Select relevant fields
df_country_names = df_country_names[[
'Country or Area',
'ISO-alpha3 Code'
]]
# Add Taiwan
df_country_names.loc[-1] = ["Taiwan", "TWN"]
# Drop null values
df_country_names = df_country_names.dropna()
# Set country code to be the index
df_country_names = df_country_names.set_index('ISO-alpha3 Code')
# Rename fields
df_country_names.rename(columns={'Country or Area': 'Country'}, inplace=True)
#################### DISPLACEMENT STORIES ####################
# Get the data from .csv
df_displacement_stories = pd.read_csv(displacement_stories_path, encoding='utf-8')
# Set country code to be the index
df_displacement_stories = df_displacement_stories.set_index('countryCode')
# Select relevant fields
df_displacement_stories = df_displacement_stories[[
'storyTitle', 'storySource',
'storyTagLine', 'storyURL'
]]
# Drop null values
df_displacement_stories = df_displacement_stories.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_displacement_stories.columns:
metadata_dict[column] = {}
#################### POPULATIONS ####################
# Get the data from the API
population_data = requests.get(url_population).json()
# Extract metadata
if 'metadata' in population_data:
population_metadata = population_data['metadata']
else:
population_metadata = {}
# Build dataframe
df_population = pd.DataFrame(population_data['data']).T
# Select relevant fields
df_population = df_population[[
'PopTotal'
]]
# Rename fields
df_population.rename(columns={'PopTotal': 'Population'}, inplace=True)
# Drop null values
df_population = df_population.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_population.columns:
metadata_dict[column] = population_metadata
#################### FRAGILE STATE ####################
# Get the data from the API
fragile_state_data = requests.get(url_fragile_state).json()
# Extract metadata
if 'metadata' in fragile_state_data:
fragile_state_metadata = fragile_state_data['metadata']
else:
fragile_state_metadata = {}
# Build a dataframe
df_fragile_state = pd.DataFrame(fragile_state_data['data']).T
# Select relevant fields
df_fragile_state = df_fragile_state[[
'Total', 'Rank'
]]
# Rename fields
df_fragile_state.rename(columns={'Total': 'Fragile State Index Score',
'Rank': 'Fragile State Index Rank'}, inplace=True)
# Drop null values
df_fragile_state = df_fragile_state.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_fragile_state.columns:
metadata_dict[column] = fragile_state_metadata
#################### POPULATIONS_REFUGEELIKE_ASYLUM ####################
# Get the data from the API
populations_refugeelike_asylum_data = requests.get(url_populations_refugeelike_asylum).json()
# Extract metadata
if 'metadata' in populations_refugeelike_asylum_data:
populations_refugeelike_asylum_metadata = populations_refugeelike_asylum_data['metadata']
else:
populations_refugeelike_asylum_metadata = {}
# Build a dataframe
df_populations_refugeelike_asylum = pd.DataFrame(populations_refugeelike_asylum_data['data']).T
# Select relevant fields
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum[[
'Total population of concern', 'Total Refugee and people in refugee-like situations',
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations','Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_asylum.rename(columns={
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations': 'IDPs protected/assisted by UNHCR',
'Asylum-seekers': 'Asylum-seekers (asylum)'
}, inplace=True)
    # Add field to rank total population of concern
df_populations_refugeelike_asylum['Rank of total population of concern'] = df_populations_refugeelike_asylum[
'Total population of concern'].rank(ascending=False, method='min').astype(int)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_asylum['Total refugees and asylum-seekers (asylum)'] = df_populations_refugeelike_asylum[
'Total Refugee and people in refugee-like situations'] + df_populations_refugeelike_asylum['Asylum-seekers (asylum)']
# Drop null values
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_asylum.columns:
metadata_dict[column] = populations_refugeelike_asylum_metadata
#################### POPULATIONS_REFUGEELIKE_ORIGIN ####################
# Get the data from the API
populations_refugeelike_origin_data = requests.get(url_populations_refugeelike_origin).json()
# Extract metadata
if 'metadata' in populations_refugeelike_origin_data:
populations_refugeelike_origin_metadata = populations_refugeelike_origin_data['metadata']
else:
populations_refugeelike_origin_metadata = {}
# Build a dataframe
df_populations_refugeelike_origin = pd.DataFrame(populations_refugeelike_origin_data['data']).T
# Select relevant fields
df_populations_refugeelike_origin = df_populations_refugeelike_origin[[
'Total Refugee and people in refugee-like situations', 'Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_origin.rename(columns={
'Total Refugee and people in refugee-like situations': 'Total refugees who have fled from country',
'Asylum-seekers': 'Asylum-seekers (origin)'
}, inplace=True)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_origin['Total refugees and asylum-seekers (origin)'] = df_populations_refugeelike_origin[
'Total refugees who have fled from country'] + df_populations_refugeelike_origin['Asylum-seekers (origin)']
# Drop null values
df_populations_refugeelike_origin = df_populations_refugeelike_origin.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_origin.columns:
metadata_dict[column] = populations_refugeelike_origin_metadata
#################### INDICATORS GNI ####################
# Get the data from the API
indicators_gni_data = requests.get(url_indicators_gni).json()
# Extract metadata
if 'metadata' in indicators_gni_data:
indicators_gni_metadata = indicators_gni_data['metadata']
else:
indicators_gni_metadata = {}
# Build a dataframe
df_indicators_gni = pd.DataFrame(indicators_gni_data['data']).T
# Select relevant fields
df_indicators_gni = df_indicators_gni[[
'2015'
]]
# Rename fields
df_indicators_gni.rename(columns={'2015': 'GDP Per Capita'}, inplace=True)
# Drop null values
df_indicators_gni = df_indicators_gni.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_indicators_gni.columns:
metadata_dict[column] = indicators_gni_metadata
#################### PLANS PROGRESS ####################
# Get the data from the API
plans_progress_data = requests.get(url_plans_progress).json()
# Extract metadata
if 'metadata' in plans_progress_data:
plans_progress_metadata = plans_progress_data['metadata']
else:
plans_progress_metadata = {}
# Build a dataframe
df_plans_progress = pd.DataFrame(plans_progress_data['data']).T
# Select relevant fields
df_plans_progress = df_plans_progress[[
'appealFunded', 'revisedRequirements', 'neededFunding'
]]
# Rename fields
df_plans_progress.rename(columns={'appealFunded': 'Appeal funds committed to date',
'revisedRequirements': 'Appeal funds requested',
'neededFunding': 'Appeal funds still needed'}, inplace=True)
df_plans_progress['Appeal percent funded'] = df_plans_progress['Appeal funds committed to date']/df_plans_progress['Appeal funds requested']
# Drop null values
df_plans_progress = df_plans_progress.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_plans_progress.columns:
metadata_dict[column] = plans_progress_metadata
# Add an FTS data as-of date so it can be included in the .csv data dump
df_plans_progress['FTS funding data as-of date'] = plans_progress_data['metadata']['source_data']
######## FUNDING BY DESTINATION COUNTRY ############
    # Get the data from the API
funding_dest_country_data = requests.get(url_funding_dest_country).json()
# Extract metadata
if 'metadata' in funding_dest_country_data:
funding_dest_country_metadata = funding_dest_country_data['metadata']
else:
funding_dest_country_metadata = {}
# Build a dataframe
df_funding_dest_country = pd.DataFrame(funding_dest_country_data['data']).T
# Select relevant fields
df_funding_dest_country = df_funding_dest_country[[
'totalFunding'
]]
# Keep only records where totalFunding > 0
df_funding_dest_country = df_funding_dest_country[df_funding_dest_country['totalFunding'] > 0]
# Rename fields
df_funding_dest_country.rename(columns={'totalFunding': 'Humanitarian aid received'},
inplace=True)
    # Add field to rank humanitarian aid received
df_funding_dest_country['Rank of humanitarian aid received'] = df_funding_dest_country[
'Humanitarian aid received'].rank(ascending=False, method='min').astype(int)
# Drop null values
df_funding_dest_country = df_funding_dest_country.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_country.columns:
metadata_dict[column] = funding_dest_country_metadata
################## TOP 5 DONORS TO EACH DESTINATION COUNTRY ###################
    # Get the data from the API
funding_dest_donors_data = requests.get(url_funding_dest_donors).json()
# Extract metadata
if 'metadata' in funding_dest_donors_data:
funding_dest_donors_metadata = funding_dest_donors_data['metadata']
else:
funding_dest_donors_metadata = {}
# Build a dataframe
df_funding_dest_donors = json_normalize(funding_dest_donors_data['data']).T
#df_funding_dest_donors = pd.DataFrame(funding_dest_donors_data['data']).T
df_funding_dest_donors.columns = (['Top 5 Donors'])
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_donors.columns:
metadata_dict[column] = funding_dest_donors_metadata
#################### NEEDS ####################
# Get the data from the API
needs_data = requests.get(url_needs).json()
# Extract metadata
if 'metadata' in needs_data:
needs_metadata = needs_data['metadata']
else:
needs_metadata = {}
# Build a dataframe
df_needs = pd.DataFrame(needs_data['data']).T
# Exclude rows where country code is missing
df_needs = df_needs.drop('null')
# Select relevant fields
df_needs = df_needs[[
'inNeedTotal', 'inNeedHealth', 'inNeedEducation',
'inNeedFoodSecurity', 'inNeedProtection', 'sourceURL',
'inNeedShelter-CCCM-NFI', 'inNeedWASH', 'sourceType'
]]
# Rename fields
df_needs.rename(columns={'inNeedTotal': 'Total people in need',
'inNeedHealth': 'People in need of health support',
'inNeedEducation': 'Children in need of education',
'inNeedFoodSecurity': 'People who are food insecure',
'inNeedProtection': 'People in need of protection',
'inNeedShelter-CCCM-NFI': 'People in need of shelter',
'inNeedWASH': 'People in need of water, sanitization & hygiene',
'sourceURL': 'Source of needs data',
'sourceType': 'Source type of needs data'
}, inplace=True)
# Add metadata for each field to overall metadata dictionary
for column in df_needs.columns:
metadata_dict[column] = needs_metadata
######## FIND PLACES WITH SIMILAR POPULATIONS TO PEOPLE IN NEED ########
# Get the relateable populations data from .csv
df_relatable_populations = pd.read_csv(relatable_population_path)
df_relatable_populations['Population'] = df_relatable_populations[[
'Population - World Bank (2015)','Population - UNFPA (2016)'
]].max(axis=1)
df_relatable_populations = df_relatable_populations[['City, State, Country','Population']].dropna()
def find_nearest_place_population(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_population = nearest_row['Population']
else:
nearest_population = 0.00
return nearest_population
def find_nearest_place(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_place = nearest_row['City, State, Country']
else:
nearest_place = ''
return nearest_place
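    # A small sketch of how these helpers behave; the city and population are
    # hypothetical and only illustrate the nearest-match lookup:
    #
    #     If df_relatable_populations contained 'Springfield, USA' with a
    #     Population of 9000000, then:
    #         find_nearest_place(8700000)            -> 'Springfield, USA'
    #         find_nearest_place_population(8700000) -> 9000000
    #     A falsy reference value returns '' and 0.00 respectively.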
df_needs['Place with similar population as people in need'] = df_needs['Total people in need'].apply(
find_nearest_place)
# Add metadata
metadata_dict['Place with similar population as people in need'] = {}
df_needs['Population of place with similar population'] = df_needs['Total people in need'].apply(
find_nearest_place_population)
# Add metadata
metadata_dict['Population of place with similar population'] = {}
#################### SAMPLE CLUSTERS ####################
# Build a dataframe
# df_clusters = pd.read_json('sample_clusters.json').T
# df_clusters = df_clusters[['clusters']]
################# COMBINE ALL DATA ##############
# Make a list of all dataframes
all_dataframes = [
df_country_names,
df_populations_refugeelike_asylum,
df_indicators_gni,
df_plans_progress,
df_population,
df_fragile_state,
df_needs,
df_funding_dest_country,
df_funding_dest_donors,
df_displacement_stories,
df_populations_refugeelike_origin
# df_clusters
]
df_final = pd.concat(all_dataframes, axis=1)
# Add calculation for displaced people as a ratio of total population
df_final['Population of concern per 1000 population'] = (df_final['Total population of concern'] / df_final[
'Population'])*1000
# And metadata
metadata_dict['Population of concern per 1000 population'] = {}
metadata_dict['Population of concern per 1000 population']['Calculation'] = '(Total population of concern / Population) * 1000'
# Add calculation for displaced people per million GDP
df_final['Population of concern per million GDP'] = ((df_final['Total population of concern'] * 1000000) / (df_final[
'GDP Per Capita'] * df_final['Population']))
# And metadata
metadata_dict['Population of concern per million GDP'] = {}
    metadata_dict['Population of concern per million GDP']['Calculation'] = '(Total population of concern * 1000000) / (GDP Per Capita * Population)'
# Add field to specify whether country has current humanitarian appeal in FTS
df_final['Country has current appeal'] = df_final['Appeal funds requested'].notnull()
# And metadata
metadata_dict['Country has current appeal'] = {}
metadata_dict['Country has current appeal']['Calculation'] = 'Is Appeal funds requested not null'
# Make the ranked variables ordinal
def get_ordinal_number(value):
try:
value = int(value)
except ValueError:
return value
if value % 100 // 10 != 1:
if value % 10 == 1:
ordval = u"%d%s" % (value, "st")
elif value % 10 == 2:
ordval = u"%d%s" % (value, "nd")
elif value % 10 == 3:
ordval = u"%d%s" % (value, "rd")
else:
ordval = u"%d%s" % (value, "th")
else:
ordval = u"%d%s" % (value, "th")
return ordval
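    # Quick illustration of the ordinal formatting above (not executed here):
    #
    #     get_ordinal_number(1)    -> u'1st'
    #     get_ordinal_number(22)   -> u'22nd'
    #     get_ordinal_number(13)   -> u'13th'   # 11, 12, 13 all take 'th'
    #     get_ordinal_number('')   -> ''        # non-integer strings pass through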
df_final['Rank of total population of concern'] = df_final['Rank of total population of concern'].apply(
get_ordinal_number)
df_final['Rank of humanitarian aid received'] = df_final['Rank of humanitarian aid received'].apply(
get_ordinal_number)
################## STRUCTURE DICTIONARY ##################
# Clean up NaN values
df_final = df_final.fillna('')
# Transform dataframe to dictionary
df_as_dict = df_final.to_dict(orient='index')
# Define field names for each strand
strand_01_fields = ['Appeal funds still needed', 'Appeal funds requested', 'Appeal funds committed to date',
'Appeal percent funded', 'Source of needs data', 'Source type of needs data',
'Total people in need', 'Place with similar population as people in need',
'Population of place with similar population']
strand_02_fields = ['Population of concern per 1000 population', 'Fragile State Index Score',
'Total population of concern',
'IDPs protected/assisted by UNHCR',
'GDP Per Capita',
'Total refugees and asylum-seekers (asylum)',
'Total refugees and asylum-seekers (origin)']
strand_03_fields = ['Humanitarian aid received', 'Appeal funds requested', 'Appeal percent funded',
'Rank of total population of concern', 'Rank of humanitarian aid received']
needs_fields = ['People in need of health support','Children in need of education',
'People who are food insecure','People in need of protection','People in need of shelter',
'People in need of water, sanitization & hygiene']
story_fields = ['storyTitle', 'storySource', 'storyTagLine', 'storyURL']
# For every object, get / group the values by strand
data = {}
for x in df_as_dict.keys():
# Create an empty dict
country_dict = {}
# Populate the dict with those value that don't require nesting
country_dict['Country'] = df_as_dict[x]['Country']
country_dict['Fragile State Index Rank'] = df_as_dict[x]['Fragile State Index Rank']
country_dict['Country has current appeal'] = df_as_dict[x]['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if df_as_dict[x]['storyURL']:
for field in story_fields:
story_fields_dict[field] = (df_as_dict[x][field])
country_dict['Displacement_story'] = story_fields_dict
# Populate the dict with strand 1 data if the country has a current appeal
strand_01_dict = {}
if df_as_dict[x]['Country has current appeal']:
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (df_as_dict[x][names_01])
for name in needs_fields:
if df_as_dict[x][name] != '':
strand_01_dict['Needs_Data'][name] = (df_as_dict[x][name])
country_dict['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (df_as_dict[x][names_02])
country_dict['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (df_as_dict[x][names_03])
if df_as_dict[x]['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = df_as_dict[x]['Top 5 Donors']
country_dict['Strand_03_Aid'] = strand_03_dict
# Add the country dict to the data dict
data[x] = country_dict
# Add World totals
# Create an empty dict
world_dict = {}
# Populate the dict with aggregated strand 1 data
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
strand_01_dict['Total people in need'] = df_needs['Total people in need'].sum()
strand_01_dict['Count of current crises with people in need'] = df_needs['Total people in need'].count()
strand_01_dict['Place with similar population as people in need'] = find_nearest_place(
df_needs['Total people in need'].sum()
)
strand_01_dict['Population of place with similar population'] = find_nearest_place_population(
df_needs['Total people in need'].sum()
)
for name in needs_fields:
strand_01_dict['Needs_Data'][name] = df_needs[name].sum()
world_dict['Strand_01_Needs'] = strand_01_dict
# Add the world dict to the data dict
data['WORLD'] = world_dict
# Create the metadata dict
metadata = {}
# Populate the dict with those value that don't require nesting
#metadata['Country'] = metadata_dict['Country']
metadata['Fragile State Index Rank'] = metadata_dict['Fragile State Index Rank']
metadata['Country has current appeal'] = metadata_dict['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if metadata_dict['storyURL']:
for field in story_fields:
story_fields_dict[field] = (metadata_dict[field])
metadata['Displacement_story'] = story_fields_dict
# Populate the dict with strand 1 data if the country has a current appeal
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (metadata_dict[names_01])
metadata['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (metadata_dict[names_02])
metadata['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (metadata_dict[names_03])
if metadata_dict['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = metadata_dict['Top 5 Donors']
metadata['Strand_03_Aid'] = strand_03_dict
# At the higher level, structure the json with 'data' and 'metadata'
final_json = {
'data': data,
'metadata': metadata
}
return final_json, metadata, df_final
def run():
    print('Pulling and merging data')
final_json, metadata, final_csv = merge_data()
    print('Writing Combined JSON file')
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.json'), 'w') as outfile:
json.dump(final_json, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
    print('Writing Combined JSON metadata file')
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker_metadata.json'), 'w') as outfile:
json.dump(metadata, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
    print('Writing Combined CSV file')
final_csv.to_csv(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.csv'), index_label='CountryCode', encoding='utf-8')
if __name__ == "__main__":
run()
| mit |
Tong-Chen/scikit-learn | sklearn/tests/test_cross_validation.py | 4 | 30858 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import make_scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y=None, sample_weight=None, class_prior=None):
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
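# Illustrative use of the two helpers above; this mirrors the real tests
# below and the numbers are arbitrary:
#
#     cv = cval.KFold(6, 3)
#     check_cv_coverage(cv, expected_n_iter=3, n_samples=6)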
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
y = [3, 3, -1, -1, 2]
cv = cval.StratifiedKFold(y, 3)
# checking there was only one warning.
assert_equal(len(w), 1)
# checking it has the right type
assert_equal(w[0].category, Warning)
# checking it's the right warning. This might be a bad test since it's
# a characteristic of the code and not a behavior
assert_true("The least populated class" in str(w[0]))
# Check that despite the warning the folds are still computed even
        # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for train, test in cval.StratifiedKFold(labels, 5):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
labels = [0] * 3 + [1] * 14
for skf in [cval.StratifiedKFold(labels[:i], 3) for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model w.r.t. the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
            # Checks if folds keep class proportions
p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
@ignore_warnings
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_leave_label_out_changing_labels():
"""Check that LeaveOneLabelOut and LeavePLabelOut work normally if
the labels variable is changed before calling __iter__"""
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X as list
clf = MockListClassifier()
scores = cval.cross_val_score(clf, X.tolist(), y)
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
score = cval.cross_val_score(clf, X, y, score_func=score_func)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
split = cval.train_test_split(X, X_s, y)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so the f1 score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# also test deprecated old way
with warnings.catch_warnings(record=True):
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (a.k.a. coefficient of determination) - should be the
    # same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
with warnings.catch_warnings(record=True):
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy", labels=np.ones(y.size),
random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
scorer = make_scorer(fbeta_score, beta=2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
random_state=0)
assert_almost_equal(score_label, .97, 2)
assert_almost_equal(pvalue_label, 0.01, 3)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y, cv=cv,
scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
# test with deprecated interface
with warnings.catch_warnings(record=True):
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, score_func=accuracy_score, cv=cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=False)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=False)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=False)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=False)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=False)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=False)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
            assert_equal(np.asarray(train).dtype.kind, 'b')
            assert_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=True)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=True)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=True)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=True)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=True)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
@ignore_warnings
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
| bsd-3-clause |
ahoyosid/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters and can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
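# For orientation, the three estimator calls at the heart of this example are
# sketched below (illustrative only; the variable names are placeholders and
# the real, complete calls appear further down in this script):
#
#   edge_model = covariance.GraphLassoCV()           # conditional dependencies
#   edge_model.fit(X)                                 # X: standardized variations
#   _, labels = cluster.affinity_propagation(edge_model.covariance_)
#   embedding = manifold.LocallyLinearEmbedding(
#       n_components=2, eigen_solver='dense', n_neighbors=6
#   ).fit_transform(X.T).T                            # 2D layout of the symbols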
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
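# Normalizing the precision matrix P by 1 / sqrt(diag(P)) on both sides gives,
# up to a sign flip of the off-diagonal terms, the partial correlations
# (partial_corr_ij = -P_ij / sqrt(P_ii * P_jj)); only absolute values are used
# below for the edge strengths, so the sign does not matter here.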
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
DESatAPSU/DAWDs | python/origBandpass_FITSToCSV.py | 1 | 1930 | # Converts STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits to
# y3a2_std_passband_extend3000_ugrizYatm.csv
#
# To run (bash):
# python origBandpass_FITSToCSV.py > origBandpass_FITSToCSV.log 2>&1 &
#
# To run (tcsh):
# python origBandpass_FITSToCSV.py >& origBandpass_FITSToCSV.log &
#
# DLT, 2017-06-30
# based in part on scripts by Jack Mueller and Jacob Robertson.
# Initial setup...
import numpy as np
import pandas as pd
import os
import string
import shutil
import pyfits
# Be sure to edit these next two lines appropriately...
bandsDir = '/Users/dtucker/IRAF/DECam/StdBands_Y3A2_extend3000'
inputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits'
# List of filter bands (plus atm)...
bandList = ['g', 'r', 'i', 'z', 'Y', 'atm']
# Read in inputFile to create a reformatted version in CSV format...
hdulist = pyfits.open(inputFile)
tbdata = hdulist[1].data
# Create lists from each column...
lambdaList = tbdata['LAMBDA'].tolist()
gList = tbdata['g'].tolist()
rList = tbdata['r'].tolist()
iList = tbdata['i'].tolist()
zList = tbdata['z'].tolist()
YList = tbdata['Y'].tolist()
atmList = tbdata['atm'].tolist()
# Create pandas dataframe from the lists...
df = pd.DataFrame(np.column_stack([lambdaList,gList,rList,iList,zList,YList,atmList]),
columns=['lambda','g','r','i','z','Y','atm'])
# Output the full table as a CSV file
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
df.to_csv(outputFile,index=False)
# Output individual bands (+atm)...
for band in bandList:
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.'+band+'.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
columnNames = ['lambda',band]
df.to_csv(outputFile,index=False,columns=columnNames,header=False)
# Finis!
exit()
| mit |
nmartensen/pandas | asv_bench/benchmarks/gil.py | 7 | 11003 | from .pandas_vb_common import *
from pandas.core.algorithms import take_1d
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
def test_parallel(num_threads=1):
def wrapper(fname):
return fname
return wrapper
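# Fallback: when pandas.util.testing.test_parallel cannot be imported, this
# stand-in decorator returns the function unchanged so the module still
# imports; the setup methods below then raise NotImplementedError, which
# effectively skips the multi-threaded benchmarks.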
class NoGilGroupby(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
np.random.seed(1234)
self.size = 2 ** 22
self.ngroups = 100
self.data = Series(np.random.randint(0, self.ngroups, size=self.size))
if (not have_real_test_parallel):
raise NotImplementedError
@test_parallel(num_threads=2)
def _pg2_count(self):
self.df.groupby('key')['data'].count()
def time_count_2(self):
self._pg2_count()
@test_parallel(num_threads=2)
def _pg2_last(self):
self.df.groupby('key')['data'].last()
def time_last_2(self):
self._pg2_last()
@test_parallel(num_threads=2)
def _pg2_max(self):
self.df.groupby('key')['data'].max()
def time_max_2(self):
self._pg2_max()
@test_parallel(num_threads=2)
def _pg2_mean(self):
self.df.groupby('key')['data'].mean()
def time_mean_2(self):
self._pg2_mean()
@test_parallel(num_threads=2)
def _pg2_min(self):
self.df.groupby('key')['data'].min()
def time_min_2(self):
self._pg2_min()
@test_parallel(num_threads=2)
def _pg2_prod(self):
self.df.groupby('key')['data'].prod()
def time_prod_2(self):
self._pg2_prod()
@test_parallel(num_threads=2)
def _pg2_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_2(self):
self._pg2_sum()
@test_parallel(num_threads=4)
def _pg4_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_4(self):
self._pg4_sum()
def time_sum_4_notp(self):
for i in range(4):
self.df.groupby('key')['data'].sum()
def _f_sum(self):
self.df.groupby('key')['data'].sum()
@test_parallel(num_threads=8)
def _pg8_sum(self):
self._f_sum()
def time_sum_8(self):
self._pg8_sum()
def time_sum_8_notp(self):
for i in range(8):
self._f_sum()
@test_parallel(num_threads=2)
def _pg2_var(self):
self.df.groupby('key')['data'].var()
def time_var_2(self):
self._pg2_var()
# get groups
def _groups(self):
self.data.groupby(self.data).groups
@test_parallel(num_threads=2)
def _pg2_groups(self):
self._groups()
def time_groups_2(self):
self._pg2_groups()
@test_parallel(num_threads=4)
def _pg4_groups(self):
self._groups()
def time_groups_4(self):
self._pg4_groups()
@test_parallel(num_threads=8)
def _pg8_groups(self):
self._groups()
def time_groups_8(self):
self._pg8_groups()
class nogil_take1d_float64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
    def time_nogil_take1d_float64(self):
        self.take_1d_pg2_float64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_take1d_int64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
    def time_nogil_take1d_int64(self):
        self.take_1d_pg2_int64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_kth_smallest(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.N = 10000000
self.k = 500000
self.a = np.random.randn(self.N)
self.b = self.a.copy()
self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]
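        # One array per thread in kwargs_list: kth_smallest presumably
        # reorders its input in place, so each of the two threads gets its
        # own copy to work on.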
def time_nogil_kth_smallest(self):
@test_parallel(num_threads=2, kwargs_list=self.kwargs_list)
def run(arr):
algos.kth_smallest(arr, self.k)
run()
class nogil_datetime_fields(object):
goal_time = 0.2
def setup(self):
self.N = 100000000
self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T')
self.period = self.dti.to_period('D')
if (not have_real_test_parallel):
raise NotImplementedError
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
def run(dti):
dti.year
run(self.dti)
def time_datetime_field_day(self):
@test_parallel(num_threads=2)
def run(dti):
dti.day
run(self.dti)
def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
def run(dti):
dti.days_in_month
run(self.dti)
def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
def run(dti):
dti.normalize()
run(self.dti)
def time_datetime_to_period(self):
@test_parallel(num_threads=2)
def run(dti):
dti.to_period('S')
run(self.dti)
def time_period_to_datetime(self):
@test_parallel(num_threads=2)
def run(period):
period.to_timestamp()
run(self.period)
class nogil_rolling_algos_slow(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(100000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_median(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_median(arr, win)
run(self.arr, self.win)
class nogil_rolling_algos_fast(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(1000000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_mean(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_mean(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_min(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_min(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_max(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_max(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_var(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_var(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_skew(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_skew(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_kurt(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_kurt(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_std(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_std(arr, win)
run(self.arr, self.win)
class nogil_read_csv(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
# Using the values
self.df = DataFrame(np.random.randn(10000, 50))
self.df.to_csv('__test__.csv')
self.rng = date_range('1/1/2000', periods=10000)
self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)
self.df_date_time.to_csv('__test_datetime__.csv')
self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))
self.df_object.to_csv('__test_object__.csv')
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
@test_parallel(num_threads=2)
def pg_read_csv(self):
read_csv('__test__.csv', sep=',', header=None, float_precision=None)
def time_read_csv(self):
self.pg_read_csv()
@test_parallel(num_threads=2)
def pg_read_csv_object(self):
read_csv('__test_object__.csv', sep=',')
def time_read_csv_object(self):
self.pg_read_csv_object()
@test_parallel(num_threads=2)
def pg_read_csv_datetime(self):
read_csv('__test_datetime__.csv', sep=',', header=None)
def time_read_csv_datetime(self):
self.pg_read_csv_datetime()
class nogil_factorize(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.strings = tm.makeStringIndex(100000)
def factorize_strings(self):
pd.factorize(self.strings)
@test_parallel(num_threads=4)
def _pg_factorize_strings_4(self):
self.factorize_strings()
def time_factorize_strings_4(self):
for i in range(2):
self._pg_factorize_strings_4()
@test_parallel(num_threads=2)
def _pg_factorize_strings_2(self):
self.factorize_strings()
def time_factorize_strings_2(self):
for i in range(4):
self._pg_factorize_strings_2()
def time_factorize_strings(self):
for i in range(8):
self.factorize_strings()
| bsd-3-clause |
great-expectations/great_expectations | tests/datasource/test_batch_generators.py | 1 | 6706 | import os
from great_expectations.datasource.batch_kwargs_generator import (
DatabricksTableBatchKwargsGenerator,
GlobReaderBatchKwargsGenerator,
SubdirReaderBatchKwargsGenerator,
)
try:
from unittest import mock
except ImportError:
    import mock
def test_file_kwargs_generator(
data_context_parameterized_expectation_suite, filesystem_csv
):
base_dir = filesystem_csv
datasource = data_context_parameterized_expectation_suite.add_datasource(
"default",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(base_dir),
}
},
)
generator = datasource.get_batch_kwargs_generator("subdir_reader")
known_data_asset_names = datasource.get_available_data_asset_names()
# Use set to avoid order dependency
assert set(known_data_asset_names["subdir_reader"]["names"]) == {
("f1", "file"),
("f2", "file"),
("f3", "directory"),
}
f1_batches = [
batch_kwargs["path"]
for batch_kwargs in generator.get_iterator(data_asset_name="f1")
]
assert len(f1_batches) == 1
expected_batches = [{"path": os.path.join(base_dir, "f1.csv")}]
for batch in expected_batches:
assert batch["path"] in f1_batches
f3_batches = [
batch_kwargs["path"]
for batch_kwargs in generator.get_iterator(data_asset_name="f3")
]
assert len(f3_batches) == 2
expected_batches = [
{"path": os.path.join(base_dir, "f3", "f3_20190101.csv")},
{"path": os.path.join(base_dir, "f3", "f3_20190102.csv")},
]
for batch in expected_batches:
assert batch["path"] in f3_batches
def test_glob_reader_generator(basic_pandas_datasource, tmp_path_factory):
"""Provides an example of how glob generator works: we specify our own
names for data_assets, and an associated glob; the generator
will take care of providing batches consisting of one file per
batch corresponding to the glob."""
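    # For reference, an asset_globs entry maps an asset name to a glob, e.g.
    # {"blargs": {"glob": "*.blarg"}}; every file matching the glob becomes one
    # batch whose batch_kwargs carry that file's path (see the assertions
    # further below).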
basedir = str(tmp_path_factory.mktemp("test_glob_reader_generator"))
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f5.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f6.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g2 = GlobReaderBatchKwargsGenerator(
base_directory=basedir,
datasource=basic_pandas_datasource,
asset_globs={"blargs": {"glob": "*.blarg"}, "fs": {"glob": "f*"}},
)
g2_assets = g2.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g2_assets["names"]) == {("blargs", "path"), ("fs", "path")}
blargs_kwargs = [x["path"] for x in g2.get_iterator(data_asset_name="blargs")]
real_blargs = [
os.path.join(basedir, "f1.blarg"),
os.path.join(basedir, "f3.blarg"),
os.path.join(basedir, "f4.blarg"),
os.path.join(basedir, "f5.blarg"),
os.path.join(basedir, "f6.blarg"),
]
for kwargs in real_blargs:
assert kwargs in blargs_kwargs
assert len(blargs_kwargs) == len(real_blargs)
def test_file_kwargs_generator_extensions(tmp_path_factory):
"""csv, xls, parquet, json should be recognized file extensions"""
basedir = str(tmp_path_factory.mktemp("test_file_kwargs_generator_extensions"))
# Do not include: invalid extension
with open(os.path.join(basedir, "f1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include
with open(os.path.join(basedir, "f2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid subdir, but no valid files in it
os.mkdir(os.path.join(basedir, "f3"))
with open(os.path.join(basedir, "f3", "f3_1.blarg"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f3", "f3_2.blarg"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid subdir with valid files
os.mkdir(os.path.join(basedir, "f4"))
with open(os.path.join(basedir, "f4", "f4_1.csv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f4", "f4_2.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Do not include: valid extension, but dot prefix
with open(os.path.join(basedir, ".f5.csv"), "w") as outfile:
outfile.write("\n\n\n")
# Include: valid extensions
with open(os.path.join(basedir, "f6.tsv"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f7.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f8.parquet"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f9.xls"), "w") as outfile:
outfile.write("\n\n\n")
with open(os.path.join(basedir, "f0.json"), "w") as outfile:
outfile.write("\n\n\n")
g1 = SubdirReaderBatchKwargsGenerator(datasource="foo", base_directory=basedir)
g1_assets = g1.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(g1_assets["names"]) == {
("f7", "file"),
("f4", "directory"),
("f6", "file"),
("f0", "file"),
("f2", "file"),
("f9", "file"),
("f8", "file"),
}
def test_databricks_generator(basic_sparkdf_datasource):
generator = DatabricksTableBatchKwargsGenerator(datasource=basic_sparkdf_datasource)
available_assets = generator.get_available_data_asset_names()
# We have no tables available
assert available_assets == {"names": []}
databricks_kwargs_iterator = generator.get_iterator(data_asset_name="foo")
kwargs = [batch_kwargs for batch_kwargs in databricks_kwargs_iterator]
assert "select * from" in kwargs[0]["query"].lower()
| apache-2.0 |
jblackburne/scikit-learn | sklearn/gaussian_process/gpc.py | 42 | 31571 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
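# A rough, self-contained sketch of how such coefficients could be
# regenerated (illustrative only -- the exact broadcasting is an assumption
# about the pseudo-code in the comment above):
#
#   from scipy.linalg import lstsq
#   from scipy.special import erf, expit
#   x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
#   lambdas = np.array([0.41, 0.4, 0.37, 0.44, 0.39])
#   b = expit(x)                                   # logistic sigmoid targets
#   A = (erf(x[:, np.newaxis] * lambdas) + 1) / 2  # basis functions, shape (6, 5)
#   coefs = lstsq(A, b)[0]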
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
        In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
        theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels are assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
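# Added illustrative sketch (not part of the original module): fit the
# multi-class classifier defined above on a small three-class problem and
# inspect the attributes documented in the class docstring. The iris loader
# used here is an assumption made only for this demo.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    iris = load_iris()
    gpc_demo = GaussianProcessClassifier().fit(iris.data, iris.target)
    # With three classes, ``kernel_`` is a CompoundKernel of the per-class
    # kernels and ``log_marginal_likelihood_value_`` is the mean over the
    # one-versus-rest estimators, as described above.
    print(gpc_demo.kernel_)
    print(gpc_demo.log_marginal_likelihood_value_)
    print(gpc_demo.predict_proba(iris.data[:3]))  # shape (3, 3); rows sum to 1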
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
untom/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
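# Added illustrative helper (not part of the original module): a quick check,
# on synthetic data, that ``get_precision`` is indeed the inverse of
# ``get_covariance`` as its docstring claims. The local PCA import and the
# random test data are assumptions made only for this sketch.
def _check_precision_is_inverse_of_covariance(n_samples=50, n_features=5):
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=3).fit(X)
    # The product of covariance and precision should be the identity matrix.
    identity = np.dot(pca.get_covariance(), pca.get_precision())
    return np.allclose(identity, np.eye(n_features))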
| bsd-3-clause |
dmargala/qusp | examples/compare_delta.py | 1 | 7364 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib.pyplot as plt
import scipy.interpolate
import fitsio
class DeltaLOS(object):
def __init__(self, thing_id):
path = '/data/lya/deltas/delta-%d.fits' % thing_id
hdulist = fitsio.FITS(path, mode=fitsio.READONLY)
self.pmf = hdulist[1].read_header()['pmf']
self.loglam = hdulist[1]['loglam'][:]
self.wave = np.power(10.0, self.loglam)
self.delta = hdulist[1]['delta'][:]
self.weight = hdulist[1]['weight'][:]
self.cont = hdulist[1]['cont'][:]
self.msha = hdulist[1]['msha'][:]
self.mabs = hdulist[1]['mabs'][:]
self.ivar = hdulist[1]['ivar'][:]
self.cf = self.cont*self.msha*self.mabs
self.flux = (1+self.delta)*self.cf
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
## targets to fit
parser.add_argument("--name", type=str, default=None,
help="target list")
parser.add_argument("--gamma", type=float, default=3.8,
help="LSS growth and redshift evolution of mean absorption gamma")
parser.add_argument("--index", type=int, default=1000,
help="target index")
parser.add_argument("--pmf", type=str, default=None,
help="target plate-mjd-fiber string")
args = parser.parse_args()
    print('Loading forest data...')
# import data
skim = h5py.File(args.name+'.hdf5', 'r')
if args.pmf:
plate, mjd, fiber = [int(val) for val in args.pmf.split('-')]
index = np.where((skim['meta']['plate'] == plate) & (skim['meta']['mjd'] == mjd) & (skim['meta']['fiber'] == fiber))[0][0]
else:
index = args.index
flux = np.ma.MaskedArray(skim['flux'][index], mask=skim['mask'][index])
ivar = np.ma.MaskedArray(skim['ivar'][index], mask=skim['mask'][index])
loglam = skim['loglam'][:]
wave = np.power(10.0, loglam)
z = skim['z'][index]
norm = skim['norm'][index]
meta = skim['meta'][index]
linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')
a = linear_continuum['params_a'][index]
b = linear_continuum['params_b'][index]
continuum = linear_continuum['continuum']
continuum_wave = linear_continuum['continuum_wave']
continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, ext=1, s=0)
abs_alpha = linear_continuum.attrs['abs_alpha']
abs_beta = linear_continuum.attrs['abs_beta']
forest_wave_ref = (1+z)*linear_continuum.attrs['forest_wave_ref']
wave_lya = linear_continuum.attrs['wave_lya']
forest_pixel_redshifts = wave/wave_lya - 1
abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)
    print('flux 1280 Ang: %.2f' % norm)
    print('fit param a: %.2f' % a)
    print('fit param b: %.2f' % b)
def model_flux(a, b):
return a*np.power(wave/forest_wave_ref, b)*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
def chisq(p):
mflux = model_flux(p[0], p[1])
res = flux - mflux
return ma.sum(res*res*ivar)/ma.sum(ivar)
from scipy.optimize import minimize
result = minimize(chisq, (a, b))
a,b = result.x
    print('fit param a: %.2f' % a)
    print('fit param b: %.2f' % b)
    # rest and obs refer to pixel grid
    print('Estimating deltas in forest frame...')
mflux = model_flux(a,b)
delta_flux = flux/mflux - 1.0
delta_ivar = ivar*mflux*mflux
forest_min_z = linear_continuum.attrs['forest_min_z']
forest_max_z = linear_continuum.attrs['forest_max_z']
forest_dz = 0.1
forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)
    print('Adjusting weights for pipeline variance and LSS variance...')
var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)
var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)
delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)
delta_weight = delta_weight/(1 + delta_weight*var_lss(forest_pixel_redshifts))
thing_id = meta['thing_id']
pmf = '%s-%s-%s' % (meta['plate'],meta['mjd'],meta['fiber'])
los = DeltaLOS(thing_id)
my_msha = norm*a*np.power(wave/forest_wave_ref, b)
my_wave = wave
my_flux = norm*flux
my_cf = my_msha*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
my_ivar = ivar/(norm*norm)
my_delta = delta_flux
my_weight = delta_weight
# mean_ratio = np.average(my_msha*continuum)/ma.average(los.msha*los.cont)
# print mean_ratio
plt.figure(figsize=(12,4))
plt.plot(my_wave, my_flux, color='gray')
my_dflux = ma.power(my_ivar, -0.5)
plt.fill_between(my_wave, my_flux - my_dflux, my_flux + my_dflux, color='gray', alpha=0.5)
plt.plot(my_wave, my_msha*continuum_interp(wave/(1+z)), label='My continuum', color='blue')
plt.plot(los.wave, los.cont, label='Busca continuum', color='red')
plt.plot(my_wave, my_cf, label='My cf', color='green')
plt.plot(los.wave, los.cf, label='Busca cf', color='orange')
plt.legend()
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Observed Flux')
plt.xlim(los.wave[[0,-1]])
plt.savefig(args.name+'-example-flux.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,4))
my_delta_sigma = ma.power(delta_weight, -0.5)
# plt.fill_between(my_wave, my_delta - my_delta_sigma, my_delta + my_delta_sigma, color='blue', alpha=0.1, label='My Delta')
plt.scatter(my_wave, my_delta, color='blue', marker='+', label='My Delta')
plt.plot(my_wave, +my_delta_sigma, color='blue', ls=':')
plt.plot(my_wave, -my_delta_sigma, color='blue', ls=':')
los_delta_sigma = ma.power(los.weight, -0.5)
# plt.fill_between(los.wave, los.delta - los_delta_sigma, los.delta + los_delta_sigma, color='red', alpha=01, label='Busca Delta')
plt.scatter(los.wave, los.delta, color='red', marker='+', label='Busca Delta')
plt.plot(los.wave, +los_delta_sigma, color='red', ls=':')
plt.plot(los.wave, -los_delta_sigma, color='red', ls=':')
my_lss_sigma = np.sqrt(var_lss(forest_pixel_redshifts))
plt.plot(my_wave, +my_lss_sigma, color='black', ls='--')
plt.plot(my_wave, -my_lss_sigma, color='black', ls='--')
# my_sn_sigma = np.sqrt(np.power(1 + forest_pixel_redshifts, 0.5*abs_beta))/10
# plt.plot(my_wave, +my_sn_sigma, color='orange', ls='--')
# plt.plot(my_wave, -my_sn_sigma, color='orange', ls='--')
# import matplotlib.patches as mpatches
#
# blue_patch = mpatches.Patch(color='blue', alpha=0.3, label='My Delta')
# red_patch = mpatches.Patch(color='red', alpha=0.3, label='Busca Delta')
# plt.legend(handles=[blue_patch,red_patch])
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.ylim(-2,2)
plt.xlim(los.wave[[0,-1]])
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta')
plt.legend()
plt.savefig(args.name+'-example-delta.png', dpi=100, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
guildai/guild | examples/iris-svm/plot_iris_exercise.py | 1 | 1702 | """
A tutorial exercise for using different SVM kernels.
Adapted from:
https://scikit-learn.org/stable/auto_examples/exercises/plot_iris_exercise.html
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn import datasets, svm
kernel = 'linear' # choice of linear, rbf, poly
test_split = 0.1
random_seed = 0
degree = 3
gamma = 10
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(random_seed)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
split_pos = int((1 - test_split) * n_sample)
X_train = X[:split_pos]
y_train = y[:split_pos]
X_test = X[split_pos:]
y_test = y[split_pos:]
# fit the model
clf = svm.SVC(kernel=kernel, degree=degree, gamma=gamma)
clf.fit(X_train, y_train)
print("Train accuracy: %s" % clf.score(X_train, y_train))
print("Test accuracy: %f" % clf.score(X_test, y_test))
plt.figure()
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,
edgecolor='k', s=20)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',
zorder=10, edgecolor='k')
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.savefig("plot.png")
| apache-2.0 |
jzt5132/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
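# Added illustration (not in the original example): MidpointNormalize maps
# vmin -> 0.0, midpoint -> 0.5 and vmax -> 1.0, which is what lets the heatmap
# below spread most of the colormap over the narrow range of interesting
# cross-validation scores. The values used here are chosen only for the demo.
_norm_demo = MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92)
print("MidpointNormalize demo: %s" % _norm_demo(np.array([0.2, 0.92, 1.0])))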
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. Here a subset of 330
points is used for training, of which only 30 are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the labels and mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
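# Added sketch (not in the original example): a single summary number backing
# up the claim above that results with only 30 labels are very good. The
# accuracy_score helper from sklearn.metrics is used only for this check.
from sklearn.metrics import accuracy_score
print("Accuracy on the %d unlabeled points: %0.3f"
      % (len(unlabeled_set), accuracy_score(true_labels, predicted_labels)))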
| bsd-3-clause |
nmartensen/pandas | asv_bench/benchmarks/categoricals.py | 3 | 2803 | from .pandas_vb_common import *
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Categoricals(object):
goal_time = 0.2
def setup(self):
N = 100000
self.s = pd.Series((list('aabbcd') * N)).astype('category')
self.a = pd.Categorical((list('aabbcd') * N))
self.b = pd.Categorical((list('bbcdjk') * N))
self.categories = list('abcde')
self.cat_idx = Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
self.datetimes = pd.Series(pd.date_range(
'1995-01-01 00:00:00', periods=10000, freq='s'))
def time_concat(self):
concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
def time_constructor_regular(self):
Categorical(self.values, self.categories)
def time_constructor_fastpath(self):
Categorical(self.codes, self.cat_idx, fastpath=True)
def time_constructor_datetimes(self):
Categorical(self.datetimes)
def time_constructor_datetimes_with_nat(self):
t = self.datetimes
t.iloc[-1] = pd.NaT
Categorical(t)
class Categoricals2(object):
goal_time = 0.2
def setup(self):
n = 500000
np.random.seed(2718281)
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = Series(arr).astype('category')
self.sel = self.ts.loc[[0]]
def time_value_counts(self):
self.ts.value_counts(dropna=False)
def time_value_counts_dropna(self):
self.ts.value_counts(dropna=True)
def time_rendering(self):
str(self.sel)
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class Categoricals3(object):
goal_time = 0.2
def setup(self):
N = 100000
ncats = 100
self.s1 = Series(np.array(tm.makeCategoricalIndex(N, ncats)))
self.s1_cat = self.s1.astype('category')
self.s1_cat_ordered = self.s1.astype('category', ordered=True)
self.s2 = Series(np.random.randint(0, ncats, size=N))
self.s2_cat = self.s2.astype('category')
self.s2_cat_ordered = self.s2.astype('category', ordered=True)
def time_rank_string(self):
self.s1.rank()
def time_rank_string_cat(self):
self.s1_cat.rank()
def time_rank_string_cat_ordered(self):
self.s1_cat_ordered.rank()
def time_rank_int(self):
self.s2.rank()
def time_rank_int_cat(self):
self.s2_cat.rank()
def time_rank_int_cat_ordered(self):
self.s2_cat_ordered.rank()
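# Added sketch (not part of the original benchmark file): what the
# ``time_union`` benchmark above exercises -- ``union_categoricals`` merges
# the categories of its inputs and returns a single Categorical. This helper
# is illustrative only and is not collected by asv.
def _union_categoricals_demo():
    a = pd.Categorical(list('aabbcd'))
    b = pd.Categorical(list('bbcdjk'))
    return union_categoricals([a, b]).categories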
| bsd-3-clause |
YinongLong/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
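    # Added sketch (not in the original example): the FunctionTransformer used
    # in the pipeline simply wraps ``all_but_first_column``, so applying it to
    # a tiny array drops the first column. The demo array is arbitrary.
    demo_transformer = FunctionTransformer(all_but_first_column)
    print(demo_transformer.fit_transform(np.array([[1, 2, 3], [4, 5, 6]])))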
| bsd-3-clause |
newville/scikit-image | doc/examples/plot_rank_mean.py | 17 | 1499 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: uses all pixels belonging to the structuring element to
  compute the average gray level.
* **percentile mean**: only uses values between percentiles p0 and p1
  (here 10% and 90%).
* **bilateral mean**: only uses pixels of the structuring element having a gray
  level situated inside g-s0 and g+s1 (here g-500 and g+500).
Percentile and usual mean give similar results here; these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous areas (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
| bsd-3-clause |
dipanjanS/text-analytics-with-python | Old-First-Edition/Ch06_Text_Similarity_and_Clustering/utils.py | 1 | 1097 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 11 23:06:06 2016
@author: DIP
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def build_feature_matrix(documents, feature_type='frequency',
ngram_range=(1, 1), min_df=0.0, max_df=1.0):
feature_type = feature_type.lower().strip()
if feature_type == 'binary':
vectorizer = CountVectorizer(binary=True, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'frequency':
vectorizer = CountVectorizer(binary=False, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'tfidf':
vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df,
ngram_range=ngram_range)
else:
raise Exception("Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'")
feature_matrix = vectorizer.fit_transform(documents).astype(float)
return vectorizer, feature_matrix | apache-2.0 |
billy-inn/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student t variables with a low number of degrees of freedom
(top left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
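# Added check (not in the original example) illustrating the point made in the
# docstring: the PCA directions are orthogonal in the raw feature space, while
# the ICA mixing directions need not be.
print("PCA components orthogonal: %s"
      % np.allclose(np.dot(pca.components_[0], pca.components_[1]), 0.0))
print("ICA mixing columns dot product: %.3f"
      % np.dot(ica.mixing_[:, 0], ica.mixing_[:, 1]))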
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
hrjn/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str
    # nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised when the connectivity
        # matrix has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
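def test_assess_same_labelling_is_permutation_invariant():
    # Added illustrative test (not in the original suite): the helper above
    # compares co-clustering matrices, so relabelling the clusters does not
    # change the outcome.
    assess_same_labelling(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0]))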
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non-sparse
    # matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Check non-regression of a bug when a connectivity that does not
    # support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and
    # 0.02 * n_samples), we should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
mne-tools/mne-python | mne/viz/circle.py | 14 | 15879 | """Functions to plot on circle as for connectivity."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
        List with node names defining the order in which the nodes are
        arranged. Must contain the same elements as node_names, but the
        order can be different. The nodes are arranged clockwise starting
        at "start_pos" degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
        be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(n_node_names,)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int64)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have strictly '
                             'increasing values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
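# A minimal usage sketch for circular_layout (added for illustration; the
# node names below are invented and not part of this module). Two group
# boundaries split the circle into two groups separated by "group_sep":
#
#     >>> names = ['LF', 'RF', 'LT', 'RT']  # doctest:+SKIP
#     >>> angles = circular_layout(names, names, start_pos=90,
#     ...                          group_boundaries=[0, 2])  # doctest:+SKIP
#     >>> # angles now holds one angle in degrees per node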
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolate connections around a single node when user left clicks a node.
On right click, resets all connections.
"""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of array | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape (n_node_names,) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuple | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str | instance of matplotlib.colors.LinearSegmentedColormap
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : tuple, shape (2,)
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.figure.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | tuple, shape (3,)
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure handle.
axes : instance of matplotlib.projections.polar.PolarAxes
The subplot handle.
Notes
-----
This code is based on a circle graph example by Nicolas P. Rougier
By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``
into account when saving, even if set when a figure is generated. This
can be addressed via, e.g.::
>>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP
If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the
figure labels, title, and legend may be cut off in the output figure.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
try:
spectral = plt.cm.spectral
except AttributeError:
spectral = plt.cm.Spectral
node_colors = [spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True)
axes.set_facecolor(facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int64)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
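# A minimal usage sketch combining circular_layout and
# plot_connectivity_circle (added for illustration; the labels and the
# random connectivity matrix are invented and not part of this module):
#
#     >>> import numpy as np  # doctest:+SKIP
#     >>> labels = ['A', 'B', 'C', 'D', 'E']  # doctest:+SKIP
#     >>> con = np.random.RandomState(0).rand(5, 5)  # doctest:+SKIP
#     >>> angles = circular_layout(labels, labels, start_pos=90)  # doctest:+SKIP
#     >>> fig, ax = plot_connectivity_circle(con, labels, n_lines=5,
#     ...                                    node_angles=angles)  # doctest:+SKIP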
| bsd-3-clause |
zorroblue/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
image of size 512 x 512 pixels, is used here to illustrate
how `k`-means can be used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
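# Rough compression figure (added for illustration): with a codebook of
# n_clusters gray levels, each pixel needs about log2(n_clusters) bits
# instead of the 8 bits of the original image.
print("approximate bits per pixel after quantization: %.2f"
      % np.log2(n_clusters))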
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. The way we proceed is that we load an image at a time
and randomly extract 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
`partial_fit`. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
smorante/continuous-goal-directed-actions | demonstration-feature-selection/src/alternatives/main_dtw_mds_norm.py | 2 | 3731 | # -*- coding: utf-8 -*-
"""
Author: Santiago Morante
Robotics Lab. Universidad Carlos III de Madrid
"""
########################## DTW ####################################
import libmddtw
import matplotlib.pyplot as plt
from dtw import dtw
########################## MDS ####################################
import numpy as np
from sklearn.metrics import euclidean_distances
import libmds
########################## DBSCAN ####################################
import libdbscan
from sklearn.preprocessing import StandardScaler # to normalize
def normalize(X):
return StandardScaler().fit_transform(X)
def main():
NUMBER_OF_DEMONSTRATIONS=5
##########################################################################
########################## DTW ####################################
##########################################################################
dist=np.zeros((NUMBER_OF_DEMONSTRATIONS,NUMBER_OF_DEMONSTRATIONS))
demons=[]
# fill demonstrations
for i in range(NUMBER_OF_DEMONSTRATIONS):
demons.append(np.matrix([ np.sin(np.arange(15+i)+i) , np.sin(np.arange(15+i)+i)]))
# fill distance matrix
for i in range(NUMBER_OF_DEMONSTRATIONS):
for j in range(NUMBER_OF_DEMONSTRATIONS):
mddtw = libmddtw.Mddtw()
x,y = mddtw.collapseRows(demons[i],demons[j])
#fig = plt.figure()
#plt.plot(x)
#plt.plot(y)
singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])
dist[i][j]=singleDist
# print 'Minimum distance found:', singleDist
#fig = plt.figure()
# plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')
# plt.plot(path[0], path[1], 'w')
# plt.xlim((-0.5, cost.shape[0]-0.5))
# plt.ylim((-0.5, cost.shape[1]-0.5))
# print "dist", dist
###########################################################################
########################### MDS ####################################
###########################################################################
md = libmds.Mds()
md.create(n_components=1, metric=False, max_iter=3000, eps=1e-9, random_state=None,
dissimilarity="precomputed", n_jobs=1)
points = md.compute(dist)
print "points", points.flatten()
# md.plot()
##########################################################################
########################## norm ####################################
##########################################################################
from scipy.stats import norm
from numpy import linspace
from pylab import plot,show,hist,figure,title
param = norm.fit(points.flatten()) # distribution fitting
x = linspace(np.min(points),np.max(points),NUMBER_OF_DEMONSTRATIONS)
pdf_fitted = norm.pdf(x, loc=param[0],scale=param[1])
fig = plt.figure()
title('Normal distribution')
plot(x,pdf_fitted,'r-')
hist(points.flatten(),normed=1,alpha=.3)
show()
for elem in points:
if elem <= np.mean(points):
print "probability of point ", str(elem), ": ", norm.cdf(elem, loc=param[0],scale=param[1])
if elem > np.mean(points):
print "probability of point ", str(elem), ": ", 1-norm.cdf(elem, loc=param[0],scale=param[1])
##############################################################################
##############################################################################
if __name__ == "__main__":
main() | mit |
wavycloud/pyboto3 | pyboto3/glue.py | 1 | 692979 | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def batch_create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInputList=None):
"""
Creates one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInputList=[
{
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInputList: list
:param PartitionInputList: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be created.\n\n(dict) --The structure used to create and update a partition.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n\n\n
:rtype: dict
    Returns
    Response Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to create the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
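# Illustrative call sketch (added commentary, not part of pyboto3): against a
# real boto3 Glue client, the request shown in the docstring above would look
# roughly like the commented snippet below. The database, table, and
# partition values are placeholders.
#
#   import boto3
#   glue = boto3.client('glue')
#   glue.batch_create_partition(
#       DatabaseName='my_database',
#       TableName='my_table',
#       PartitionInputList=[{'Values': ['2020', '01']}])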
def batch_delete_connection(CatalogId=None, ConnectionNameList=None):
"""
Deletes a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_connection(
CatalogId='string',
ConnectionNameList=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type ConnectionNameList: list
:param ConnectionNameList: [REQUIRED]\nA list of names of the connections to delete.\n\n(string) --\n\n
:rtype: dict
    Returns
    Response Syntax
{
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
Response Structure
(dict) --
Succeeded (list) --
A list of names of the connection definitions that were successfully deleted.
(string) --
Errors (dict) --
A map of the names of connections that were not successfully deleted to error details.
(string) --
(dict) --
Contains details about an error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
:returns:
(string) --
"""
pass
def batch_delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToDelete=None):
"""
Deletes one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToDelete=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partitions to be deleted.\n
:type PartitionsToDelete: list
:param PartitionsToDelete: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be deleted.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
    Returns
    Response Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to delete the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
def batch_delete_table(CatalogId=None, DatabaseName=None, TablesToDelete=None):
"""
Deletes multiple tables at once.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table(
CatalogId='string',
DatabaseName='string',
TablesToDelete=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the tables to delete reside. For Hive compatibility, this name is entirely lowercase.\n
:type TablesToDelete: list
:param TablesToDelete: [REQUIRED]\nA list of the table to delete.\n\n(string) --\n\n
:rtype: dict
    Returns
    Response Syntax
{
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered in attempting to delete the specified tables.
(dict) --
An error record for table operations.
TableName (string) --
The name of the table. For Hive compatibility, this must be entirely lowercase.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def batch_delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionIds=None):
"""
Deletes a specified batch of versions of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionIds=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionIds: list
:param VersionIds: [REQUIRED]\nA list of the IDs of versions to be deleted. A VersionId is a string representation of an integer. Each version is incremented by 1.\n\n(string) --\n\n
:rtype: dict
    Returns
    Response Syntax
{
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered while trying to delete the specified table versions.
(dict) --
An error record for table-version operations.
TableName (string) --
The name of the table in question.
VersionId (string) --
The ID value of the version in question. A VersionID is a string representation of an integer. Each version is incremented by 1.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def batch_get_crawlers(CrawlerNames=None):
"""
    Returns a list of resource metadata for a given list of crawler names. After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_crawlers(
CrawlerNames=[
'string',
]
)
:type CrawlerNames: list
:param CrawlerNames: [REQUIRED]\nA list of crawler names, which might be the names returned from the ListCrawlers operation.\n\n(string) --\n\n
:rtype: dict
    Returns
    Response Syntax
    {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
Response Structure
(dict) --
Crawlers (list) --A list of crawler definitions.
(dict) --Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
CrawlersNotFound (list) --A list of names of crawlers that were not found.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
:returns:
(string) --
"""
pass
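# Illustrative call sketch (added commentary, not part of pyboto3): pairing
# ListCrawlers with BatchGetCrawlers on a real boto3 Glue client, as the
# docstring above suggests. The client setup is a placeholder.
#
#   import boto3
#   glue = boto3.client('glue')
#   names = glue.list_crawlers()['CrawlerNames']
#   crawlers = glue.batch_get_crawlers(CrawlerNames=names)['Crawlers']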
def batch_get_dev_endpoints(DevEndpointNames=None):
"""
    Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_dev_endpoints(
DevEndpointNames=[
'string',
]
)
:type DevEndpointNames: list
:param DevEndpointNames: [REQUIRED]\nThe list of DevEndpoint names, which might be the names returned from the ListDevEndpoint operation.\n\n(string) --\n\n
:rtype: dict
    Returns
    Response Syntax
    {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
Response Structure
(dict) --
DevEndpoints (list) --A list of DevEndpoint definitions.
(dict) --A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
DevEndpointsNotFound (list) --A list of DevEndpoints not found.
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
:returns:
(string) --
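Usage sketch (illustrative only, not part of the API reference; the endpoint name below is a hypothetical placeholder, and the snippet assumes boto3 is installed with AWS credentials and a region already configured):
import boto3

glue = boto3.client('glue')
# Request metadata for one or more development endpoints by name.
response = glue.batch_get_dev_endpoints(DevEndpointNames=['my-dev-endpoint'])
for endpoint in response.get('DevEndpoints', []):
    print(endpoint['EndpointName'], endpoint['Status'])
for name in response.get('DevEndpointsNotFound', []):
    print('Not found:', name)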
"""
pass
def batch_get_jobs(JobNames=None):
"""
Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_jobs(
JobNames=[
'string',
]
)
:type JobNames: list
:param JobNames: [REQUIRED]\nA list of job names, which might be the names returned from the ListJobs operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
Response Structure
(dict) --
Jobs (list) --A list of job definitions.
(dict) --Specifies a job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
JobsNotFound (list) --A list of names of jobs not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
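Usage sketch (illustrative only; the job name is a hypothetical placeholder and the snippet assumes a boto3 Glue client with AWS credentials already configured):
import boto3

glue = boto3.client('glue')
# Fetch full job definitions for a list of job names.
response = glue.batch_get_jobs(JobNames=['my-etl-job'])
for job in response.get('Jobs', []):
    print(job['Name'], job['Command']['Name'])
print('Not found:', response.get('JobsNotFound', []))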
"""
pass
def batch_get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToGet=None):
"""
Retrieves partitions in a batch request.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToGet=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type PartitionsToGet: list
:param PartitionsToGet: [REQUIRED]\nA list of partition values identifying the partitions to retrieve.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
Response Structure
(dict) --
Partitions (list) --
A list of the requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (== 0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
UnprocessedKeys (list) --
A list of the partition values in the request for which partitions were not returned.
(dict) --
Contains a list of values defining partitions.
Values (list) --
The list of values.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
:returns:
(string) --
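Usage sketch (illustrative only; the database, table, and partition values are hypothetical placeholders, and the snippet assumes a boto3 Glue client with AWS credentials already configured):
import boto3

glue = boto3.client('glue')
# Retrieve a batch of partitions by their partition-key values.
response = glue.batch_get_partition(
    DatabaseName='my_database',
    TableName='my_table',
    PartitionsToGet=[{'Values': ['2020', '01']}],
)
for partition in response.get('Partitions', []):
    print(partition['Values'], partition['StorageDescriptor']['Location'])
for key in response.get('UnprocessedKeys', []):
    print('Unprocessed:', key['Values'])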
"""
pass
def batch_get_triggers(TriggerNames=None):
"""
Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_triggers(
TriggerNames=[
'string',
]
)
:type TriggerNames: list
:param TriggerNames: [REQUIRED]\nA list of trigger names, which may be the names returned from the ListTriggers operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
Response Structure
(dict) --
Triggers (list) --A list of trigger definitions.
(dict) --Information about a specific trigger.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
TriggersNotFound (list) --A list of names of triggers not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
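Usage sketch (illustrative only; the trigger name is a hypothetical placeholder and the snippet assumes a boto3 Glue client with AWS credentials already configured):
import boto3

glue = boto3.client('glue')
response = glue.batch_get_triggers(TriggerNames=['my-nightly-trigger'])
for trigger in response.get('Triggers', []):
    print(trigger['Name'], trigger['Type'], trigger['State'])
print('Not found:', response.get('TriggersNotFound', []))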
"""
pass
def batch_get_workflows(Names=None, IncludeGraph=None):
"""
Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_workflows(
Names=[
'string',
],
IncludeGraph=True|False
)
:type Names: list
:param Names: [REQUIRED]\nA list of workflow names, which may be the names returned from the ListWorkflows operation.\n\n(string) --\n\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
ReturnsResponse Syntax
{
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
Response Structure
(dict) --
Workflows (list) --
A list of workflow resource metadata.
(dict) --
A workflow represents a flow in which AWS Glue components should be executed to complete a logical task.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger or a job, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger or a job, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
MissingWorkflows (list) --
A list of names of workflows not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
:returns:
(string) --
(string) --
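Usage sketch (illustrative only; the workflow name is a hypothetical placeholder and the snippet assumes a boto3 Glue client with AWS credentials already configured):
import boto3

glue = boto3.client('glue')
# Include the node/edge graph so the workflow's triggers, jobs, and crawlers are returned.
response = glue.batch_get_workflows(Names=['my-workflow'], IncludeGraph=True)
for workflow in response.get('Workflows', []):
    nodes = workflow.get('Graph', {}).get('Nodes', [])
    print(workflow['Name'], 'nodes:', [node['Name'] for node in nodes])
print('Missing:', response.get('MissingWorkflows', []))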
"""
pass
def batch_stop_job_run(JobName=None, JobRunIds=None):
"""
Stops one or more job runs for a specified job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_stop_job_run(
JobName='string',
JobRunIds=[
'string',
]
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to stop job runs.\n
:type JobRunIds: list
:param JobRunIds: [REQUIRED]\nA list of the JobRunIds that should be stopped for that job definition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
SuccessfulSubmissions (list) --
A list of the JobRuns that were successfully submitted for stopping.
(dict) --
Records a successful request to stop a specified JobRun .
JobName (string) --
The name of the job definition used in the job run that was stopped.
JobRunId (string) --
The JobRunId of the job run that was stopped.
Errors (list) --
A list of the errors that were encountered in trying to stop JobRuns , including the JobRunId for which each error was encountered and details about the error.
(dict) --
Records an error that occurred when attempting to stop a specified job run.
JobName (string) --
The name of the job definition that is used in the job run in question.
JobRunId (string) --
The JobRunId of the job run in question.
ErrorDetail (dict) --
Specifies details about the error that was encountered.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
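Usage sketch (illustrative only; the job name and run ID are hypothetical placeholders and the snippet assumes a boto3 Glue client with AWS credentials already configured):
import boto3

glue = boto3.client('glue')
response = glue.batch_stop_job_run(
    JobName='my-etl-job',
    JobRunIds=['jr_0123456789abcdef'],
)
for ok in response.get('SuccessfulSubmissions', []):
    print('Stopping', ok['JobRunId'])
for err in response.get('Errors', []):
    print('Failed to stop', err['JobRunId'], err['ErrorDetail']['ErrorMessage'])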
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
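# Illustrative sketch: can_paginate is the standard boto3 check before requesting
# a paginator. A configured boto3 Glue client is assumed, and 'get_jobs' is used
# here only as an example of a paginatable operation.
def _example_can_paginate():
    import boto3
    glue = boto3.client('glue')
    if glue.can_paginate('get_jobs'):
        paginator = glue.get_paginator('get_jobs')
        # Collect job names across all pages.
        return [job['Name'] for page in paginator.paginate() for job in page.get('Jobs', [])]
    return []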
def cancel_ml_task_run(TransformId=None, TaskRunId=None):
"""
Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run\'s parent transform\'s TransformID and the task run\'s TaskRunId .
See also: AWS API Documentation
Exceptions
:example: response = client.cancel_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nA unique identifier for the task run.\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the machine learning transform.
TaskRunId (string) --
The unique identifier for the task run.
Status (string) --
The status for this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
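# Illustrative sketch: cancelling a machine learning task run requires both the
# parent transform ID and the task run ID, per the request syntax above. The IDs
# below are placeholders; assumes a configured boto3 Glue client.
def _example_cancel_ml_task_run():
    import boto3
    glue = boto3.client('glue')
    response = glue.cancel_ml_task_run(
        TransformId='tfm-0123456789abcdef',   # placeholder transform ID
        TaskRunId='task-run-0123456789'       # placeholder task run ID
    )
    return response['Status']                 # e.g. 'STOPPING' or 'STOPPED'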
def create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Creates a classifier in the user\'s account. This can be a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field of the request is present.
See also: AWS API Documentation
Exceptions
:example: response = client.create_classifier(
GrokClassifier={
'Classification': 'string',
'Name': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Classification': 'string',
'Name': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nName (string) -- [REQUIRED]The name of the new classifier.\n\nGrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
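# Illustrative sketch: create_classifier takes exactly one of the classifier
# structures shown above; here a CSV classifier is created with placeholder
# values, assuming a configured boto3 Glue client.
def _example_create_csv_classifier():
    import boto3
    glue = boto3.client('glue')
    glue.create_classifier(
        CsvClassifier={
            'Name': 'example-csv-classifier',   # placeholder classifier name
            'Delimiter': ',',
            'QuoteSymbol': '"',                 # must differ from the delimiter
            'ContainsHeader': 'PRESENT',
            'DisableValueTrimming': False,
            'AllowSingleColumn': False
        }
    )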
def create_connection(CatalogId=None, ConnectionInput=None):
"""
Creates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_connection(
CatalogId='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object defining the connection to create.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
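# Illustrative sketch: a minimal JDBC connection definition. The endpoint and
# credential values are placeholders, and the ConnectionProperties keys are
# illustrative only; consult the AWS Glue documentation for the keys required by
# your connection type. Assumes a configured boto3 Glue client.
def _example_create_connection():
    import boto3
    glue = boto3.client('glue')
    glue.create_connection(
        ConnectionInput={
            'Name': 'example-jdbc-connection',   # placeholder connection name
            'ConnectionType': 'JDBC',
            'ConnectionProperties': {
                'JDBC_CONNECTION_URL': 'jdbc:mysql://example-host:3306/exampledb',
                'USERNAME': 'example_user',
                'PASSWORD': 'example_password'
            }
        }
    )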
def create_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None, Tags=None):
"""
Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.
See also: AWS API Documentation
Exceptions
:example: response = client.create_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string',
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the new crawler.\n
:type Role: string
:param Role: [REQUIRED]\nThe IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.\n
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the new crawler.
:type Targets: dict
:param Targets: [REQUIRED]\nA list of collection of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:type Tags: dict
:param Tags: The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {}
:returns:
(dict) --
"""
pass
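# Illustrative sketch: a crawler with a single Amazon S3 target and the schema
# change policy fields shown in the request syntax above. The role ARN, database,
# and bucket names are placeholders; assumes a configured boto3 Glue client.
def _example_create_crawler():
    import boto3
    glue = boto3.client('glue')
    glue.create_crawler(
        Name='example-crawler',
        Role='arn:aws:iam::123456789012:role/ExampleGlueCrawlerRole',  # placeholder
        DatabaseName='example_database',
        Targets={
            'S3Targets': [
                {'Path': 's3://example-bucket/raw/', 'Exclusions': ['**.tmp']}
            ]
        },
        Schedule='cron(15 12 * * ? *)',   # daily at 12:15 UTC, as in the docs above
        SchemaChangePolicy={
            'UpdateBehavior': 'UPDATE_IN_DATABASE',
            'DeleteBehavior': 'LOG'
        }
    )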
def create_database(CatalogId=None, DatabaseInput=None):
"""
Creates a new database in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_database(
CatalogId='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nThe metadata for the database.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
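# Illustrative sketch: creating a catalog database needs only a DatabaseInput
# with a Name; the description and location below are placeholders, and a
# configured boto3 Glue client is assumed.
def _example_create_database():
    import boto3
    glue = boto3.client('glue')
    glue.create_database(
        DatabaseInput={
            'Name': 'example_database',   # folded to lowercase by Glue
            'Description': 'Placeholder catalog database for the sketch above',
            'LocationUri': 's3://example-bucket/warehouse/example_database/'
        }
    )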
def create_dev_endpoint(EndpointName=None, RoleArn=None, SecurityGroupIds=None, SubnetId=None, PublicKey=None, PublicKeys=None, NumberOfNodes=None, WorkerType=None, GlueVersion=None, NumberOfWorkers=None, ExtraPythonLibsS3Path=None, ExtraJarsS3Path=None, SecurityConfiguration=None, Tags=None, Arguments=None):
"""
Creates a new development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.create_dev_endpoint(
EndpointName='string',
RoleArn='string',
SecurityGroupIds=[
'string',
],
SubnetId='string',
PublicKey='string',
PublicKeys=[
'string',
],
NumberOfNodes=123,
WorkerType='Standard'|'G.1X'|'G.2X',
GlueVersion='string',
NumberOfWorkers=123,
ExtraPythonLibsS3Path='string',
ExtraJarsS3Path='string',
SecurityConfiguration='string',
Tags={
'string': 'string'
},
Arguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name to be assigned to the new DevEndpoint .\n
:type RoleArn: string
:param RoleArn: [REQUIRED]\nThe IAM role for the DevEndpoint .\n
:type SecurityGroupIds: list
:param SecurityGroupIds: Security group IDs for the security groups to be used by the new DevEndpoint .\n\n(string) --\n\n
:type SubnetId: string
:param SubnetId: The subnet ID for the new DevEndpoint to use.
:type PublicKey: string
:param PublicKey: The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
:type PublicKeys: list
:param PublicKeys: A list of public keys to be used by the development endpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n
:type NumberOfNodes: integer
:param NumberOfNodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint .
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:type ExtraPythonLibsS3Path: string
:param ExtraPythonLibsS3Path: The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\n\n
:type ExtraJarsS3Path: string
:param ExtraJarsS3Path: The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this DevEndpoint .
:type Tags: dict
:param Tags: The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type Arguments: dict
:param Arguments: A map of arguments used to configure the DevEndpoint .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
Response Structure
(dict) --
EndpointName (string) --
The name assigned to the new DevEndpoint .
Status (string) --
The current status of the new DevEndpoint .
SecurityGroupIds (list) --
The security groups assigned to the new DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID assigned to the new DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint .
YarnEndpointAddress (string) --
The address of the YARN endpoint used by this DevEndpoint .
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint .
ExtraJarsS3Path (string) --
Path to one or more Java .jar files in an S3 bucket that will be loaded in your DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure being used with this DevEndpoint .
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
Arguments (dict) --
The map of arguments used to configure this DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
:returns:
(string) --
"""
pass
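# Illustrative sketch: a development endpoint sized with the G.1X worker type and
# a Python 3 argument, mirroring the Arguments values listed in the response
# structure above. Role ARN, subnet, and security group IDs are placeholders;
# assumes a configured boto3 Glue client.
def _example_create_dev_endpoint():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_dev_endpoint(
        EndpointName='example-dev-endpoint',
        RoleArn='arn:aws:iam::123456789012:role/ExampleGlueDevEndpointRole',  # placeholder
        SecurityGroupIds=['sg-0123456789abcdef0'],   # placeholder security group
        SubnetId='subnet-0123456789abcdef0',         # placeholder subnet
        GlueVersion='1.0',
        WorkerType='G.1X',
        NumberOfWorkers=2,
        Arguments={'GLUE_PYTHON_VERSION': '3'}
    )
    return response['Status']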
def create_job(Name=None, Description=None, LogUri=None, Role=None, ExecutionProperty=None, Command=None, DefaultArguments=None, NonOverridableArguments=None, Connections=None, MaxRetries=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, Tags=None, NotificationProperty=None, GlueVersion=None, NumberOfWorkers=None, WorkerType=None):
"""
Creates a new job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_job(
Name='string',
Description='string',
LogUri='string',
Role='string',
ExecutionProperty={
'MaxConcurrentRuns': 123
},
Command={
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
DefaultArguments={
'string': 'string'
},
NonOverridableArguments={
'string': 'string'
},
Connections={
'Connections': [
'string',
]
},
MaxRetries=123,
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
Tags={
'string': 'string'
},
NotificationProperty={
'NotifyDelayAfter': 123
},
GlueVersion='string',
NumberOfWorkers=123,
WorkerType='Standard'|'G.1X'|'G.2X'
)
:type Name: string
:param Name: [REQUIRED]\nThe name you assign to this job definition. It must be unique in your account.\n
:type Description: string
:param Description: Description of the job being defined.
:type LogUri: string
:param LogUri: This field is reserved for future use.
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n
:type ExecutionProperty: dict
:param ExecutionProperty: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n
:type Command: dict
:param Command: [REQUIRED]\nThe JobCommand that executes this job.\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n
:type DefaultArguments: dict
:param DefaultArguments: The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NonOverridableArguments: dict
:param NonOverridableArguments: Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n
:type Connections: dict
:param Connections: The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry this job if it fails.
:type AllocatedCapacity: integer
:param AllocatedCapacity: This parameter is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job.
:type Tags: dict
:param Tags: The tags to use with this job. You may use tags to limit access to the job. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The unique name that was provided for this job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
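# Illustrative sketch: a Spark ETL job definition (Command name 'glueetl') sized
# with WorkerType/NumberOfWorkers rather than MaxCapacity, as the parameter notes
# above require. The role ARN and script location are placeholders; assumes a
# configured boto3 Glue client.
def _example_create_job():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_job(
        Name='example-etl-job',
        Role='arn:aws:iam::123456789012:role/ExampleGlueJobRole',  # placeholder
        Command={
            'Name': 'glueetl',
            'ScriptLocation': 's3://example-bucket/scripts/example_job.py',
            'PythonVersion': '3'
        },
        GlueVersion='1.0',
        WorkerType='G.1X',
        NumberOfWorkers=10,
        Timeout=2880,        # minutes; the documented default
        MaxRetries=1
    )
    return response['Name']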
def create_ml_transform(Name=None, Description=None, InputRecordTables=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None, Tags=None):
"""
Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.
Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description , in addition to the parameters that you want to use for your algorithm.
You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role , and optionally, AllocatedCapacity , Timeout , and MaxRetries . For more information, see Jobs .
See also: AWS API Documentation
Exceptions
:example: response = client.create_ml_transform(
Name='string',
Description='string',
InputRecordTables=[
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe unique name that you give the transform when you create it.\n
:type Description: string
:param Description: A description of the machine learning transform that is being defined. The default is an empty string.
:type InputRecordTables: list
:param InputRecordTables: [REQUIRED]\nA list of AWS Glue table definitions used by the transform.\n\n(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.\n\nDatabaseName (string) -- [REQUIRED]A database name in the AWS Glue Data Catalog.\n\nTableName (string) -- [REQUIRED]A table name in the AWS Glue Data Catalog.\n\nCatalogId (string) --A unique identifier for the AWS Glue Data Catalog.\n\nConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.\n\n\n\n\n
:type Parameters: dict
:param Parameters: [REQUIRED]\nThe algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.\n\nThis role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\n\n
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\n
:type Timeout: integer
:param Timeout: The timeout of the task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:type Tags: dict
:param Tags: The tags to use with this machine learning transform. You may use tags to limit access to the machine learning transform. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
A unique identifier that is generated for the transform.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
"""
pass
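# Illustrative sketch: a FindMatches transform over a single catalog table, with
# MaxCapacity left unset because WorkerType and NumberOfWorkers are used instead
# (the two sizing options are mutually exclusive per the notes above). All names
# are placeholders; assumes a configured boto3 Glue client.
def _example_create_ml_transform():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_ml_transform(
        Name='example-find-matches',
        InputRecordTables=[
            {'DatabaseName': 'example_database', 'TableName': 'example_table'}
        ],
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {
                'PrimaryKeyColumnName': 'record_id',   # placeholder key column
                'PrecisionRecallTradeoff': 0.5,
                'AccuracyCostTradeoff': 0.5
            }
        },
        Role='arn:aws:iam::123456789012:role/ExampleGlueMLRole',  # placeholder
        GlueVersion='1.0',
        WorkerType='G.1X',
        NumberOfWorkers=2
    )
    return response['TransformId']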
def create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInput=None):
"""
Creates a new partition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The AWS account ID of the catalog in which the partition is to be created.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nA PartitionInput structure defining the partition to be created.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
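# Illustrative sketch: registering a single partition whose Values are ordered to
# match the table's partition keys, as the PartitionInput notes above warn.
# Database, table, and S3 location are placeholders; assumes a configured boto3
# Glue client.
def _example_create_partition():
    import boto3
    glue = boto3.client('glue')
    glue.create_partition(
        DatabaseName='example_database',
        TableName='example_table',
        PartitionInput={
            'Values': ['2020', '01'],   # placeholder values, in partition-key order
            'StorageDescriptor': {
                'Columns': [{'Name': 'value', 'Type': 'string'}],
                'Location': 's3://example-bucket/example_table/year=2020/month=01/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
                }
            }
        }
    )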
def create_script(DagNodes=None, DagEdges=None, Language=None):
"""
Transforms a directed acyclic graph (DAG) into code.
See also: AWS API Documentation
Exceptions
:example: response = client.create_script(
DagNodes=[
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
DagEdges=[
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
],
Language='PYTHON'|'SCALA'
)
:type DagNodes: list
:param DagNodes: A list of the nodes in the DAG.\n\n(dict) --Represents a node in a directed acyclic graph (DAG)\n\nId (string) -- [REQUIRED]A node identifier that is unique within the node\'s graph.\n\nNodeType (string) -- [REQUIRED]The type of node that this is.\n\nArgs (list) -- [REQUIRED]Properties of the node, in the form of name-value pairs.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nLineNumber (integer) --The line number of the node.\n\n\n\n\n
:type DagEdges: list
:param DagEdges: A list of the edges in the DAG.\n\n(dict) --Represents a directional edge in a directed acyclic graph (DAG).\n\nSource (string) -- [REQUIRED]The ID of the node at which the edge starts.\n\nTarget (string) -- [REQUIRED]The ID of the node at which the edge ends.\n\nTargetParameter (string) --The target of the edge.\n\n\n\n\n
:type Language: string
:param Language: The programming language of the resulting code from the DAG.
:rtype: dict
Returns
Response Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
The Python script generated from the DAG.
ScalaCode (string) --
The Scala code generated from the DAG.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
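# Illustrative sketch: generating a Python script from a minimal one-node DAG.
# The node type and argument values are placeholders patterned on the request
# syntax above and may need adjusting for a real DAG; assumes a configured boto3
# Glue client.
def _example_create_script():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_script(
        DagNodes=[
            {
                'Id': 'source_node',
                'NodeType': 'DataSource',   # placeholder node type
                'Args': [
                    {'Name': 'database', 'Value': '"example_database"'},
                    {'Name': 'table_name', 'Value': '"example_table"'}
                ],
                'LineNumber': 1
            }
        ],
        DagEdges=[],
        Language='PYTHON'
    )
    return response['PythonScript']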
def create_security_configuration(Name=None, EncryptionConfiguration=None):
"""
Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints .
See also: AWS API Documentation
Exceptions
:example: response = client.create_security_configuration(
Name='string',
EncryptionConfiguration={
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name for the new security configuration.\n
:type EncryptionConfiguration: dict
:param EncryptionConfiguration: [REQUIRED]\nThe encryption configuration for the new security configuration.\n\nS3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\n\n(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\n\nS3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\nCloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.\n\nCloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\nJobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.\n\nJobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
Name (string) --
The name assigned to the new security configuration.
CreatedTimestamp (datetime) --
The time at which the new security configuration was created.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
"""
pass
def create_table(CatalogId=None, DatabaseName=None, TableInput=None):
"""
Creates a new table definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the Table . If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nThe TableInput object that defines the metadata table to create in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
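Usage sketch (not part of the generated reference above): registering a simple external CSV table stored in Amazon S3. The database name, table name, and S3 location are hypothetical placeholders; the input/output formats and SerDe shown are the common Hive classes for delimited text.

import boto3

glue = boto3.client('glue')   # assumes credentials and region are configured elsewhere

glue.create_table(
    DatabaseName='example_db',                     # hypothetical database (lowercase for Hive)
    TableInput={
        'Name': 'example_events',                  # hypothetical table name
        'TableType': 'EXTERNAL_TABLE',
        'PartitionKeys': [],                       # required, even if empty, for Athena-queried tables
        'StorageDescriptor': {
            'Columns': [
                {'Name': 'event_id', 'Type': 'string'},
                {'Name': 'event_time', 'Type': 'timestamp'}
            ],
            'Location': 's3://example-bucket/events/',
            'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
            'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
            'SerdeInfo': {
                'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                'Parameters': {'field.delim': ','}
            }
        }
    }
)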
"""
pass
def create_trigger(Name=None, WorkflowName=None, Type=None, Schedule=None, Predicate=None, Actions=None, Description=None, StartOnCreation=None, Tags=None):
"""
Creates a new trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.create_trigger(
Name='string',
WorkflowName='string',
Type='SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
Schedule='string',
Predicate={
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
},
Actions=[
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
Description='string',
StartOnCreation=True|False,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger.\n
:type WorkflowName: string
:param WorkflowName: The name of the workflow associated with the trigger.
:type Type: string
:param Type: [REQUIRED]\nThe type of the new trigger.\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\nThis field is required when the trigger type is SCHEDULED.\n
:type Predicate: dict
:param Predicate: A predicate to specify when the new trigger should fire.\nThis field is required when the trigger type is CONDITIONAL .\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n
:type Actions: list
:param Actions: [REQUIRED]\nThe actions initiated by this trigger when it fires.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n
:type Description: string
:param Description: A description of the new trigger.
:type StartOnCreation: boolean
:param StartOnCreation: Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.
:type Tags: dict
:param Tags: The tags to use with this trigger. You may use tags to limit access to the trigger. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the trigger.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
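Usage sketch (not part of the generated reference above): a conditional trigger that starts a job once a crawler finishes successfully. The trigger, crawler, and job names are hypothetical placeholders.

import boto3

glue = boto3.client('glue')

response = glue.create_trigger(
    Name='start-etl-after-crawl',                  # hypothetical trigger name
    Type='CONDITIONAL',
    Predicate={
        'Conditions': [
            {
                'LogicalOperator': 'EQUALS',
                'CrawlerName': 'example-crawler',  # hypothetical crawler
                'CrawlState': 'SUCCEEDED'
            }
        ]
    },
    Actions=[
        {'JobName': 'example-etl-job'}             # hypothetical job
    ],
    StartOnCreation=True                           # activate the trigger immediately
)
print(response['Name'])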
"""
pass
def create_user_defined_function(CatalogId=None, DatabaseName=None, FunctionInput=None):
"""
Creates a new function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the function. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which to create the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that defines the function to create in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
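Usage sketch (not part of the generated reference above): registering a Hive UDF backed by a JAR in Amazon S3. The database, function, Java class, and S3 path are hypothetical placeholders.

import boto3

glue = boto3.client('glue')

glue.create_user_defined_function(
    DatabaseName='example_db',                         # hypothetical database
    FunctionInput={
        'FunctionName': 'example_udf',
        'ClassName': 'com.example.hive.ExampleUDF',    # hypothetical Java class
        'OwnerName': 'data-team',
        'OwnerType': 'GROUP',
        'ResourceUris': [
            {'ResourceType': 'JAR', 'Uri': 's3://example-bucket/udfs/example-udf.jar'}
        ]
    }
)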
"""
pass
def create_workflow(Name=None, Description=None, DefaultRunProperties=None, Tags=None):
"""
Creates a new workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.create_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
},
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name to be assigned to the workflow. It should be unique within your account.\n
:type Description: string
:param Description: A description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:type Tags: dict
:param Tags: The tags to be used with this workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow which was provided as part of the request.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
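Usage sketch (not part of the generated reference above): creating a tagged workflow with default run properties. All names and values are hypothetical placeholders.

import boto3

glue = boto3.client('glue')

response = glue.create_workflow(
    Name='nightly-ingest',                             # hypothetical workflow name
    Description='Crawl the raw data, then run the ETL job.',
    DefaultRunProperties={'environment': 'dev'},       # hypothetical run property
    Tags={'team': 'analytics'}
)
print(response['Name'])    # echoes the name supplied in the request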
"""
pass
def delete_classifier(Name=None):
"""
Removes a classifier from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to remove.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_connection(CatalogId=None, ConnectionName=None):
"""
Deletes a connection from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_connection(
CatalogId='string',
ConnectionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type ConnectionName: string
:param ConnectionName: [REQUIRED]\nThe name of the connection to delete.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_crawler(Name=None):
"""
Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler state is RUNNING .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to remove.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
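Usage sketch (not part of the generated reference above): deleting a crawler while tolerating the case where it is still running. The crawler name is a hypothetical placeholder.

import boto3

glue = boto3.client('glue')

try:
    glue.delete_crawler(Name='example-crawler')    # hypothetical crawler name
except glue.exceptions.CrawlerRunningException:
    print('Crawler is still running; retry after the crawl finishes.')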
"""
pass
def delete_database(CatalogId=None, Name=None):
"""
Removes a specified database from a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to delete. For Hive compatibility, this must be all lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_dev_endpoint(EndpointName=None):
"""
Deletes a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint .\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
"""
pass
def delete_job(JobName=None):
"""
Deletes a specified job definition. If the job definition is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --The name of the job definition that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobName': 'string'
}
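Usage sketch (not part of the generated reference above); the job name is a hypothetical placeholder. Because no exception is thrown for a missing job definition, the call is safe to run repeatedly.

import boto3

glue = boto3.client('glue')

response = glue.delete_job(JobName='example-etl-job')   # hypothetical job name
print(response.get('JobName'))                           # name of the deleted job definition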
"""
pass
def delete_ml_transform(TransformId=None):
"""
Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that uses machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransform . However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform to delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform that was deleted.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string'
}
"""
pass
def delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Deletes a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partition to be deleted.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
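Usage sketch (not part of the generated reference above); the database, table, and partition values are hypothetical placeholders for a table partitioned by year, month, and day.

import boto3

glue = boto3.client('glue')

glue.delete_partition(
    DatabaseName='example_db',               # hypothetical database
    TableName='example_events',              # hypothetical table
    PartitionValues=['2020', '01', '15']     # values in the same order as the partition keys
)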
"""
pass
def delete_resource_policy(PolicyHashCondition=None):
"""
Deletes a specified policy.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_resource_policy(
PolicyHashCondition='string'
)
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when this policy was set.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
def delete_security_configuration(Name=None):
"""
Deletes a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to delete.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Removes a table definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table to be deleted. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
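Usage sketch (not part of the generated reference above); the database and table names are hypothetical placeholders and, for Hive compatibility, lowercase.

import boto3

glue = boto3.client('glue')

glue.delete_table(
    DatabaseName='example_db',    # hypothetical database
    Name='example_events'         # hypothetical table
)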
"""
pass
def delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Deletes a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: [REQUIRED]\nThe ID of the table version to be deleted. A VersionID is a string representation of an integer. Each version is incremented by 1.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_trigger(Name=None):
"""
Deletes a specified trigger. If the trigger is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def delete_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Deletes an existing function definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function definition to be deleted.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_workflow(Name=None):
"""
Deletes a workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_workflow(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be deleted.\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --Name of the workflow specified in input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generates a presigned URL given a client, its method, and its arguments.
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned URL is valid for. By default, it expires in an hour (3600 seconds).
:type HttpMethod: string
:param HttpMethod: The HTTP method to use on the generated URL. By default, the HTTP method is whatever is used in the method\'s model.
"""
pass
def get_catalog_import_status(CatalogId=None):
"""
Retrieves the status of a migration operation.
See also: AWS API Documentation
Exceptions
:example: response = client.get_catalog_import_status(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to migrate. Currently, this should be the AWS account ID.
:rtype: dict
ReturnsResponse Syntax
{
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
Response Structure
(dict) --
ImportStatus (dict) --The status of the specified catalog migration.
ImportCompleted (boolean) --
True if the migration has completed, or False otherwise.
ImportTime (datetime) --The time that the migration was started.
ImportedBy (string) --The name of the person who initiated the migration.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
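Usage sketch (not part of the generated reference above): checking whether the catalog migration has completed for the caller's account (the default catalog is used when CatalogId is omitted).

import boto3

glue = boto3.client('glue')

status = glue.get_catalog_import_status()['ImportStatus']
if status.get('ImportCompleted'):
    print('Catalog import finished at', status.get('ImportTime'))
else:
    print('Catalog import has not completed.')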
"""
pass
def get_classifier(Name=None):
"""
Retrieves a classifier by name.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to retrieve.\n
:rtype: dict
ReturnsResponse Syntax
{
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
Response Structure
(dict) --
Classifier (dict) --The requested classifier.
GrokClassifier (dict) --A classifier that uses grok .
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
GrokPattern (string) --The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --A classifier for XML content.
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
RowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --A classifier for JSON content.
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
JsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --A classifier for comma-separated values (CSV).
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
Delimiter (string) --A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --Indicates whether the CSV file contains a header.
Header (list) --A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --Enables the processing of files that contain only one column.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
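Usage sketch (not part of the generated reference above); the classifier name is a hypothetical placeholder. Only one of the nested keys is present, depending on the classifier type.

import boto3

glue = boto3.client('glue')

classifier = glue.get_classifier(Name='example-csv-classifier')['Classifier']
if 'CsvClassifier' in classifier:
    print('Delimiter:', classifier['CsvClassifier'].get('Delimiter'))
elif 'GrokClassifier' in classifier:
    print('Grok pattern:', classifier['GrokClassifier'].get('GrokPattern'))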
"""
pass
def get_classifiers(MaxResults=None, NextToken=None):
"""
Lists all classifier objects in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifiers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The size of the list to return (optional).
:type NextToken: string
:param NextToken: An optional continuation token.
:rtype: dict
ReturnsResponse Syntax
{
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Classifiers (list) --
The requested list of classifier objects.
(dict) --
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.
You can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
GrokClassifier (dict) --
A classifier that uses grok .
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
GrokPattern (string) --
The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --
Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --
A classifier for XML content.
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
RowTag (string) --
The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --
A classifier for JSON content.
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
JsonPath (string) --
A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --
A classifier for comma-separated values (CSV).
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
Delimiter (string) --
A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --
A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --
Indicates whether the CSV file contains a header.
Header (list) --
A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --
Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --
Enables the processing of files that contain only one column.
NextToken (string) --
A continuation token.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
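Usage sketch (not part of the generated reference above): paging through all classifiers by following the documented NextToken continuation token.

import boto3

glue = boto3.client('glue')

kwargs = {'MaxResults': 50}
while True:
    page = glue.get_classifiers(**kwargs)
    for classifier in page['Classifiers']:
        # Each entry carries exactly one of the nested classifier keys.
        kind, details = next(iter(classifier.items()))
        print(kind, details['Name'])
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']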
"""
pass
def get_connection(CatalogId=None, Name=None, HidePassword=None):
"""
Retrieves a connection definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connection(
CatalogId='string',
Name='string',
HidePassword=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to retrieve.\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:rtype: dict
ReturnsResponse Syntax
{
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
Response Structure
(dict) --
Connection (dict) --
The requested connection definition.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\'s root certificate. AWS Glue uses this root certificate to validate the customer\'s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64-encoded PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue\'s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
:returns:
(string) --
"""
pass
def get_connections(CatalogId=None, Filter=None, HidePassword=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connections(
CatalogId='string',
Filter={
'MatchCriteria': [
'string',
],
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA'
},
HidePassword=True|False,
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type Filter: dict
:param Filter: A filter that controls which connections are returned.\n\nMatchCriteria (list) --A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.\n\n(string) --\n\n\nConnectionType (string) --The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.\n\n\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of connections to return in one response.
:rtype: dict
ReturnsResponse Syntax
{
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
ConnectionList (list) --
A list of requested connection definitions.
(dict) --
Defines a connection to a data source.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\'s root certificate. AWS Glue uses this root certificate to validate the customer\'s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64-encoded PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue\'s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
NextToken (string) --
A continuation token, if the list of connections returned does not include the last of the filtered connections.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
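Usage sketch (not part of the generated reference above): listing JDBC connections without returning stored passwords. The property printed is one of the documented ConnectionProperties keys.

import boto3

glue = boto3.client('glue')

response = glue.get_connections(
    Filter={'ConnectionType': 'JDBC'},
    HidePassword=True,
    MaxResults=25
)
for connection in response['ConnectionList']:
    url = connection['ConnectionProperties'].get('JDBC_CONNECTION_URL')
    print(connection['Name'], url)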
"""
pass
def get_crawler(Name=None):
"""
Retrieves metadata for a specified crawler.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to retrieve metadata for.\n
:rtype: dict
ReturnsResponse Syntax
{
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
Response Structure
(dict) --
Crawler (dict) --The metadata for the specified crawler.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
:returns:
(string) --
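Usage sketch (not part of the generated reference above); the crawler name is a hypothetical placeholder.

import boto3

glue = boto3.client('glue')

crawler = glue.get_crawler(Name='example-crawler')['Crawler']
print('State:', crawler['State'])                  # READY, RUNNING, or STOPPING
last_crawl = crawler.get('LastCrawl', {})
print('Last crawl:', last_crawl.get('Status'), last_crawl.get('ErrorMessage'))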
"""
pass
def get_crawler_metrics(CrawlerNameList=None, MaxResults=None, NextToken=None):
"""
Retrieves metrics about specified crawlers.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler_metrics(
CrawlerNameList=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type CrawlerNameList: list
:param CrawlerNameList: A list of the names of crawlers about which to retrieve metrics.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
ReturnsResponse Syntax
{
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerMetricsList (list) --
A list of metrics for the specified crawler.
(dict) --
Metrics for a specified crawler.
CrawlerName (string) --
The name of the crawler.
TimeLeftSeconds (float) --
The estimated time left to complete a running crawl.
StillEstimating (boolean) --
True if the crawler is still estimating how long it will take to complete this run.
LastRuntimeSeconds (float) --
The duration of the crawler's most recent run, in seconds.
MedianRuntimeSeconds (float) --
The median duration of this crawler's runs, in seconds.
TablesCreated (integer) --
The number of tables created by this crawler.
TablesUpdated (integer) --
The number of tables updated by this crawler.
TablesDeleted (integer) --
The number of tables deleted by this crawler.
NextToken (string) --
A continuation token, if the returned list does not contain the last metric available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.OperationTimeoutException
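A minimal usage sketch (not part of the generated reference; assumes default AWS credentials/region and a hypothetical crawler named 'my-crawler'):
import boto3

glue = boto3.client('glue')
# Request metrics only for the named crawler; omit CrawlerNameList to get all crawlers.
metrics = glue.get_crawler_metrics(CrawlerNameList=['my-crawler'])
for m in metrics.get('CrawlerMetricsList', []):
    if m.get('StillEstimating'):
        print(m['CrawlerName'], 'is still estimating its runtime')
    else:
        print(m['CrawlerName'], 'last run took', m.get('LastRuntimeSeconds', 0.0), 'seconds')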
"""
pass
def get_crawlers(MaxResults=None, NextToken=None):
"""
Retrieves metadata for all crawlers defined in the customer account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawlers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The number of crawlers to return on each call.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:rtype: dict
Response Syntax
{
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Crawlers (list) --
A list of crawler metadata.
(dict) --
Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --
The name of the crawler.
Role (string) --
The Amazon Resource Name (ARN) of an IAM role that's used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --
A collection of targets to crawl.
S3Targets (list) --
Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --
Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --
The path to the Amazon S3 target.
Exclusions (list) --
A list of glob patterns used to exclude objects from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --
Specifies JDBC targets.
(dict) --
Specifies a JDBC data store to crawl.
ConnectionName (string) --
The name of the connection to use to connect to the JDBC target.
Path (string) --
The path of the JDBC target.
Exclusions (list) --
A list of glob patterns used to exclude objects from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --
Specifies Amazon DynamoDB targets.
(dict) --
Specifies an Amazon DynamoDB table to crawl.
Path (string) --
The name of the DynamoDB table to crawl.
CatalogTargets (list) --
Specifies AWS Glue Data Catalog targets.
(dict) --
Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --
The name of the database to be synchronized.
Tables (list) --
A list of the tables to be synchronized.
(string) --
DatabaseName (string) --
The name of the database in which the crawler's output is stored.
Description (string) --
A description of the crawler.
Classifiers (list) --
A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --
The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --
The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --
The deletion behavior when the crawler finds a deleted object.
State (string) --
Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --
The prefix added to the names of tables that are created.
Schedule (dict) --
For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --
A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --
The state of the schedule.
CrawlElapsedTime (integer) --
If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --
The time that the crawler was created.
LastUpdated (datetime) --
The time that the crawler was last updated.
LastCrawl (dict) --
The status of the last crawl, and potentially error information if an error occurred.
Status (string) --
Status of the last crawl.
ErrorMessage (string) --
If an error occurred, the error information about the last crawl.
LogGroup (string) --
The log group for the last crawl.
LogStream (string) --
The log stream for the last crawl.
MessagePrefix (string) --
The prefix for a message about this crawl.
StartTime (datetime) --
The time at which the crawl started.
Version (integer) --
The version of the crawler.
Configuration (string) --
Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used by this crawler.
NextToken (string) --
A continuation token, if the returned list has not reached the end of those defined in this customer account.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
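A hedged pagination sketch (assumes default credentials; follows the NextToken behavior described above):
import boto3

glue = boto3.client('glue')
crawlers, token = [], None
while True:
    kwargs = {'MaxResults': 100}
    if token:
        kwargs['NextToken'] = token
    page = glue.get_crawlers(**kwargs)
    crawlers.extend(page.get('Crawlers', []))
    token = page.get('NextToken')
    if not token:        # no token means the last page has been returned
        break
print('Total crawlers:', len(crawlers))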
"""
pass
def get_data_catalog_encryption_settings(CatalogId=None):
"""
Retrieves the security configuration for a specified catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_data_catalog_encryption_settings(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to retrieve the security configuration for. If none is provided, the AWS account ID is used by default.
:rtype: dict
Response Syntax
{
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
Response Structure
(dict) --
DataCatalogEncryptionSettings (dict) --The requested security configuration.
EncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.
CatalogEncryptionMode (string) --The encryption-at-rest mode for encrypting Data Catalog data.
SseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.
ConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.
ReturnConnectionPasswordEncrypted (boolean) --When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.
AwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.
If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.
You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
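A small sketch showing how the returned settings might be inspected (assumes the calling account's Data Catalog is used when CatalogId is omitted, as described above):
import boto3

glue = boto3.client('glue')
settings = glue.get_data_catalog_encryption_settings()['DataCatalogEncryptionSettings']
at_rest = settings.get('EncryptionAtRest', {})
pwd = settings.get('ConnectionPasswordEncryption', {})
print('Encryption-at-rest mode:', at_rest.get('CatalogEncryptionMode'))
print('Connection passwords returned encrypted:',
      pwd.get('ReturnConnectionPasswordEncrypted', False))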
"""
pass
def get_database(CatalogId=None, Name=None):
"""
Retrieves the definition of a specified database.
See also: AWS API Documentation
Exceptions
:example: response = client.get_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to retrieve. For Hive compatibility, this should be all lowercase.\n
:rtype: dict
Response Syntax
{
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
Response Structure
(dict) --
Database (dict) --
The definition of the specified database in the Data Catalog.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
:returns:
(string) --
(string) --
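A hedged lookup sketch using the EntityNotFoundException listed above (the database name 'sales' is hypothetical):
import boto3

glue = boto3.client('glue')
try:
    db = glue.get_database(Name='sales')['Database']
    print(db['Name'], '->', db.get('LocationUri', '(no location)'))
except glue.exceptions.EntityNotFoundException:
    print('Database "sales" does not exist in this Data Catalog')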
"""
pass
def get_databases(CatalogId=None, NextToken=None, MaxResults=None):
"""
Retrieves all databases defined in a given Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_databases(
CatalogId='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog from which to retrieve Databases . If none is provided, the AWS account ID is used by default.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of databases to return in one response.
:rtype: dict
Response Syntax
{
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DatabaseList (list) --
A list of Database objects from the specified catalog.
(dict) --
The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
NextToken (string) --
A continuation token for paginating the returned list of databases, returned if the current segment of the list is not the last.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
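A hedged pagination sketch that collects all database names (assumes default credentials):
import boto3

glue = boto3.client('glue')
names, token = [], None
while True:
    kwargs = {'MaxResults': 100}
    if token:
        kwargs['NextToken'] = token
    page = glue.get_databases(**kwargs)
    names.extend(d['Name'] for d in page.get('DatabaseList', []))
    token = page.get('NextToken')
    if not token:
        break
print(sorted(names))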
"""
pass
def get_dataflow_graph(PythonScript=None):
"""
Transforms a Python script into a directed acyclic graph (DAG).
See also: AWS API Documentation
Exceptions
:example: response = client.get_dataflow_graph(
PythonScript='string'
)
:type PythonScript: string
:param PythonScript: The Python script to transform.
:rtype: dict
Response Syntax
{
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
Response Structure
(dict) --
DagNodes (list) --A list of the nodes in the resulting DAG.
(dict) --Represents a node in a directed acyclic graph (DAG)
Id (string) --A node identifier that is unique within the node's graph.
NodeType (string) --The type of node that this is.
Args (list) --Properties of the node, in the form of name-value pairs.
(dict) --An argument or property of a node.
Name (string) --The name of the argument or property.
Value (string) --The value of the argument or property.
Param (boolean) --True if the value is used as a parameter.
LineNumber (integer) --The line number of the node.
DagEdges (list) --A list of the edges in the resulting DAG.
(dict) --Represents a directional edge in a directed acyclic graph (DAG).
Source (string) --The ID of the node at which the edge starts.
Target (string) --The ID of the node at which the edge ends.
TargetParameter (string) --The target of the edge.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
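A minimal sketch that submits a script and prints the resulting DAG (the local script path is hypothetical):
import boto3

glue = boto3.client('glue')
with open('my_etl_script.py') as f:          # hypothetical generated ETL script
    dag = glue.get_dataflow_graph(PythonScript=f.read())
for node in dag.get('DagNodes', []):
    print(node['NodeType'], 'at line', node.get('LineNumber'))
for edge in dag.get('DagEdges', []):
    print(edge['Source'], '->', edge['Target'])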
"""
pass
def get_dev_endpoint(EndpointName=None):
"""
Retrieves information about a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nName of the DevEndpoint to retrieve information for.\n
:rtype: dict
Response Syntax
{
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
Response Structure
(dict) --
DevEndpoint (dict) --A DevEndpoint definition.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute, as sketched at the end of this docstring.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
:returns:
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
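A hedged sketch of the public-key rotation described in the PublicKeys note above (the endpoint name and key material are hypothetical; the UpdateDevEndpoint parameter names are assumed to match that note):
import boto3

glue = boto3.client('glue')
endpoint = glue.get_dev_endpoint(EndpointName='my-endpoint')['DevEndpoint']
old_key = endpoint.get('PublicKey')          # single legacy key, if one was set
if old_key:
    # Remove the legacy key and register a list of per-client keys instead.
    glue.update_dev_endpoint(
        EndpointName='my-endpoint',
        DeletePublicKeys=[old_key],
        AddPublicKeys=['ssh-rsa AAAA... client-a', 'ssh-rsa AAAA... client-b'],
    )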
"""
pass
def get_dev_endpoints(MaxResults=None, NextToken=None):
"""
Retrieves all the development endpoints in this AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoints(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum size of information to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
Response Syntax
{
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpoints (list) --
A list of DevEndpoint definitions.
(dict) --
A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --
The name of the DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --
A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --
The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --
A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --
The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --
The current status of this DevEndpoint .
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --
The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --
The status of the last update.
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --
The point in time at which this DevEndpoint was last modified.
PublicKey (string) --
The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --
A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --
A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
NextToken (string) --
A continuation token, if not all DevEndpoint definitions have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
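A hedged pagination sketch that lists every development endpoint with its status (assumes default credentials):
import boto3

glue = boto3.client('glue')
endpoints, token = [], None
while True:
    kwargs = {'MaxResults': 25}
    if token:
        kwargs['NextToken'] = token
    page = glue.get_dev_endpoints(**kwargs)
    endpoints.extend(page.get('DevEndpoints', []))
    token = page.get('NextToken')
    if not token:
        break
for ep in endpoints:
    print(ep['EndpointName'], ep.get('Status'), ep.get('GlueVersion'))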
"""
pass
def get_job(JobName=None):
"""
Retrieves an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to retrieve.\n
:rtype: dict
Response Syntax
{
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
Job (dict) --The requested job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
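A hedged sketch that reads a job definition and reports its capacity settings, following the MaxCapacity/WorkerType rules above (the job name is hypothetical):
import boto3

glue = boto3.client('glue')
job = glue.get_job(JobName='my-etl-job')['Job']
print('Command:', job['Command']['Name'])    # 'glueetl' or 'pythonshell'
if 'WorkerType' in job:
    # Worker-based sizing; MaxCapacity should not be set alongside these fields.
    print('Workers:', job.get('NumberOfWorkers'), 'of type', job['WorkerType'])
else:
    print('MaxCapacity (DPUs):', job.get('MaxCapacity'))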
"""
pass
def get_job_bookmark(JobName=None, RunId=None):
"""
Returns information on a job bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
Response Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
A structure that defines a point that a job can resume processing.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
"""
pass
def get_job_run(JobName=None, RunId=None, PredecessorsIncluded=None):
"""
Retrieves the metadata for a given job run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_run(
JobName='string',
RunId='string',
PredecessorsIncluded=True|False
)
:type JobName: string
:param JobName: [REQUIRED]\nName of the job definition being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the job run.\n
:type PredecessorsIncluded: boolean
:param PredecessorsIncluded: True if a list of predecessor runs should be returned.
:rtype: dict
Response Syntax
{
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
JobRun (dict) --
The requested job-run metadata.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
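A hedged polling sketch using the JobRunState values listed above (the job name and run ID are hypothetical; in practice the RunId would come from a StartJobRun call):
import time

import boto3

glue = boto3.client('glue')
terminal = {'STOPPED', 'SUCCEEDED', 'FAILED', 'TIMEOUT'}
while True:
    run = glue.get_job_run(JobName='my-etl-job', RunId='jr_0123')['JobRun']
    state = run['JobRunState']
    if state in terminal:
        print('Run finished with state', state, run.get('ErrorMessage', ''))
        break
    time.sleep(30)   # still STARTING/RUNNING/STOPPING; poll again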
"""
pass
def get_job_runs(JobName=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_runs(
JobName='string',
NextToken='string',
MaxResults=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to retrieve all job runs.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
Response Syntax
{
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobRuns (list) --
A list of job-run metadata objects.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all requested job runs have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
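Usage sketch (illustrative only, not part of the AWS documentation above; assumes this docstring documents the get_job_runs call, a configured boto3 Glue client named client, and a hypothetical job named 'my-job'). Drains the paginated JobRuns list:
runs = []
kwargs = {'JobName': 'my-job'}  # 'my-job' is a hypothetical job name
while True:
    page = client.get_job_runs(**kwargs)
    runs.extend(page['JobRuns'])
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token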
"""
pass
def get_jobs(NextToken=None, MaxResults=None):
"""
Retrieves all current job definitions.
See also: AWS API Documentation
Exceptions
:example: response = client.get_jobs(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
ReturnsResponse Syntax
{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Jobs (list) --
A list of job definitions.
(dict) --
Specifies a job definition.
Name (string) --
The name you assign to this job definition.
Description (string) --
A description of the job.
LogUri (string) --
This field is reserved for future use.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --
The time and date that this job definition was created.
LastModifiedOn (datetime) --
The last point in time when this job definition was modified.
ExecutionProperty (dict) --
An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --
The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --
The JobCommand that executes this job.
Name (string) --
The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --
Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --
The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --
The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --
Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --
The connections used for this job.
Connections (list) --
A list of connections used by the job.
(string) --
MaxRetries (integer) --
The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --
The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --
Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all job definitions have yet been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
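Usage sketch (illustrative only, not part of the AWS documentation above; assumes boto3 is installed and AWS credentials with Glue permissions are configured). Collects all job definitions by following NextToken:
import boto3
glue = boto3.client('glue')
jobs, token = [], None
while True:
    resp = glue.get_jobs(**({'NextToken': token} if token else {}))
    jobs.extend(resp['Jobs'])
    token = resp.get('NextToken')
    if not token:
        break
print(sorted(j['Name'] for j in jobs))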
"""
pass
def get_mapping(Source=None, Sinks=None, Location=None):
"""
Creates mappings.
See also: AWS API Documentation
Exceptions
:example: response = client.get_mapping(
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
}
)
:type Source: dict
:param Source: [REQUIRED]\nSpecifies the source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: A list of target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: Parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
Response Structure
(dict) --
Mapping (list) --
A list of mappings to the specified targets.
(dict) --
Defines a mapping.
SourceTable (string) --
The name of the source table.
SourcePath (string) --
The source path.
SourceType (string) --
The source type.
TargetTable (string) --
The target table.
TargetPath (string) --
The target path.
TargetType (string) --
The target type.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
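Usage sketch (illustrative only; the database and table names are hypothetical). Requests a mapping from a source table to a single sink and prints the proposed field mappings:
resp = client.get_mapping(
    Source={'DatabaseName': 'sales_db', 'TableName': 'orders_raw'},    # hypothetical names
    Sinks=[{'DatabaseName': 'sales_db', 'TableName': 'orders_clean'}]
)
for m in resp['Mapping']:
    print(m['SourcePath'], '->', m['TargetPath'])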
"""
pass
def get_ml_task_run(TransformId=None, TaskRunId=None):
"""
Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the status of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform\'s TransformID.
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nThe unique identifier of the task run.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the machine learning transform.
TaskRunId (string) --
The unique run identifier associated with this run.
Status (string) --
The status for this task run.
LogGroupName (string) --
The names of the log groups that are associated with the task run.
Properties (dict) --
The list of properties that are associated with the task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The error strings that are associated with the task run.
StartedOn (datetime) --
The date and time when this task run started.
LastModifiedOn (datetime) --
The date and time when this task run was last modified.
CompletedOn (datetime) --
The date and time when this task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
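Usage sketch (illustrative only; transform_id and task_run_id are placeholders obtained from earlier API calls such as CreateMLTransform and GetMLTaskRuns). Polls a task run until it reaches a terminal state:
import time
terminal = {'STOPPED', 'SUCCEEDED', 'FAILED', 'TIMEOUT'}
while True:
    # transform_id and task_run_id are placeholder variables
    run = client.get_ml_task_run(TransformId=transform_id, TaskRunId=task_run_id)
    if run['Status'] in terminal:
        break
    time.sleep(30)
print(run['Status'], run.get('ErrorString', ''))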
"""
pass
def get_ml_task_runs(TransformId=None, NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform\'s TransformID and other optional parameters as documented in this section.
This operation returns a list of historic runs and must be paginated.
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_runs(
TransformId='string',
NextToken='string',
MaxResults=123,
Filter={
'TaskRunType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'StartedBefore': datetime(2015, 1, 1),
'StartedAfter': datetime(2015, 1, 1)
},
Sort={
'Column': 'TASK_RUN_TYPE'|'STATUS'|'STARTED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type NextToken: string
:param NextToken: A token for pagination of the results. The default is empty.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter criteria, in the TaskRunFilterCriteria structure, for the task run.\n\nTaskRunType (string) --The type of task run.\n\nStatus (string) --The current status of the task run.\n\nStartedBefore (datetime) --Filter on task runs started before this date.\n\nStartedAfter (datetime) --Filter on task runs started after this date.\n\n\n
:type Sort: dict
:param Sort: The sorting criteria, in the TaskRunSortCriteria structure, for the task run.\n\nColumn (string) -- [REQUIRED]The column to be used to sort the list of task runs for the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used to sort the list of task runs for the machine learning transform.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TaskRuns (list) --
A list of task runs that are associated with the transform.
(dict) --
The sampling parameters that are associated with the machine learning transform.
TransformId (string) --
The unique identifier for the transform.
TaskRunId (string) --
The unique identifier for this task run.
Status (string) --
The current status of the requested task run.
LogGroupName (string) --
The name of the log group for secure logging that is associated with this task run.
Properties (dict) --
Specifies configuration properties associated with this task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The list of error strings associated with this task run.
StartedOn (datetime) --
The date and time that this task run started.
LastModifiedOn (datetime) --
The last point in time that the requested task run was updated.
CompletedOn (datetime) --
The last point in time that the requested task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
NextToken (string) --
A pagination token, if more results are available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
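Usage sketch (illustrative only; transform_id is a placeholder). Pages through all FAILED task runs for a transform:
failed, token = [], None
while True:
    kwargs = {'TransformId': transform_id, 'Filter': {'Status': 'FAILED'}}  # transform_id is a placeholder
    if token:
        kwargs['NextToken'] = token
    page = client.get_ml_task_runs(**kwargs)
    failed.extend(page['TaskRuns'])
    token = page.get('NextToken')
    if not token:
        break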
"""
pass
def get_ml_transform(TransformId=None):
"""
Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform, generated at the time that the transform was created.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform, generated at the time that the transform was created.
Name (string) --The unique name given to the transform when it was created.
Description (string) --A description of the transform.
Status (string) --The last known status of the transform (to indicate whether it can be used or not). One of "NOT_READY", "READY", or "DELETING".
CreatedOn (datetime) --The date and time when the transform was created.
LastModifiedOn (datetime) --The date and time when the transform was last modified.
InputRecordTables (list) --A list of AWS Glue table definitions used by the transform.
(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --A database name in the AWS Glue Data Catalog.
TableName (string) --A table name in the AWS Glue Data Catalog.
CatalogId (string) --A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --The configuration parameters that are specific to the algorithm used.
TransformType (string) --The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates, for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --The latest evaluation metrics.
TransformType (string) --The type of machine learning transform.
FindMatchesMetrics (dict) --The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --The recall metric indicates, for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --The number of labels available for this transform.
Schema (list) --The Map<Column, Type> object that represents the schema that this transform accepts. Has an upper bound of 100 columns.
(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --The name of the column.
DataType (string) --The type of data in the column.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
GlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when this task runs.
Timeout (integer) --The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxRetries (integer) --The maximum number of times to retry a task for this transform after a task run fails.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
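Usage sketch (illustrative only; transform_id is a placeholder). Reads the latest FindMatches evaluation metrics for a transform:
tf = client.get_ml_transform(TransformId=transform_id)  # transform_id is a placeholder
metrics = tf.get('EvaluationMetrics', {}).get('FindMatchesMetrics', {})
print(tf['Name'], tf['Status'], 'F1:', metrics.get('F1'))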
"""
pass
def get_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type NextToken: string
:param NextToken: A paginated token to offset the results.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter transformation criteria.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: The sorting criteria.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Transforms (list) --
A list of machine learning transforms.
(dict) --
A structure for a machine learning transform.
TransformId (string) --
The unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change.
Name (string) --
A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.
Description (string) --
A user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time.
Status (string) --
The current status of the machine learning transform.
CreatedOn (datetime) --
A timestamp. The time and date that this machine learning transform was created.
LastModifiedOn (datetime) --
A timestamp. The last point in time when this machine learning transform was modified.
InputRecordTables (list) --
A list of AWS Glue table definitions used by the transform.
(dict) --
The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --
A database name in the AWS Glue Data Catalog.
TableName (string) --
A table name in the AWS Glue Data Catalog.
CatalogId (string) --
A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --
The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --
A TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precision vs. recall, or accuracy vs. cost).
TransformType (string) --
The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --
The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --
The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --
The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates, for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --
The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --
The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --
An EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform.
TransformType (string) --
The type of machine learning transform.
FindMatchesMetrics (dict) --
The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --
The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --
The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --
The recall metric indicates, for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --
The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --
The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --
The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --
The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --
The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --
The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --
A count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file.
Schema (list) --
A map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns.
(dict) --
A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --
The name of the column.
DataType (string) --
The type of data in the column.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
GlueVersion (string) --
This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a task of the transform runs.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
Timeout (integer) --
The timeout in minutes of the machine learning transform.
MaxRetries (integer) --
The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.
NextToken (string) --
A pagination token, if more results are available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
:returns:
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
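Usage sketch (illustrative only). Lists READY FindMatches transforms, most recently modified first:
resp = client.get_ml_transforms(
    Filter={'TransformType': 'FIND_MATCHES', 'Status': 'READY'},
    Sort={'Column': 'LAST_MODIFIED', 'SortDirection': 'DESCENDING'}
)
for t in resp['Transforms']:
    print(t['TransformId'], t['Name'], t.get('GlueVersion'))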
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
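Usage sketch (illustrative only; the database and table names are hypothetical). The get_partitions operation is paginated in current botocore releases, but you can confirm with client.can_paginate('get_partitions') before relying on it:
paginator = client.get_paginator('get_partitions')
for page in paginator.paginate(DatabaseName='sales_db', TableName='orders'):  # hypothetical names
    for p in page['Partitions']:
        print(p['Values'])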
"""
pass
def get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Retrieves information about a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition in question resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partition resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partition\'s table.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
Partition (dict) --
The requested information, in the form of a Partition object.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which the partition resides.
TableName (string) --
The name of the database table in which the partition resides.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
:returns:
(string) --
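Usage sketch (illustrative only; the names and partition values are hypothetical). Looks up the storage location of a single partition:
resp = client.get_partition(
    DatabaseName='sales_db',          # hypothetical names
    TableName='orders',
    PartitionValues=['2020', '01']    # hypothetical partition values
)
print(resp['Partition']['StorageDescriptor']['Location'])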
"""
pass
def get_partitions(CatalogId=None, DatabaseName=None, TableName=None, Expression=None, NextToken=None, Segment=None, MaxResults=None):
"""
Retrieves information about the partitions in a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partitions(
CatalogId='string',
DatabaseName='string',
TableName='string',
Expression='string',
NextToken='string',
Segment={
'SegmentNumber': 123,
'TotalSegments': 123
},
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type Expression: string
:param Expression: An expression that filters the partitions to be returned.\nThe expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.\n\nOperators : The following are the operators that you can use in the Expression API call:\n=\n\nChecks whether the values of the two operands are equal; if yes, then the condition becomes true.\nExample: Assume \'variable a\' holds 10 and \'variable b\' holds 20.\n(a = b) is not true.\n\n< >\nChecks whether the values of two operands are equal; if the values are not equal, then the condition becomes true.\nExample: (a < > b) is true.\n\n>\nChecks whether the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.\nExample: (a > b) is not true.\n\n<\nChecks whether the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.\nExample: (a < b) is true.\n\n>=\nChecks whether the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a >= b) is not true.\n\n<=\nChecks whether the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a <= b) is true.\n\nAND, OR, IN, BETWEEN, LIKE, NOT, IS NULL\nLogical operators.\n\nSupported Partition Key Types : The following are the supported partition keys.\n\nstring\ndate\ntimestamp\nint\nbigint\nlong\ntinyint\nsmallint\ndecimal\n\nIf an invalid type is encountered, an exception is thrown.\nThe following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING , to be compatible with the catalog partitions.\n\nSample API Call :\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call to retrieve these partitions.
:type Segment: dict
:param Segment: The segment of the table\'s partitions to scan in this request.\n\nSegmentNumber (integer) -- [REQUIRED]The zero-based index number of the segment. For example, if the total number of segments is 4, SegmentNumber values range from 0 through 3.\n\nTotalSegments (integer) -- [REQUIRED]The total number of segments.\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of partitions to return in a single response.
:rtype: dict
Returns
Response Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Partitions (list) --
A list of requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
NextToken (string) --
A continuation token, if the returned list of partitions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
:returns:
(string) --
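Usage sketch: a minimal example, with hypothetical database/table names and a hypothetical partition filter, that collects every matching partition by looping on NextToken as described above.
import boto3

client = boto3.client('glue')
partitions = []
kwargs = {
    'DatabaseName': 'my_database',                    # hypothetical name
    'TableName': 'my_table',                          # hypothetical name
    'Expression': "year = '2020' AND month = '01'",   # optional SQL-like filter
}
while True:
    response = client.get_partitions(**kwargs)
    partitions.extend(response.get('Partitions', []))
    token = response.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token
print('matched partitions:', len(partitions))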
"""
pass
def get_plan(Mapping=None, Source=None, Sinks=None, Location=None, Language=None):
"""
Gets code to perform a specified mapping.
See also: AWS API Documentation
Exceptions
:example: response = client.get_plan(
Mapping=[
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
],
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
},
Language='PYTHON'|'SCALA'
)
:type Mapping: list
:param Mapping: [REQUIRED]\nThe list of mappings from a source table to target tables.\n\n(dict) --Defines a mapping.\n\nSourceTable (string) --The name of the source table.\n\nSourcePath (string) --The source path.\n\nSourceType (string) --The source type.\n\nTargetTable (string) --The target table.\n\nTargetPath (string) --The target path.\n\nTargetType (string) --The target type.\n\n\n\n\n
:type Source: dict
:param Source: [REQUIRED]\nThe source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: The target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: The parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:type Language: string
:param Language: The programming language of the code to perform the mapping.
:rtype: dict
Returns
Response Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
A Python script to perform the mapping.
ScalaCode (string) --
The Scala code to perform the mapping.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
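Usage sketch: a minimal example, with hypothetical database, table, and column names, that requests a Python script for a single one-to-one column mapping; only Mapping and Source are required, per the parameter descriptions above.
import boto3

client = boto3.client('glue')
response = client.get_plan(
    Mapping=[
        {
            'SourceTable': 'src_table',   # hypothetical table
            'SourcePath': 'col_a',        # hypothetical column
            'SourceType': 'string',
            'TargetTable': 'dst_table',   # hypothetical table
            'TargetPath': 'col_a',
            'TargetType': 'string',
        },
    ],
    Source={'DatabaseName': 'src_db', 'TableName': 'src_table'},  # hypothetical names
    Language='PYTHON',
)
print(response['PythonScript'])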
"""
pass
def get_resource_policy():
"""
Retrieves a specified resource policy.
See also: AWS API Documentation
Exceptions
:example: response = client.get_resource_policy()
:rtype: dict
Returns
Response Syntax
{
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
Response Structure
(dict) --
PolicyInJson (string) --Contains the requested policy document, in JSON format.
PolicyHash (string) --Contains the hash value associated with this policy.
CreateTime (datetime) --The date and time at which the policy was created.
UpdateTime (datetime) --The date and time at which the policy was last updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
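Usage sketch: a minimal example that prints the catalog's resource policy, handling the EntityNotFoundException listed above for the case where no policy has been set.
import boto3

client = boto3.client('glue')
try:
    response = client.get_resource_policy()
    print(response['PolicyInJson'])
    print('hash:', response['PolicyHash'])
except client.exceptions.EntityNotFoundException:
    print('No resource policy is set on this Data Catalog.')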
"""
pass
def get_security_configuration(Name=None):
"""
Retrieves a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to retrieve.\n
:rtype: dict
Returns
Response Syntax
{
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
Response Structure
(dict) --
SecurityConfiguration (dict) --The requested security configuration.
Name (string) --The name of the security configuration.
CreatedTimeStamp (datetime) --The time at which this security configuration was created.
EncryptionConfiguration (dict) --The encryption configuration associated with this security configuration.
S3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
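Usage sketch: a minimal example, with a hypothetical configuration name, that reads the nested EncryptionConfiguration fields shown in the response structure above.
import boto3

client = boto3.client('glue')
response = client.get_security_configuration(Name='my-security-config')  # hypothetical name
enc = response['SecurityConfiguration']['EncryptionConfiguration']
for rule in enc.get('S3Encryption', []):
    print('S3:', rule['S3EncryptionMode'], rule.get('KmsKeyArn', '-'))
print('CloudWatch:', enc.get('CloudWatchEncryption', {}).get('CloudWatchEncryptionMode'))
print('Job bookmarks:', enc.get('JobBookmarksEncryption', {}).get('JobBookmarksEncryptionMode'))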
"""
pass
def get_security_configurations(MaxResults=None, NextToken=None):
"""
Retrieves a list of all security configurations.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configurations(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
Returns
Response Syntax
{
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
SecurityConfigurations (list) --
A list of security configurations.
(dict) --
Specifies a security configuration.
Name (string) --
The name of the security configuration.
CreatedTimeStamp (datetime) --
The time at which this security configuration was created.
EncryptionConfiguration (dict) --
The encryption configuration associated with this security configuration.
S3Encryption (list) --
The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --
Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --
The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --
The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --
The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --
The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --
The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
NextToken (string) --
A continuation token, if there are more security configurations to return.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
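Usage sketch: a minimal example that pages through all security configurations by looping on NextToken and prints each configuration's name and creation time.
import boto3

client = boto3.client('glue')
kwargs = {}
while True:
    response = client.get_security_configurations(**kwargs)
    for config in response.get('SecurityConfigurations', []):
        print(config['Name'], config['CreatedTimeStamp'])
    token = response.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token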
"""
pass
def get_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Retrieves the Table definition in a Data Catalog for a specified table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
Returns
Response Syntax
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
Response Structure
(dict) --
Table (dict) --
The Table object that defines the specified table.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
:returns:
(string) --
(string) --
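Usage sketch: a minimal example, with hypothetical names, that prints each column's name and type from the table's StorageDescriptor, plus the partition keys, per the response structure above.
import boto3

client = boto3.client('glue')
response = client.get_table(DatabaseName='my_database', Name='my_table')  # hypothetical names
table = response['Table']
for column in table['StorageDescriptor']['Columns']:
    print('column:', column['Name'], column['Type'])
for key in table.get('PartitionKeys', []):
    print('partition key:', key['Name'], key['Type'])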
"""
pass
def get_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Retrieves a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: The ID value of the table version to be retrieved. A VersionId is a string representation of an integer. Each version is incremented by 1.
:rtype: dict
Returns
Response Syntax
{
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
Response Structure
(dict) --
TableVersion (dict) --
The requested table version.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
:returns:
(string) --
(string) --
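Usage sketch: a minimal example with hypothetical database and table names, requesting version '2' of the table; VersionId is the string form of an integer, per the parameter description above.
import boto3

client = boto3.client('glue')
response = client.get_table_version(
    DatabaseName='my_database',  # hypothetical name
    TableName='my_table',        # hypothetical name
    VersionId='2',
)
version = response['TableVersion']
print(version['VersionId'], version['Table']['Name'])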
"""
pass
def get_table_versions(CatalogId=None, DatabaseName=None, TableName=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of strings that identify available versions of a specified table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table_versions(
CatalogId='string',
DatabaseName='string',
TableName='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call.
:type MaxResults: integer
:param MaxResults: The maximum number of table versions to return in one response.
:rtype: dict
Returns
Response Syntax
{
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableVersions (list) --
A list of strings identifying available versions of the specified table.
(dict) --
Specifies a version of a table.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
NextToken (string) --
A continuation token, if the list of available versions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
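Usage sketch: a minimal example, with hypothetical names, that walks every available version of a table by looping on NextToken and prints each VersionId.
import boto3

client = boto3.client('glue')
kwargs = {'DatabaseName': 'my_database', 'TableName': 'my_table'}  # hypothetical names
while True:
    response = client.get_table_versions(**kwargs)
    for version in response.get('TableVersions', []):
        print(version['VersionId'], version['Table'].get('UpdateTime'))
    token = response.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token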
"""
pass
def get_tables(CatalogId=None, DatabaseName=None, Expression=None, NextToken=None, MaxResults=None):
"""
Retrieves the definitions of some or all of the tables in a given Database .
See also: AWS API Documentation
Exceptions
:example: response = client.get_tables(
CatalogId='string',
DatabaseName='string',
Expression='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.\n
:type Expression: string
:param Expression: A regular expression pattern. If present, only those tables whose names match the pattern are returned.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
Returns
Response Syntax
{
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableList (list) --
A list of the requested Table objects.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
NextToken (string) --
A continuation token, present if the current list segment is not the last.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
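Usage sketch: a minimal example, with a hypothetical database name, that lists every table whose name matches a regular-expression Expression, paging with NextToken.
import boto3

client = boto3.client('glue')
kwargs = {
    'DatabaseName': 'my_database',  # hypothetical name
    'Expression': 'sales_.*',       # optional regex filter on table names
}
while True:
    response = client.get_tables(**kwargs)
    for table in response.get('TableList', []):
        print(table['Name'], table.get('TableType'))
    token = response.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token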
"""
pass
def get_tags(ResourceArn=None):
"""
Retrieves a list of tags associated with a resource.
See also: AWS API Documentation
Exceptions
:example: response = client.get_tags(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource for which to retrieve tags.\n
:rtype: dict
Returns
Response Syntax
{
'Tags': {
'string': 'string'
}
}
Response Structure
(dict) --
Tags (dict) --The requested tags.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Tags': {
'string': 'string'
}
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
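Usage sketch: a minimal example, with a hypothetical job ARN, that prints the returned tag key/value pairs.
import boto3

client = boto3.client('glue')
arn = 'arn:aws:glue:us-east-1:123456789012:job/my-job'  # hypothetical ARN
response = client.get_tags(ResourceArn=arn)
for key, value in response['Tags'].items():
    print(key, '=', value)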
"""
pass
def get_trigger(Name=None):
"""
Retrieves the definition of a trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.get_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to retrieve.\n
:rtype: dict
Returns
Response Syntax
{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --The requested trigger definition.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
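A minimal usage sketch, assuming default credentials; the trigger name is hypothetical:
import boto3
glue = boto3.client('glue')
trigger = glue.get_trigger(Name='example-trigger')['Trigger']
print(trigger['Name'], trigger['Type'], trigger['State'])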
"""
pass
def get_triggers(NextToken=None, DependentJobName=None, MaxResults=None):
"""
Gets all the triggers associated with a job.
See also: AWS API Documentation
Exceptions
:example: response = client.get_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type DependentJobName: string
:param DependentJobName: The name of the job to retrieve triggers for. The trigger that can start this job is returned, and if there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
Returns
Response Syntax
{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Triggers (list) --
A list of triggers for the specified job.
(dict) --
Information about a specific trigger.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
NextToken (string) --
A continuation token, if not all the requested triggers have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
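A minimal pagination sketch that follows NextToken, assuming default credentials; the dependent job name is hypothetical:
import boto3
glue = boto3.client('glue')
kwargs = {'DependentJobName': 'example-job', 'MaxResults': 100}
while True:
    page = glue.get_triggers(**kwargs)
    for trigger in page.get('Triggers', []):
        print(trigger['Name'], trigger['State'])
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token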
"""
pass
def get_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Retrieves a specified function definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be retrieved is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:rtype: dict
Returns
Response Syntax
{
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
Response Structure
(dict) --
UserDefinedFunction (dict) --
The requested function definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
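A minimal usage sketch, assuming default credentials; the database and function names are hypothetical:
import boto3
glue = boto3.client('glue')
udf = glue.get_user_defined_function(DatabaseName='example_db', FunctionName='example_udf')['UserDefinedFunction']
print(udf['FunctionName'], udf.get('ClassName'))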
"""
pass
def get_user_defined_functions(CatalogId=None, DatabaseName=None, Pattern=None, NextToken=None, MaxResults=None):
"""
Retrieves multiple function definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_user_defined_functions(
CatalogId='string',
DatabaseName='string',
Pattern='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the functions to be retrieved are located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: The name of the catalog database where the functions are located.
:type Pattern: string
:param Pattern: [REQUIRED]\nA function-name pattern string that filters the function definitions returned.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of functions to return in one response.
:rtype: dict
Returns
Response Syntax
{
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
UserDefinedFunctions (list) --
A list of requested function definitions.
(dict) --
Represents the equivalent of a Hive user-defined function (UDF ) definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
NextToken (string) --
A continuation token, if the list of functions returned does not include the last requested function.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
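A minimal listing sketch that follows NextToken, assuming default credentials; the database name and pattern are hypothetical:
import boto3
glue = boto3.client('glue')
kwargs = {'DatabaseName': 'example_db', 'Pattern': '*'}
while True:
    page = glue.get_user_defined_functions(**kwargs)
    for fn in page.get('UserDefinedFunctions', []):
        print(fn['FunctionName'])
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token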
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
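A minimal sketch; note that boto3 raises a ValueError for a waiter name the service does not define, and AWS Glue may expose few or no waiters:
import boto3
glue = boto3.client('glue')
print(glue.waiter_names)  # the waiters this client actually provides, possibly an empty list
if glue.waiter_names:
    waiter = glue.get_waiter(glue.waiter_names[0])
    # waiter.wait(...) would then block until the modeled condition is met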
"""
pass
def get_workflow(Name=None, IncludeGraph=None):
"""
Retrieves resource metadata for a workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow(
Name='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to retrieve.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
Returns
Response Syntax
{
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Workflow (dict) --
The resource metadata for the workflow.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique Id of the node within the workflow where the edge starts.
DestinationId (string) --
The unique Id of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique Id of the node within the workflow where the edge starts.
DestinationId (string) --
The unique Id of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
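A minimal sketch that walks the returned graph nodes, assuming default credentials; the workflow name is hypothetical:
import boto3
glue = boto3.client('glue')
wf = glue.get_workflow(Name='example-workflow', IncludeGraph=True)['Workflow']
for node in wf.get('Graph', {}).get('Nodes', []):
    print(node['Type'], node['Name'])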
"""
pass
def get_workflow_run(Name=None, RunId=None, IncludeGraph=None):
"""
Retrieves the metadata for a given workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run(
Name='string',
RunId='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in the response.
:rtype: dict
Returns
Response Syntax
{
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Run (dict) --
The requested workflow run metadata.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
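# Illustrative sketch (not part of the generated stubs): given the 'Run' dict
# from a get_workflow_run response shaped like the :return: block above,
# collect the JobRunState of every job run attached to JOB nodes in the graph.
def _example_summarize_workflow_graph(run):
    states = {}
    for node in run.get('Graph', {}).get('Nodes', []):
        if node.get('Type') == 'JOB':
            for job_run in node.get('JobDetails', {}).get('JobRuns', []):
                states[job_run['Id']] = job_run['JobRunState']
    return states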
def get_workflow_run_properties(Name=None, RunId=None):
"""
Retrieves the workflow run properties which were set during the run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run_properties(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run whose run properties should be returned.\n
:rtype: dict
ReturnsResponse Syntax
{
'RunProperties': {
'string': 'string'
}
}
Response Structure
(dict) --
RunProperties (dict) --
The workflow run properties which were set during the specified run.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'RunProperties': {
'string': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
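# Illustrative sketch: read back the run properties of a workflow run. The
# workflow_name and run_id arguments are placeholder assumptions.
import boto3

def _example_get_run_properties(workflow_name, run_id):
    glue = boto3.client('glue')
    resp = glue.get_workflow_run_properties(Name=workflow_name, RunId=run_id)
    # RunProperties is a plain string-to-string mapping, as documented above.
    return resp.get('RunProperties', {})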
def get_workflow_runs(Name=None, IncludeGraph=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_runs(
Name='string',
IncludeGraph=True|False,
NextToken='string',
MaxResults=123
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow whose metadata of runs should be returned.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in response or not.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum number of workflow runs to be included in the response.
:rtype: dict
ReturnsResponse Syntax
{
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Runs (list) --
A list of workflow run metadata objects.
(dict) --
A workflow run is an execution of a workflow providing all the runtime information.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components belonging to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
NextToken (string) --
A continuation token, if not all requested workflow runs have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
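# Illustrative sketch: page through all runs of a workflow by following
# NextToken, omitting the graph to keep responses small. The workflow_name
# argument is a placeholder assumption.
import boto3

def _example_iter_workflow_runs(workflow_name):
    glue = boto3.client('glue')
    token = None
    while True:
        kwargs = {'Name': workflow_name, 'IncludeGraph': False, 'MaxResults': 25}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_workflow_runs(**kwargs)
        for run in resp.get('Runs', []):
            yield run['WorkflowRunId'], run['Status']
        token = resp.get('NextToken')
        if not token:
            break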
def import_catalog_to_glue(CatalogId=None):
"""
Imports an existing Amazon Athena Data Catalog to AWS Glue
See also: AWS API Documentation
Exceptions
:example: response = client.import_catalog_to_glue(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to import. Currently, this should be the AWS account ID.
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def list_crawlers(MaxResults=None, NextToken=None, Tags=None):
"""
Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_crawlers(
MaxResults=123,
NextToken='string',
Tags={
'string': 'string'
}
)
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerNames (list) --
The names of all crawlers in the account, or the crawlers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last item available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
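# Illustrative sketch: list crawler names restricted to a tag, following
# NextToken until the listing is exhausted. The 'team': 'data-eng' tag pair is
# a placeholder assumption.
import boto3

def _example_list_crawlers_by_tag():
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'Tags': {'team': 'data-eng'}}
        if token:
            kwargs['NextToken'] = token
        resp = glue.list_crawlers(**kwargs)
        names.extend(resp.get('CrawlerNames', []))
        token = resp.get('NextToken')
        if not token:
            break
    return names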
def list_dev_endpoints(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_dev_endpoints(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpointNames (list) --
The names of all the DevEndpoints in the account, or the DevEndpoints with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last item available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_jobs(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_jobs(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'JobNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobNames (list) --
The names of all jobs in the account, or the jobs with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last item available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None, Tags=None):
"""
Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
},
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Filter: dict
:param Filter: A TransformFilterCriteria used to filter the machine learning transforms.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: A TransformSortCriteria used to sort the machine learning transforms.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformIds': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TransformIds (list) --
The identifiers of all the machine learning transforms in the account, or the machine learning transforms with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last item available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformIds': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
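# Illustrative sketch: list FIND_MATCHES transforms that are READY, newest
# first. The Filter and Sort values are taken from the request syntax above.
import boto3

def _example_list_ready_transforms():
    glue = boto3.client('glue')
    resp = glue.list_ml_transforms(
        Filter={'TransformType': 'FIND_MATCHES', 'Status': 'READY'},
        Sort={'Column': 'CREATED', 'SortDirection': 'DESCENDING'},
    )
    return resp.get('TransformIds', [])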
def list_triggers(NextToken=None, DependentJobName=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type DependentJobName: string
:param DependentJobName: The name of the job for which to retrieve triggers. The trigger that can start this job is returned. If there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TriggerNames (list) --
The names of all triggers in the account, or the triggers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last item available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
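# Illustrative sketch: find the triggers that can start a given job via the
# DependentJobName filter. The job_name argument is a placeholder assumption.
import boto3

def _example_triggers_for_job(job_name):
    glue = boto3.client('glue')
    resp = glue.list_triggers(DependentJobName=job_name)
    return resp.get('TriggerNames', [])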
def list_workflows(NextToken=None, MaxResults=None):
"""
Lists names of workflows created in the account.
See also: AWS API Documentation
Exceptions
:example: response = client.list_workflows(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:rtype: dict
ReturnsResponse Syntax
{
'Workflows': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
Workflows (list) --
List of names of workflows in the account.
(string) --
NextToken (string) --
A continuation token, if not all workflow names have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflows': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def put_data_catalog_encryption_settings(CatalogId=None, DataCatalogEncryptionSettings=None):
"""
Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.
See also: AWS API Documentation
Exceptions
:example: response = client.put_data_catalog_encryption_settings(
CatalogId='string',
DataCatalogEncryptionSettings={
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default.
:type DataCatalogEncryptionSettings: dict
:param DataCatalogEncryptionSettings: [REQUIRED]\nThe security configuration to set.\n\nEncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.\n\nCatalogEncryptionMode (string) -- [REQUIRED]The encryption-at-rest mode for encrypting Data Catalog data.\n\nSseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\n\n\n\nConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\n\nReturnConnectionPasswordEncrypted (boolean) -- [REQUIRED]When the ReturnConnectionPasswordEncrypted flag is set to 'true', passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.\n\nAwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.\nIf connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.\nYou can set the decrypt permission to enable or restrict access on the password key according to your security requirements.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
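# Illustrative sketch: enable SSE-KMS encryption at rest for the Data Catalog
# and connection-password encryption, following the request syntax above. The
# KMS key ARN is a placeholder assumption.
import boto3

def _example_put_catalog_encryption():
    glue = boto3.client('glue')
    glue.put_data_catalog_encryption_settings(
        DataCatalogEncryptionSettings={
            'EncryptionAtRest': {
                'CatalogEncryptionMode': 'SSE-KMS',
                'SseAwsKmsKeyId': 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE',
            },
            'ConnectionPasswordEncryption': {
                'ReturnConnectionPasswordEncrypted': True,
                'AwsKmsKeyId': 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE',
            },
        }
    )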
def put_resource_policy(PolicyInJson=None, PolicyHashCondition=None, PolicyExistsCondition=None):
"""
Sets the Data Catalog resource policy for access control.
See also: AWS API Documentation
Exceptions
:example: response = client.put_resource_policy(
PolicyInJson='string',
PolicyHashCondition='string',
PolicyExistsCondition='MUST_EXIST'|'NOT_EXIST'|'NONE'
)
:type PolicyInJson: string
:param PolicyInJson: [REQUIRED]\nContains the policy document to set, in JSON format.\n
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when the previous policy was set using PutResourcePolicy . Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.
:type PolicyExistsCondition: string
:param PolicyExistsCondition: A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.
:rtype: dict
ReturnsResponse Syntax
{
'PolicyHash': 'string'
}
Response Structure
(dict) --
PolicyHash (string) --
A hash of the policy that has just been set. This must be included in a subsequent call that overwrites or updates this policy.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {
'PolicyHash': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
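# Illustrative sketch: set a Data Catalog resource policy, using
# PolicyExistsCondition to distinguish create from update and carrying the
# previously returned PolicyHash forward to guard against concurrent
# modification. The policy document itself is a placeholder assumption.
import json

import boto3

def _example_put_resource_policy(policy_document, previous_hash=None):
    glue = boto3.client('glue')
    kwargs = {
        'PolicyInJson': json.dumps(policy_document),
        'PolicyExistsCondition': 'MUST_EXIST' if previous_hash else 'NOT_EXIST',
    }
    if previous_hash:
        kwargs['PolicyHashCondition'] = previous_hash
    resp = glue.put_resource_policy(**kwargs)
    # Keep the returned hash for the next update, as documented above.
    return resp['PolicyHash']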
def put_workflow_run_properties(Name=None, RunId=None, RunProperties=None):
"""
Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, its value is overridden; otherwise, the property is added to the existing properties.
See also: AWS API Documentation
Exceptions
:example: response = client.put_workflow_run_properties(
Name='string',
RunId='string',
RunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run for which the run properties should be updated.\n
:type RunProperties: dict
:param RunProperties: [REQUIRED]\nThe properties to put for the specified run.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
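# Illustrative sketch: record a custom key-value pair on a running workflow so
# that later steps in the same run can read it back with
# get_workflow_run_properties. The 'stage' property name and value are
# placeholder assumptions.
import boto3

def _example_tag_workflow_run(workflow_name, run_id):
    glue = boto3.client('glue')
    glue.put_workflow_run_properties(
        Name=workflow_name,
        RunId=run_id,
        RunProperties={'stage': 'validated'},
    )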
def reset_job_bookmark(JobName=None, RunId=None):
"""
Resets a bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.reset_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
ReturnsResponse Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
The reset bookmark entry.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
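# Illustrative sketch: reset a job's bookmark so the next run reprocesses
# previously bookmarked data. The job_name argument is a placeholder
# assumption.
import boto3

def _example_reset_bookmark(job_name):
    glue = boto3.client('glue')
    resp = glue.reset_job_bookmark(JobName=job_name)
    return resp['JobBookmarkEntry']['Version']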
def search_tables(CatalogId=None, NextToken=None, Filters=None, SearchText=None, SortCriteria=None, MaxResults=None):
"""
Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.
You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to the table for it to be returned. If you do not have access to all the columns in the table, those columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.
See also: AWS API Documentation
Exceptions
:example: response = client.search_tables(
CatalogId='string',
NextToken='string',
Filters=[
{
'Key': 'string',
'Value': 'string',
'Comparator': 'EQUALS'|'GREATER_THAN'|'LESS_THAN'|'GREATER_THAN_EQUALS'|'LESS_THAN_EQUALS'
},
],
SearchText='string',
SortCriteria=[
{
'FieldName': 'string',
'Sort': 'ASC'|'DESC'
},
],
MaxResults=123
)
:type CatalogId: string
:param CatalogId: A unique identifier, consisting of ``account_id/datalake``.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type Filters: list
:param Filters: A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.\n\n(dict) --Defines a property predicate.\n\nKey (string) --The key of the property.\n\nValue (string) --The value of the property.\n\nComparator (string) --The comparator used to compare this property to others.\n\n\n\n\n
:type SearchText: string
:param SearchText: A string used for a text search.\nSpecifying a value in quotes filters based on an exact match to the value.\n
:type SortCriteria: list
:param SortCriteria: A list of criteria for sorting the results by a field name, in an ascending or descending order.\n\n(dict) --Specifies a field to sort by and a sort order.\n\nFieldName (string) --The name of the field on which to sort.\n\nSort (string) --An ascending or descending sort.\n\n\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
Response Structure
(dict) --
NextToken (string) --
A continuation token, present if the current list segment is not the last.
TableList (list) --
A list of the requested Table objects. The SearchTables response returns only the tables that you have access to.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
:returns:
(string) --
(string) --
"""
pass
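# Illustrative sketch: search the Data Catalog for tables whose metadata
# matches a text query, newest first. The search text and the 'UpdateTime'
# sort field are placeholder assumptions; only parameters from the request
# syntax above are used.
import boto3

def _example_search_tables(text):
    glue = boto3.client('glue')
    resp = glue.search_tables(
        SearchText=text,
        SortCriteria=[{'FieldName': 'UpdateTime', 'Sort': 'DESC'}],
        MaxResults=50,
    )
    return [(t['DatabaseName'], t['Name']) for t in resp.get('TableList', [])]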
def start_crawler(Name=None):
"""
Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to start.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
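# Illustrative sketch: start a crawler and treat an already-running crawler as
# a non-fatal condition, using the modeled CrawlerRunningException documented
# above. The crawler_name argument is a placeholder assumption.
import boto3

def _example_start_crawler(crawler_name):
    glue = boto3.client('glue')
    try:
        glue.start_crawler(Name=crawler_name)
        return True
    except glue.exceptions.CrawlerRunningException:
        # Returned when the crawler is already running; nothing to do.
        return False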
def start_crawler_schedule(CrawlerName=None):
"""
Changes the schedule state of the specified crawler to SCHEDULED , unless the crawler is already running or the schedule state is already SCHEDULED .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler to schedule.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def start_export_labels_task_run(TransformId=None, OutputS3Path=None):
"""
Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId . You can check on the status of your task run by calling the GetMLTaskRun API.
See also: AWS API Documentation
Exceptions
:example: response = client.start_export_labels_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon S3 path where you export the labels.\n
:rtype: dict
ReturnsResponse Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def start_import_labels_task_run(TransformId=None, InputS3Path=None, ReplaceAllLabels=None):
"""
Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.
After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in machine learning workflows.) In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?" After the labeling process is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.
By default, StartImportLabelsTaskRun continually learns from and combines all labels that you upload unless you set ReplaceAllLabels to true. If you set ReplaceAllLabels to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.
You can check on the status of your task run by calling the GetMLTaskRun operation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_import_labels_task_run(
TransformId='string',
InputS3Path='string',
ReplaceAllLabels=True|False
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type InputS3Path: string
:param InputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path from where you import the labels.\n
:type ReplaceAllLabels: boolean
:param ReplaceAllLabels: Indicates whether to overwrite your existing labels.
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
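Usage sketch (illustrative only; the transform ID and labels path are hypothetical placeholders). It uploads a corrected label file and replaces all previously submitted labels, per the ReplaceAllLabels semantics described above:
    import boto3

    glue = boto3.client('glue')

    # Replace every previously uploaded label with the corrected set stored in S3.
    response = glue.start_import_labels_task_run(
        TransformId='tfm-0123456789abcdef0',
        InputS3Path='s3://example-bucket/glue/corrected-labels/labels.csv',
        ReplaceAllLabels=True
    )
    print(response['TaskRunId'])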
"""
pass
def start_job_run(JobName=None, JobRunId=None, Arguments=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, NotificationProperty=None, WorkerType=None, NumberOfWorkers=None):
"""
Starts a job run using a job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.start_job_run(
JobName='string',
JobRunId='string',
Arguments={
'string': 'string'
},
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
NotificationProperty={
'NotifyDelayAfter': 123
},
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to use.\n
:type JobRunId: string
:param JobRunId: The ID of a previous JobRun to retry.
:type Arguments: dict
:param Arguments: The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type AllocatedCapacity: integer
:param AllocatedCapacity: This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job run.
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .\n
:rtype: dict
Returns
Response Syntax
{
'JobRunId': 'string'
}
Response Structure
(dict) --
JobRunId (string) --
The ID assigned to this job run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'JobRunId': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
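Usage sketch (illustrative only; the job name, arguments, and worker settings are hypothetical placeholders). Because MaxCapacity must not be combined with WorkerType and NumberOfWorkers, the sketch uses only the worker-based settings:
    import boto3

    glue = boto3.client('glue')

    run = glue.start_job_run(
        JobName='example-etl-job',
        Arguments={
            '--source_path': 's3://example-bucket/raw/',  # consumed by the job script
            '--enable-metrics': ''                        # consumed by AWS Glue itself
        },
        WorkerType='G.1X',
        NumberOfWorkers=10,
        Timeout=120
    )
    print(run['JobRunId'])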
"""
pass
def start_ml_evaluation_task_run(TransformId=None):
"""
Starts a task to estimate the quality of the transform.
When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality.
Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the status of the EvaluationTaskRun .
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_evaluation_task_run(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --The unique identifier associated with this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
Glue.Client.exceptions.MLTransformNotReadyException
:return: {
'TaskRunId': 'string'
}
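Usage sketch (illustrative only; the transform ID is a hypothetical placeholder). The returned TaskRunId can be fed straight into GetMLTaskRun, as noted above:
    import boto3

    glue = boto3.client('glue')

    evaluation = glue.start_ml_evaluation_task_run(TransformId='tfm-0123456789abcdef0')
    details = glue.get_ml_task_run(
        TransformId='tfm-0123456789abcdef0',
        TaskRunId=evaluation['TaskRunId']
    )
    print(details['Status'])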
"""
pass
def start_ml_labeling_set_generation_task_run(TransformId=None, OutputS3Path=None):
"""
Starts the active learning workflow for your machine learning transform to improve the transform\'s quality by generating label sets and adding labels.
When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer.
In the case of the FindMatches transform, these questions are of the form, \xe2\x80\x9cWhat is the correct way to group these rows together into groups composed entirely of matching records?\xe2\x80\x9d
After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_labeling_set_generation_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path where you generate the labeling set.\n
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique run identifier that is associated with this task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
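Usage sketch tying the active learning steps together (illustrative only; the transform ID and S3 paths are hypothetical placeholders):
    import boto3

    glue = boto3.client('glue')
    transform_id = 'tfm-0123456789abcdef0'

    # 1. Generate a labeling set for humans to answer.
    generation = glue.start_ml_labeling_set_generation_task_run(
        TransformId=transform_id,
        OutputS3Path='s3://example-bucket/glue/labeling-sets/'
    )

    # 2. Once the questions have been answered and uploaded, import the answers.
    glue.start_import_labels_task_run(
        TransformId=transform_id,
        InputS3Path='s3://example-bucket/glue/answered-labels/labels.csv',
        ReplaceAllLabels=False
    )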
"""
pass
def start_trigger(Name=None):
"""
Starts an existing trigger. See Triggering Jobs for information about how different types of trigger are started.
See also: AWS API Documentation
Exceptions
:example: response = client.start_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to start.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was started.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'Name': 'string'
}
"""
pass
def start_workflow_run(Name=None):
"""
Starts a new run of the specified workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.start_workflow_run(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to start.\n
:rtype: dict
Returns
Response Syntax
{
'RunId': 'string'
}
Response Structure
(dict) --
RunId (string) --An Id for the new run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'RunId': 'string'
}
"""
pass
def stop_crawler(Name=None):
"""
If the specified crawler is running, stops the crawl.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to stop.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_crawler_schedule(CrawlerName=None):
"""
Sets the schedule state of the specified crawler to NOT_SCHEDULED , but does not stop the crawler if it is already running.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler whose schedule state to set.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_trigger(Name=None):
"""
Stops a specified trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to stop.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was stopped.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def stop_workflow_run(Name=None, RunId=None):
"""
Stops the execution of the specified workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_workflow_run(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to stop.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run to stop.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.IllegalWorkflowStateException
:return: {}
:returns:
(dict) --
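Usage sketch (illustrative only; the workflow name is hypothetical). It shows where the required RunId comes from, namely the RunId returned by StartWorkflowRun:
    import boto3

    glue = boto3.client('glue')

    started = glue.start_workflow_run(Name='example-workflow')
    glue.stop_workflow_run(
        Name='example-workflow',
        RunId=started['RunId']
    )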
"""
pass
def tag_resource(ResourceArn=None, TagsToAdd=None):
"""
Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue .
See also: AWS API Documentation
Exceptions
:example: response = client.tag_resource(
ResourceArn='string',
TagsToAdd={
'string': 'string'
}
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe ARN of the AWS Glue resource to which to add the tags. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern .\n
:type TagsToAdd: dict
:param TagsToAdd: [REQUIRED]\nTags to add to this resource.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
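Usage sketch (illustrative only; the ARN, account ID, and tag keys are hypothetical placeholders following the AWS Glue ARN string pattern mentioned above):
    import boto3

    glue = boto3.client('glue')
    job_arn = 'arn:aws:glue:us-east-1:123456789012:job/example-etl-job'  # hypothetical job ARN

    glue.tag_resource(
        ResourceArn=job_arn,
        TagsToAdd={'team': 'data-platform', 'env': 'dev'}
    )
    # Tags are removed by key with the companion UntagResource operation.
    glue.untag_resource(ResourceArn=job_arn, TagsToRemove=['env'])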
"""
pass
def untag_resource(ResourceArn=None, TagsToRemove=None):
"""
Removes tags from a resource.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceArn='string',
TagsToRemove=[
'string',
]
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource from which to remove the tags.\n
:type TagsToRemove: list
:param TagsToRemove: [REQUIRED]\nTags to remove from this resource.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
"""
pass
def update_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Modifies an existing classifier (a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field is present).
See also: AWS API Documentation
Exceptions
:example: response = client.update_classifier(
GrokClassifier={
'Name': 'string',
'Classification': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Name': 'string',
'Classification': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the GrokClassifier .\n\nClassification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nGrokPattern (string) --The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nClassification (string) --An identifier of the data format that the classifier matches.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This cannot identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
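Usage sketch (illustrative only; the classifier name and delimiter are hypothetical). Only the field for the classifier type being updated is supplied, as the description above implies:
    import boto3

    glue = boto3.client('glue')

    glue.update_classifier(
        CsvClassifier={
            'Name': 'example-csv-classifier',
            'Delimiter': ';',
            'ContainsHeader': 'PRESENT',
            'AllowSingleColumn': False
        }
    )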
"""
pass
def update_connection(CatalogId=None, Name=None, ConnectionInput=None):
"""
Updates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_connection(
CatalogId='string',
Name='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to update.\n
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object that redefines the connection in question.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):
"""
Updates a crawler. If a crawler is running, you must stop it using StopCrawler before updating it.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to update.\n
:type Role: string
:param Role: The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by the crawler to access customer resources.
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the crawler.
:type Targets: dict
:param Targets: A list of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def update_crawler_schedule(CrawlerName=None, Schedule=None):
"""
Updates the schedule of a crawler using a cron expression.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler_schedule(
CrawlerName='string',
Schedule='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nThe name of the crawler whose schedule to update.\n
:type Schedule: string
:param Schedule: The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
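Usage sketch (illustrative only; the crawler name is hypothetical). The cron expression mirrors the 12:15 UTC example given above:
    import boto3

    glue = boto3.client('glue')

    glue.update_crawler_schedule(
        CrawlerName='example-crawler',
        Schedule='cron(15 12 * * ? *)'
    )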
"""
pass
def update_database(CatalogId=None, Name=None, DatabaseInput=None):
"""
Updates an existing database definition in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_database(
CatalogId='string',
Name='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the metadata database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to update in the catalog. For Hive compatibility, this is folded to lowercase.\n
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nA DatabaseInput object specifying the new definition of the metadata database in the catalog.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_dev_endpoint(EndpointName=None, PublicKey=None, AddPublicKeys=None, DeletePublicKeys=None, CustomLibraries=None, UpdateEtlLibraries=None, DeleteArguments=None, AddArguments=None):
"""
Updates a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.update_dev_endpoint(
EndpointName='string',
PublicKey='string',
AddPublicKeys=[
'string',
],
DeletePublicKeys=[
'string',
],
CustomLibraries={
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string'
},
UpdateEtlLibraries=True|False,
DeleteArguments=[
'string',
],
AddArguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint to be updated.\n
:type PublicKey: string
:param PublicKey: The public key for the DevEndpoint to use.
:type AddPublicKeys: list
:param AddPublicKeys: The list of public keys for the DevEndpoint to use.\n\n(string) --\n\n
:type DeletePublicKeys: list
:param DeletePublicKeys: The list of public keys to be deleted from the DevEndpoint .\n\n(string) --\n\n
:type CustomLibraries: dict
:param CustomLibraries: Custom Python or Java libraries to be loaded in the DevEndpoint .\n\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon Simple Storage Service (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java/Scala libraries with a DevEndpoint .\n\n\n\n
:type UpdateEtlLibraries: boolean
:param UpdateEtlLibraries: True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.
:type DeleteArguments: list
:param DeleteArguments: The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint .\n\n(string) --\n\n
:type AddArguments: dict
:param AddArguments: The map of arguments to add to the map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n'--enable-glue-datacatalog': ''\n'GLUE_PYTHON_VERSION': '3'\n'GLUE_PYTHON_VERSION': '2'\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
:return: {}
:returns:
(dict) --
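Usage sketch (illustrative only; the endpoint name, key material, and library path are hypothetical placeholders):
    import boto3

    glue = boto3.client('glue')

    glue.update_dev_endpoint(
        EndpointName='example-dev-endpoint',
        AddPublicKeys=['ssh-rsa AAAAexamplekey'],  # hypothetical public key
        CustomLibraries={
            'ExtraPythonLibsS3Path': 's3://example-bucket/libs/helpers.zip'
        },
        UpdateEtlLibraries=True,
        AddArguments={'GLUE_PYTHON_VERSION': '3'}
    )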
"""
pass
def update_job(JobName=None, JobUpdate=None):
"""
Updates an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_job(
JobName='string',
JobUpdate={
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to update.\n
:type JobUpdate: dict
:param JobUpdate: [REQUIRED]\nSpecifies the values with which to update the job definition.\n\nDescription (string) --Description of the job being defined.\n\nLogUri (string) --This field is reserved for future use.\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required).\n\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --The JobCommand that executes this job (required).\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --The maximum number of times to retry this job if it fails.\n\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. 
Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --Specifies the configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\n\n\n
:rtype: dict
Returns
Response Syntax
{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --
Returns the name of the updated job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'JobName': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
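Usage sketch (illustrative only; the job name and settings are hypothetical). A common pattern is to read the current definition with GetJob, adjust it, and send back a JobUpdate that still carries the Role and Command fields marked as required above:
    import boto3

    glue = boto3.client('glue')

    current = glue.get_job(JobName='example-etl-job')['Job']

    glue.update_job(
        JobName='example-etl-job',
        JobUpdate={
            'Role': current['Role'],
            'Command': current['Command'],
            'WorkerType': 'G.2X',
            'NumberOfWorkers': 20,
            'Timeout': 180
        }
    )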
"""
pass
def update_ml_transform(TransformId=None, Name=None, Description=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None):
"""
Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).
See also: AWS API Documentation
Exceptions
:example: response = client.update_ml_transform(
TransformId='string',
Name='string',
Description='string',
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123
)
:type TransformId: string
:param TransformId: [REQUIRED]\nA unique identifier that was generated when the transform was created.\n
:type Name: string
:param Name: The unique name that you gave the transform when you created it.
:type Description: string
:param Description: A description of the transform. The default is an empty string.
:type Parameters: dict
:param Parameters: The configuration parameters that are specific to the transform type (algorithm) used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.
:type Timeout: integer
:param Timeout: The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier for the transform that was updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
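Usage sketch (illustrative only; the transform ID, column name, and tuning values are hypothetical). It nudges the FindMatches transform toward precision, per the PrecisionRecallTradeoff description above:
    import boto3

    glue = boto3.client('glue')

    glue.update_ml_transform(
        TransformId='tfm-0123456789abcdef0',
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {
                'PrimaryKeyColumnName': 'record_id',
                'PrecisionRecallTradeoff': 0.9,
                'AccuracyCostTradeoff': 0.5
            }
        }
    )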
"""
pass
def update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None):
"""
Updates a partition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValueList=[
'string',
],
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be updated resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table in which the partition to be updated is located.\n
:type PartitionValueList: list
:param PartitionValueList: [REQUIRED]\nA list of the values defining the partition.\n\n(string) --\n\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nThe new partition object to update the partition to.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
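Usage sketch (illustrative only; the database, table, partition values, and storage classes are hypothetical placeholders). PartitionValueList identifies the existing partition, while PartitionInput carries its new definition:
    import boto3

    glue = boto3.client('glue')

    glue.update_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionValueList=['2020', '05'],  # current key values, in partition-key order
        PartitionInput={
            'Values': ['2020', '05'],
            'StorageDescriptor': {
                'Location': 's3://example-bucket/data/year=2020/month=05/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
                }
            }
        }
    )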
"""
pass
def update_table(CatalogId=None, DatabaseName=None, TableInput=None, SkipArchive=None):
"""
Updates a metadata table in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
},
SkipArchive=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nAn updated TableInput object to define the metadata table in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:type SkipArchive: boolean
:param SkipArchive: By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version.
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_trigger(Name=None, TriggerUpdate=None):
"""
Updates a trigger definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_trigger(
Name='string',
TriggerUpdate={
'Name': 'string',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to update.\n
:type TriggerUpdate: dict
:param TriggerUpdate: [REQUIRED]\nThe new values with which to update the trigger.\n\nName (string) --Reserved for future use.\n\nDescription (string) --A description of this trigger.\n\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --The actions initiated by this trigger.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --
The resulting trigger definition.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
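# Illustrative usage (a sketch, not part of the generated stub): assuming boto3
# is installed and AWS credentials are configured, a conditional trigger could
# be updated roughly like this (trigger and job names are hypothetical):
#
#   import boto3
#   glue = boto3.client('glue')
#   glue.update_trigger(
#       Name='nightly-trigger',
#       TriggerUpdate={
#           'Actions': [{'JobName': 'nightly-etl-job'}],
#           'Predicate': {
#               'Logical': 'AND',
#               'Conditions': [{'LogicalOperator': 'EQUALS',
#                               'JobName': 'upstream-job',
#                               'State': 'SUCCEEDED'}],
#           },
#       },
#   )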
def update_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None, FunctionInput=None):
"""
Updates an existing function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be updated is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function to be updated is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that redefines the function in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_workflow(Name=None, Description=None, DefaultRunProperties=None):
"""
Updates an existing workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.update_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be updated.\n
:type Description: string
:param Description: The description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow that was specified in the input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
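# Illustrative usage (a sketch; the workflow name and run properties are
# hypothetical placeholders, and boto3 plus AWS credentials are assumed):
#
#   import boto3
#   glue = boto3.client('glue')
#   glue.update_workflow(
#       Name='etl-workflow',
#       Description='Nightly ETL pipeline',
#       DefaultRunProperties={'environment': 'production'},
#   )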
| mit |
alekz112/statsmodels | statsmodels/datasets/tests/test_utils.py | 26 | 1697 | import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
if sys.version_info[0] >= 3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
pass
#duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
else:
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
#base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
| bsd-3-clause |
vene/ambra | ambra/cross_validation.py | 1 | 9371 | import numbers
import time
import numpy as np
from sklearn.utils import safe_indexing
from sklearn.base import is_classifier, clone
from sklearn.metrics.scorer import check_scoring
from sklearn.externals.joblib import Parallel, delayed, logger
from ambra.backports import _num_samples, indexable
from sklearn.cross_validation import check_cv
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
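# Note on the precomputed-kernel branch above: for a square kernel matrix K of
# shape (n_samples, n_samples), the training split is K[np.ix_(train, train)],
# while the split used for scoring is K[np.ix_(test, train)] -- test rows
# against training columns -- which is what passing train_indices produces.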
def _score(estimator, X_test, y_test, scorer, **params):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test, **params)
else:
score = scorer(estimator, X_test, y_test, **params)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
scorer_params=None):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
scorer_params : dict, optional
Parameters to pass to the scorer. Can be used for sample weights
and sample groups.
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y,
scorer, train, test, verbose,
None, fit_params, scorer_params)
for train, test in cv)
return np.array(scores)[:, 0]
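# Example usage (a sketch; the estimator, data and sample_weight array below
# are placeholders, not part of this module):
#
#   from sklearn.linear_model import LogisticRegression
#   scores = cross_val_score(LogisticRegression(), X, y, cv=5,
#                            scorer_params={'sample_weight': sample_weight})
#   print(scores.mean())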
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
fit_params, scorer_params, return_train_score=False,
return_parameters=False):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like or None
The target variable to try to predict in the case of
supervised learning.
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape = (n_train_samples,)
Indices of training samples.
test : array-like, shape = (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
scorer_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
# Same, but take both slices
scorer_params = scorer_params if scorer_params is not None else {}
train_scorer_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__')
and len(v) == n_samples
else v)
for k, v in scorer_params.items()])
test_scorer_params = dict([(k, np.asarray(v)[test]
if hasattr(v, '__len__')
and len(v) == n_samples
else v)
for k, v in scorer_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
test_score = _score(estimator, X_test, y_test, scorer,
**test_scorer_params)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer,
**train_scorer_params)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
| bsd-2-clause |
jlegendary/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
glouppe/scikit-learn | examples/model_selection/plot_roc.py | 49 | 5041 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
dahlstrom-g/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_vars.py | 7 | 26282 | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import math
import pickle
from _pydev_bundle.pydev_imports import quote
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES, NUMPY_FLOATING_POINT_TYPES
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
try:
from collections import OrderedDict
except:
OrderedDict = dict
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import VariableWithOffset
SENTINEL_VALUE = []
DEFAULT_DF_FORMAT = "s"
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_current_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
# Note: commented this error message out (it may commonly happen
# if a message asking for a frame is issued while a thread is paused
# but the thread starts running before the message is actually
# handled).
# Leaving code to uncomment during tests.
# err_msg = '''find_frame: frame not found.
# Looking for thread_id:%s, frame_id:%s
# Current thread_id:%s, available frames:
# %s\n
# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
#
# sys.stderr.write(err_msg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2).
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
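# Illustrative (hypothetical) call: to resolve `obj.attr1.attr2` in the mixed
# locals/globals scope of a frame, the attribute chain is passed tab-separated:
#
#   getVariable(thread_id, frame_id, 'FRAME', 'obj\tattr1\tattr2')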
def get_offset(attrs):
"""
Extract offset from the given attributes.
:param attrs: The string of a compound variable fields split by tabs.
If an offset is given, it must go the first element.
:return: The value of offset if given or 0.
"""
offset = 0
if attrs is not None:
try:
offset = int(attrs.split('\t')[0])
except ValueError:
pass
return offset
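# For example, get_offset('300\tobj\tattr1') returns 300, while
# get_offset('obj\tattr1') returns 0 because the first field is not an integer.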
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
:note: PyCharm supports progressive loading of large collections and uses the `attrs`
parameter to pass the offset, e.g. 300\t\\obj\tattr1\tattr2 should return
the value of attr2 starting from the 300th element. This hack makes it possible
to add the support of progressive loading without extending of the protocol.
"""
offset = get_offset(attrs)
orig_attrs, attrs = attrs, attrs.split('\t', 1)[1] if offset else attrs
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, orig_attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
"""
offset = get_offset(attrs)
attrs = attrs.split('\t', 1)[1] if offset else attrs
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
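# Example of the name-mangling fallback above (a sketch): evaluating
# 'self.__secret' inside a method of class Foo fails under plain eval because
# the attribute is stored as '_Foo__secret', so the loop retries with the
# mangled name 'self._Foo__secret' and returns that value instead.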
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if value is SENTINEL_VALUE:
# It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with
# `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in
# list comprehension expressions. This variable evaluates to 0. by `eval`, which is not what we want and this is the main
# reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.
value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = float('inf')
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return xml
class ExceedingArrayDimensionsException(Exception):
pass
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException()
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES and array.size != 0:
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def get_column_formatter_by_type(initial_format, column_type):
if column_type in NUMPY_NUMERIC_TYPES and initial_format:
if column_type in NUMPY_FLOATING_POINT_TYPES and initial_format.strip() == DEFAULT_DF_FORMAT:
# use custom formatting for floats when default formatting is set
return array_default_format(column_type)
return initial_format
else:
return array_default_format(column_type)
def get_formatted_row_elements(row, iat, dim, cols, format, dtypes):
for c in range(cols):
val = iat[row, c] if dim > 1 else iat[row]
col_formatter = get_column_formatter_by_type(format, dtypes[c])
try:
yield ("%" + col_formatter) % (val,)
except TypeError:
yield ("%" + DEFAULT_DF_FORMAT) % (val,)
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
DATAFRAME_HEADER_LOAD_MAX_SIZE = 100
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
original_df = df
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except (IndexError, KeyError):
kind = 'O'
format = array_default_format(kind)
else:
format = array_default_format(DEFAULT_DF_FORMAT)
xml = slice_to_xml(name, num_rows, num_cols, format, "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
elif (rows, cols) == (0, 0):
# return header only
r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
xml += header_data_to_xml(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
return xml
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(column_type):
return get_column_formatter_by_type(format, column_type)
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
def formatted_row_elements(row):
return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, formatted_row_elements, format)
return xml
def array_data_to_xml(rows, cols, get_row, format):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % row
for value in get_row(row):
xml += var_to_xml(value, '', format=format)
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, quote(format), type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))
bounds = col_bounds[col]
col_format = "%" + col_to_format(dtypes[col])
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(dtypes[col]), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
def is_able_to_format_number(format):
try:
format % math.pi
except Exception:
return False
return True
TYPE_TO_XML_CONVERTERS = {
"ndarray": array_to_xml,
"DataFrame": dataframe_to_xml,
"Series": dataframe_to_xml,
"GeoDataFrame": dataframe_to_xml,
"GeoSeries": dataframe_to_xml
}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
| apache-2.0 |
rmm-fcul/workshops | 2015_graz/binary_choice/two_arenas_real_real/casu_utils.py | 5 | 8116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
a library of functions used in CASU controller dynamics. Collects code that
would otherwise be duplicated and messier inside the individual controllers.
RM, Feb 2015
'''
import numpy as np
from assisipy import casu
#import matplotlib.cm as cm
from datetime import datetime
import parsing
import time
### ============= maths ============= ###
#{{{ rolling_avg
def rolling_avg(x, n):
'''
given the sample x, provide a rolling average taking n samples per data point.
NOT a quick solution, but easy...
'''
y = np.zeros((len(x),))
for ctr in range(len(x)):
y[ctr] = np.sum(x[ctr:(ctr+n)])
return y/n
#}}}
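# Minimal usage sketch for rolling_avg (values are illustrative only):
#   >>> x = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> rolling_avg(x, 2)
#   array([ 1.5,  2.5,  3.5,  2. ])
# The trailing entries average over fewer than n real samples, because the
# slice x[ctr:(ctr+n)] is truncated at the end of the array yet still divided by n.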
### ============= general behaviour ============= ###
#{{{ measure_ir_sensors
def measure_ir_sensors(mycasu, detect_data):
''' count up sensors that detect a bee, plus rotate history array '''
# don't discriminate between specific directions, so just accumulate all
count = 0
for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):
if (val > t):
count += 1
#print "raw:",
#print ",".join(["{:.2f}".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])
#mycasu.total_count += count # historical count over all time
detect_data = np.roll(detect_data, 1) # step all positions back
detect_data[0] = count # and overwrite the first entry (this was rolled
# around, so is the oldest entry -- and to become the newest now)
# allow ext usage to apply window -- remain agnostic here during collection.
return detect_data, count
#}}}
#{{{ heater_one_step
def heater_one_step(h):
'''legacy function'''
return detect_bee_proximity_saturated(h)
def detect_bee_proximity_saturated(h):
# measure proximity
detect_data, count = measure_ir_sensors(h, h.detect_data)
h.detect_data = detect_data
# overall bee count for this casu
sat_count = min(h.sat_lim, count) # saturates
return sat_count
#}}}
#{{{ find_mean_ext_temp
def find_mean_ext_temp(h):
r = []
for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:
r.append(h.get_temp(sensor))
if len(r):
mean = sum(r) / float(len(r))
else:
mean = 0.0
return mean
#}}}
### ============= inter-casu comms ============= ###
#{{{ comms functions
def transmit_my_count(h, sat_count, dest='accomplice'):
s = "{}".format(sat_count)
if h.verb > 1:
print "\t[i]==> {} send msg ({} by): '{}' bees, to {}".format(
h._thename, len(s), s, dest)
h.send_message(dest, s)
#TODO: this is non-specific, i.e., any message from anyone is assumed to have
# the right form. For heterogeneous neighbours, we need to check identity as
# well
def recv_all_msgs(h, retry_cnt=0, max_recv=None):
'''
    continue to read the message buffer until no more messages remain.
    Returns a list of messages parsed into (src, float) pairs.
'''
msgs = []
try_cnt = 0
while(True):
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
msgs.append((src, bee_cnt))
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename,
BLU, ENDC)
if h.verb > 1:
#print dir(msg)
print msg.items()
if(max_recv is not None and len(msgs) >= max_recv):
break
else:
# buffer emptied, return
try_cnt += 1
if try_cnt > retry_cnt:
break
return msgs
def recv_neighbour_msg(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = int(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
    return bee_cnt
def recv_neighbour_msg_w_src(h):
''' provide the source of a message as well as the message count'''
bee_cnt = 0
src = None
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
if h.verb > 1:
#print dir(msg)
print msg.items()
return bee_cnt, src
def recv_neighbour_msg_flt(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = float(txt.split()[0])
if h.verb > 1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
    return bee_cnt
#}}}
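# Message format assumed by the comms helpers above (illustrative sketch):
#   sender side:   h.send_message('accomplice', "{}".format(sat_count))   # e.g. "3"
#   receiver side: bee_cnt = float(msg['data'].strip().split()[0])        # -> 3.0
# i.e. the payload is a plain string whose first whitespace-separated token is
# the bee count; any trailing text is ignored by the parsers above.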
def find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):
links = parsing.find_comm_link_mapping(
name, rtc_path=rtc_path, suffix=suffix, verb=verb)
if verb:
print "[I] for {}, found the following nodes/edges".format(name)
print "\t", links.items()
print "\n===================================\n\n"
return links
### ============= display ============= ###
#{{{ term codes for colored text
ERR = '\033[41m'
BLU = '\033[34m'
ENDC = '\033[0m'
#}}}
#{{{ color funcs
#def gen_cmap(m='hot', n=32) :
# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps
def gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):
t_rng = float(max_temp - min_temp)
fr = (new_temp - min_temp) / t_rng
i = int(fr * len(cmap))
# compute basic color, if on target
#r,g,b,a = cmap(i)
g = 0.0; b = 0.0; a = 1.0;
i = sorted([0, i, len(cmap)-1])[1]
r = cmap[i]
# now adjust according to distance from target
if tgt is None: tgt=new_temp
dt = np.abs(new_temp - tgt)
dt_r = dt / t_rng
h2 = np.array([r,g,b])
h2 *= (1-dt_r)
return h2
# a colormap with 8 settings, that doesn't depend on the presence of
# matplotlib (hard-coded though.) -- deprecating
_clrs = [
(0.2, 0.2, 0.2),
(0.041, 0, 0),
(0.412, 0, 0),
(0.793, 0, 0),
(1, 0.174, 0),
(1, 0.555, 0),
(1, 0.936, 0),
(1, 1, 0.475),
(1, 1, 1),
]
_dflt_clr = (0.2, 0.2, 0.2)
# can access other gradations of colour using M = cm.hot(n) for n steps, then
# either extract them once (`clrs = M(arange(n))`) or each time (`clr_x = M(x)`)
# But here we're going to use 8 steps for all CASUs so no bother.
#}}}
def sep_with_nowtime():
print "# =================== t={} =================== #\n".format(
datetime.now().strftime("%H:%M:%S"))
### ============= more generic ============= ###
#{{{ a struct constructor
# some handy python utilities, from Kier Dugan
class Struct:
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def get(self, key, default=None):
return self.__dict__.get(key, default)
def addFields(self, **kwargs):
# add other fields (basically variables) after initialisation
self.__dict__.update (kwargs)
#}}}
### calibration
def _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):
'''
read the sensors several times, and take the highest reading
seen as the threshold.
'''
h._raw_thresh = [0] * 7 # default cases for threshold
for stp in xrange(calib_steps):
for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):
if v > h._raw_thresh[i]:
h._raw_thresh[i] = v
time.sleep(interval)
h.thresh = [x*calib_gain for x in h._raw_thresh]
h.threshold = [x*calib_gain for x in h._raw_thresh]
if h.verb:
_ts =", ".join(["{:.2f}".format(x) for x in h.thresh])
print "[I] post-calibration, we have thresh: ", _ts
| lgpl-3.0 |
zooniverse/aggregation | docs/source/conf.py | 1 | 9778 | # -*- coding: utf-8 -*-
#
# Zooniverse Aggregation Engine documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 14 11:15:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zooniverse Aggregation Engine'
copyright = u'2016, Zooniverse'
author = u'Greg Hines'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.9'
# The full version, including alpha/beta/rc tags.
release = u'0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZooniverseAggregationEnginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ZooniverseAggregationEngine.tex', u'Zooniverse Aggregation Engine Documentation',
u'Greg Hines', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zooniverseaggregationengine', u'Zooniverse Aggregation Engine Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ZooniverseAggregationEngine', u'Zooniverse Aggregation Engine Documentation',
author, 'ZooniverseAggregationEngine', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['shapely','pandas','numpy','scipy','cassandra-driver',"sklearn"]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) | apache-2.0 |
akloster/bokeh | bokeh/properties.py | 20 | 42601 | """ Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import re
import types
import difflib
import datetime
import dateutil.parser
import collections
from importlib import import_module
from copy import copy
from warnings import warn
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums
from .util.string import nice_join
def field(name):
    ''' Convenience function to explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
def value(val):
    ''' Convenience function to explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
dict : `{"value": name}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class Property(object):
""" Base class for all type properties. """
def __init__(self, default=None, help=None):
""" This is how the descriptor is created in the class declaration """
if isinstance(default, types.FunctionType): # aka. lazy value
self.validate(default())
else:
self.validate(default)
self._default = default
self.__doc__ = help
self.alternatives = []
# This gets set by the class decorator at class creation time
self.name = "unnamed"
def __str__(self):
return self.__class__.__name__
@property
def _name(self):
return "_" + self.name
@property
def default(self):
if not isinstance(self._default, types.FunctionType):
return copy(self._default)
else:
value = self._default()
self.validate(value)
return value
@classmethod
def autocreate(cls, name=None):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def matches(self, new, old):
# XXX: originally this code warned about not being able to compare values, but that
# doesn't make sense, because most comparisons involving numpy arrays will fail with
        # ValueError exception, so the warning would be inevitable.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
return False
def from_json(self, json, models=None):
return json
def transform(self, value):
return value
def validate(self, value):
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
def _get(self, obj):
if not hasattr(obj, self._name):
setattr(obj, self._name, self.default)
return getattr(obj, self._name)
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
old = self.__get__(obj)
obj._changed_vars.add(self.name)
if self._name in obj.__dict__ and self.matches(value, old):
return
setattr(obj, self._name, value)
obj._dirty = True
if hasattr(obj, '_trigger'):
if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
obj._callback_queue.append((self.name, old, value))
else:
obj._trigger(self.name, old, value)
def __delete__(self, obj):
if hasattr(obj, self._name):
delattr(obj, self._name)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
class Include(object):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
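# Minimal usage sketch for Include (class and property names below are
# illustrative, not part of this module):
#
#   class FillProps(HasProps):
#       fill_color = Color()
#       fill_alpha = Percent(1.0)
#
#   class Glyph(HasProps):
#       border_props = Include(FillProps)
#
# With use_prefix=True (the default) the metaclass strips the trailing "_props"
# from the attribute name, so Glyph exposes border_fill_color and border_fill_alpha.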
class MetaHasProps(type):
def __new__(cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# First pre-process to handle all the Includes
includes = {}
removes = set()
for name, prop in class_dict.items():
if not isinstance(prop, Include):
continue
delegate = prop.delegate
if prop.use_prefix:
prefix = re.sub("_props$", "", name) + "_"
else:
prefix = ""
for subpropname in delegate.class_properties(withbases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, Property):
# If it's an actual instance, then we need to make a copy
# so two properties don't write to the same hidden variable
# inside the instance.
subprop = copy(subprop)
if "%s" in prop.help:
doc = prop.help % subpropname.replace('_', ' ')
else:
doc = prop.help
try:
includes[fullpropname] = subprop(help=doc)
except TypeError:
includes[fullpropname] = subprop
subprop.__doc__ = doc
# Remove the name of the Include attribute itself
removes.add(name)
# Update the class dictionary, taking care not to overwrite values
# from the delegates that the subclass may have explicitly defined
for key, val in includes.items():
if key not in class_dict:
class_dict[key] = val
for tmp in removes:
del class_dict[tmp]
dataspecs = {}
units_to_add = {}
for name, prop in class_dict.items():
if isinstance(prop, Property):
prop.name = name
if prop.has_ref:
names_with_refs.add(name)
elif isinstance(prop, ContainerProperty):
container_names.add(name)
names.add(name)
if isinstance(prop, DataSpec):
dataspecs[name] = prop
if hasattr(prop, '_units_type'):
units_to_add[name+"_units"] = prop._units_type
elif isinstance(prop, type) and issubclass(prop, Property):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
newprop = prop.autocreate(name=name)
class_dict[name] = newprop
newprop.name = name
names.add(name)
# Process dataspecs
if issubclass(prop, DataSpec):
dataspecs[name] = newprop
for name, prop in units_to_add.items():
prop.name = name
names.add(name)
class_dict[name] = prop
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if dataspecs:
class_dict["_dataspecs"] = dataspecs
return type.__new__(cls, class_name, bases, class_dict)
def accumulate_from_subclasses(cls, propname):
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps):
s.update(getattr(c, propname))
return s
@add_metaclass(MetaHasProps)
class HasProps(object):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._changed_vars = set()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
props = sorted(self.properties())
if name.startswith("_") or name in props:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self.changed_properties_with_values())
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
if not hasattr(cls, "__cached_allprops_with_refs"):
s = accumulate_from_subclasses(cls, "__properties_with_refs__")
cls.__cached_allprops_with_refs = s
return cls.__cached_allprops_with_refs
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers
"""
if not hasattr(cls, "__cached_allprops_containers"):
s = accumulate_from_subclasses(cls, "__container_props__")
cls.__cached_allprops_containers = s
return cls.__cached_allprops_containers
@classmethod
def properties(cls):
""" Returns a set of the names of this object's properties. We
traverse the class hierarchy and pull together the full
list of properties.
"""
if not hasattr(cls, "__cached_allprops"):
s = cls.class_properties()
cls.__cached_allprops = s
return cls.__cached_allprops
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
if not hasattr(cls, "__cached_dataspecs"):
dataspecs = set()
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs.keys())
cls.__cached_dataspecs = dataspecs
return cls.__cached_dataspecs
@classmethod
def dataspecs_with_refs(cls):
dataspecs = {}
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs)
return dataspecs
def changed_vars(self):
""" Returns which variables changed since the creation of the object,
or the last called to reset_changed_vars().
"""
return set.union(self._changed_vars, self.properties_with_refs(),
self.properties_containers())
def reset_changed_vars(self):
self._changed_vars = set()
def properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
def changed_properties(self):
return self.changed_vars()
def changed_properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])
@classmethod
def class_properties(cls, withbases=True):
if withbases:
return accumulate_from_subclasses(cls, "__properties__")
else:
return set(cls.__properties__)
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = (bool,)
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (float, ) + bokeh_integer_types
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (complex, float) + bokeh_integer_types
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedProperty(Property):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a property as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
""" Base class for Container-like type properties. """
pass
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
        # optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(Property):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .plot_object import PlotObject
if issubclass(self.instance_type, PlotObject):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserilize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class This(Property):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(Property):
""" Any type property accepts any values. """
pass
class Function(Property):
""" Function type property. """
pass
class Event(Property):
""" Event type property. """
pass
class Interval(ParameterizedProperty):
''' Range type property ensures values are contained inside a given interval. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedProperty):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
default = kwargs.get("default", self._type_params[0].default)
help = kwargs.get("help")
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
        return self.__class__(*(self.type_params + [other]), default=self._default, help=self.__doc__)
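# Illustrative note: Either validates against each type parameter in turn, so a
# property declared as Either(Int, List(Int)) accepts both 3 and [1, 2, 3]; the
# same property can be built from instances with the | operator, e.g. Int() | List(Int).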
class Enum(Property):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if not (not values and isinstance(enum, enums.Enumeration)):
enum = enums.enumeration(enum, *values)
self.allowed_values = enum._values
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self.allowed_values):
raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
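# Illustrative values accepted by Color (a sketch based on the docstring above,
# not an exhaustive list):
#   "firebrick"            # one of the SVG named colors
#   "#00ff00"              # hex RGB string
#   (0, 127, 255)          # RGB byte tuple
#   (0, 127, 255, 0.5)     # RGBa byte tuple, alpha as a float in [0, 1]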
class Align(Property):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
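# Illustrative conversions performed by DashPattern.transform (derived from the
# _dash_patterns table above):
#   "dashed"   -> [6]
#   "dotdash"  -> [2, 4, 6, 4]
#   "2 4 6"    -> [2, 4, 6]    # space-separated integers
#   []         -> []           # an empty list keeps the line solid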
class Size(Float):
""" Size type property.
.. note::
``Size`` is equivalent to an unsigned int.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
Percents are useful for specifying alphas and coverage and extents; more
semantically meaningful than Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(Property):
""" Date (not datetime) type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
class Datetime(Property):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):
return
try:
import pandas
if isinstance(value, (pandas.Timestamp)):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
# Check for None value
if val is None:
return dict(value=None)
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r)" % (self.__class__.__name__, val)
class NumberSpec(DataSpec):
def __init__(self, default, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
super(StringSpec, self).__set__(obj, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
super(FontSizeSpec, self).__set__(obj, value)
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
self._units_type.validate(units_default)
self._units_type._default = units_default
def to_dict(self, obj):
d = super(UnitsSpec, self).to_dict(obj)
d["units"] = getattr(obj, self.name+"_units")
return d
def __set__(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units: setattr(obj, self.name+"_units", units)
super(UnitsSpec, self).__set__(obj, value)
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r, units_default=%r)" % (self.__class__.__name__, val, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
class DistanceSpec(UnitsSpec):
def __init__(self, default, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DistanceSpec, self).__set__(obj, value)
class ScreenDistanceSpec(NumberSpec):
def to_dict(self, obj):
d = super(ScreenDistanceSpec, self).to_dict(obj)
d["units"] = "screen"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(ScreenDistanceSpec, self).__set__(obj, value)
class DataDistanceSpec(NumberSpec):
def to_dict(self, obj):
        d = super(DataDistanceSpec, self).to_dict(obj)
d["units"] = "data"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DataDistanceSpec, self).__set__(obj, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor._values)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
| bsd-3-clause |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/frames/boxcox_test.py | 12 | 5074 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test frame.box_cox() and frame.reverse_box_cox()"""
import unittest
from sparktkregtests.lib import sparktk_test
class BoxCoxTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(BoxCoxTest, self).setUp()
dataset =\
[[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],
[8.1583747730768401], [0.35889585616853292]]
schema = [("y", float)]
self.frame = self.context.frame.create(dataset, schema=schema)
def test_wt_default(self):
""" Test behaviour for default params, lambda = 0 """
self.frame.box_cox("y")
actual = self.frame.to_pandas()["y_lambda_0.0"].tolist()
expected =\
[1.7717791879837133, 2.1946810429706676,
2.1877697201262163, 2.0990449791729704, -1.0247230268174008]
self.assertItemsEqual(actual, expected)
def test_lambda(self):
""" Test wt for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
actual = self.frame.to_pandas()["y_lambda_0.3"].tolist()
expected =\
[2.3384668540844573, 3.1056915770236082,
3.0923547540771801, 2.9235756971904037, -0.88218677941017198]
self.assertItemsEqual(actual, expected)
def test_reverse_default(self):
""" Test reverse transform for default lambda = 0 """
self.frame.box_cox("y")
self.frame.reverse_box_cox("y_lambda_0.0",
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727425, 8.9771372790941815,
8.9153072947470804, 8.1583747730768401, 0.35889585616853298]
self.assertItemsEqual(actual, expected)
def test_reverse_lambda(self):
""" Test reverse transform for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
self.frame.reverse_box_cox("y_lambda_0.3", 0.3,
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727442, 8.9771372790941797,
8.9153072947470822, 8.1583747730768419,
0.35889585616853298]
self.assertItemsEqual(actual, expected)
@unittest.skip("req not clear")
def test_lambda_negative(self):
""" Test box cox for lambda -1 """
self.frame.box_cox("y", -1)
actual = self.frame.to_pandas()["y_lambda_-1.0"].tolist()
expected =\
[0.82996979614597488, 0.88860591423406388,
0.88783336715839256, 0.87742656744575354,
-1.7863236167608822]
self.assertItemsEqual(actual, expected)
def test_existing_boxcox_column(self):
""" Test behavior for existing boxcox column """
self.frame.box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.box_cox("y", 0.3)
def test_existing_reverse_column(self):
""" Test behavior for existing reverse boxcox column """
self.frame.reverse_box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.reverse_box_cox("y", 0.3)
@unittest.skip("Req not clear")
def test_negative_col_positive_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
frame.box_cox("y", 1)
actual = frame.to_pandas()["y_lambda_1.0"].tolist()
expected = [-2.0, -3.0, 0]
self.assertItemsEqual(actual, expected)
@unittest.skip("Req not clear")
    def test_negative_col_fractional_lambda(self):
        """Test behaviour for negative input column and fractional lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y", 0.1)
@unittest.skip("Req not clear")
def test_negative_col_zero_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
yonglehou/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to fluctuate together during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
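# (rescaling the precision matrix by 1 / sqrt of its diagonal turns the
# off-diagonal entries into the partial correlations between stocks, up to
# sign; only their absolute values are used for the edge weights below)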
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.1/examples/minimal_contact_binary.py | 1 | 5694 | #!/usr/bin/env python
# coding: utf-8
# Minimal Contact Binary System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
# Here we'll initialize a default binary, but ask for it to be created as a contact system.
# In[3]:
b_cb = phoebe.default_binary(contact_binary=True)
# We'll compare this to the default detached binary
# In[4]:
b_detached = phoebe.default_binary()
# Hierarchy
# -------------
# Let's first look at the hierarchy of the default detached binary, and then compare that to the hierarchy of the overcontact system
# In[5]:
print b_detached.hierarchy
# In[6]:
print b_cb.hierarchy
# As you can see, the overcontact system has an additional "component" with method "envelope" and component label "contact_envelope".
#
# Next let's look at the parameters in the envelope and star components. You can see that most of the parameters in the envelope class are constrained, while the equivalent radius of the primary is unconstrained. The value of the primary equivalent radius constrains the potential and fillout factor of the envelope, as well as the equivalent radius of the secondary.
# In[7]:
print b_cb.filter(component='contact_envelope', kind='envelope', context='component')
# In[8]:
print b_cb.filter(component='primary', kind='star', context='component')
# In[9]:
b_cb['requiv@primary'] = 1.5
# In[10]:
b_cb['pot@contact_envelope@component']
# In[11]:
b_cb['fillout_factor@contact_envelope@component']
# In[12]:
b_cb['requiv@secondary@component']
# Now, of course, if we didn't originally know we wanted a contact binary and built the default detached system, we could still turn it into a contact binary just by changing the hierarchy.
# In[13]:
b_detached.add_component('envelope', component='contact_envelope')
# In[14]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'], b_detached['contact_envelope'])
print hier
# In[15]:
b_detached.set_hierarchy(hier)
# In[16]:
print b_detached.hierarchy
# However, since our system was detached, the system is not overflowing, and therefore doesn't pass system checks
# In[17]:
b_detached.run_checks()
# And because of this, the potential and requiv@secondary constraints cannot be computed
# In[18]:
b_detached['pot@component']
# In[19]:
b_detached['requiv@secondary@component']
# Likewise, we can make a contact system detached again simply by removing the envelope from the hierarchy. The parameters themselves will still exist (unless you remove them), so you can always just change the hierarchy again to change back to an overcontact system.
# In[20]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'])
print hier
# In[21]:
b_detached.set_hierarchy(hier)
# In[22]:
print b_detached.hierarchy
# Although the constraints have been removed, PHOEBE has lost the original value of the secondary radius (because of the failed contact constraints), so we'll have to reset that here as well.
# In[23]:
b_detached['requiv@secondary'] = 1.0
# Adding Datasets
# ---------------------
# In[24]:
b_cb.add_dataset('mesh', times=[0], dataset='mesh01')
# In[25]:
b_cb.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[26]:
b_cb.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[27]:
b_cb.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# For comparison, we'll do the same to our detached system
# In[28]:
b_detached.add_dataset('mesh', times=[0], dataset='mesh01')
# In[29]:
b_detached.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[30]:
b_detached.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[31]:
b_detached.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# Running Compute
# --------------------
# In[32]:
b_cb.run_compute(irrad_method='none')
# In[33]:
b_detached.run_compute(irrad_method='none')
# Synthetics
# ------------------
# To ensure compatibility with computing synthetics in detached and semi-detached systems in PHOEBE, the synthetic meshes for our overcontact system are attached to each component separately, instead of the contact envelope.
# In[34]:
print b_cb['mesh01@model'].components
# In[35]:
print b_detached['mesh01@model'].components
# Plotting
# ---------------
# ### Meshes
# In[36]:
afig, mplfig = b_cb['mesh01@model'].plot(x='ws', show=True)
# In[37]:
afig, mplfig = b_detached['mesh01@model'].plot(x='ws', show=True)
# ### Orbits
# In[38]:
afig, mplfig = b_cb['orb01@model'].plot(x='ws',show=True)
# In[39]:
afig, mplfig = b_detached['orb01@model'].plot(x='ws',show=True)
# ### Light Curves
# In[40]:
afig, mplfig = b_cb['lc01@model'].plot(show=True)
# In[41]:
afig, mplfig = b_detached['lc01@model'].plot(show=True)
# ### RVs
# In[42]:
afig, mplfig = b_cb['rv01@model'].plot(show=True)
# In[43]:
afig, mplfig = b_detached['rv01@model'].plot(show=True)
# In[ ]:
| gpl-3.0 |
jereze/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
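# scale the noise so that norm(y, 2) / norm(added noise, 2) == exp(snr / 20.),
# i.e. a fixed signal-to-noise ratio controlled by `snr` above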
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
jeremiedecock/snippets | python/matplotlib/hist_logscale_x.py | 1 | 1804 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make a histogram using a logarithmic scale on X axis
See:
- http://stackoverflow.com/questions/6855710/how-to-have-logarithmic-bins-in-a-python-histogram
"""
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [‘bar’ | ‘barstacked’ | ‘step’ | ‘stepfilled’]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
vmin = np.log10(data.min())
vmax = np.log10(data.max())
bins = np.logspace(vmin, vmax, 50) # <- make a range from 10**vmin to 10**vmax
print(bins)
res_tuple = ax2.hist(data,
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on X axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_x.png")
plt.show()
| mit |
daodaoliang/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
pypot/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/experiment029.py | 2 | 3262 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import lasagne
from gen_data_029 import gen_data, N_BATCH, LENGTH
theano.config.compute_test_value = 'raise'
# Number of units in the hidden (recurrent) layer
N_HIDDEN = 5
# SGD learning rate
LEARNING_RATE = 1e-1
# Number of iterations to train the net
N_ITERATIONS = 200
# Generate a "validation" sequence whose cost we will periodically compute
X_val, y_val = gen_data()
n_features = X_val.shape[-1]
n_output = y_val.shape[-1]
assert X_val.shape == (N_BATCH, LENGTH, n_features)
assert y_val.shape == (N_BATCH, LENGTH, n_output)
# Construct LSTM RNN: One LSTM layer and one dense output layer
l_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features))
# setup fwd and bck LSTM layer.
l_fwd = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)
l_bck = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)
# concatenate forward and backward LSTM layers
l_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN))
l_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN))
l_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)
l_recurrent_out = lasagne.layers.DenseLayer(
l_concat, num_units=n_output, nonlinearity=None)
l_out = lasagne.layers.ReshapeLayer(
l_recurrent_out, (N_BATCH, LENGTH, n_output))
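# reshape the per-timestep dense outputs back to (batch, time, n_output)
# so the squared-error cost below is computed against the 3D targets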
input = T.tensor3('input')
target_output = T.tensor3('target_output')
# add test values
input.tag.test_value = np.random.rand(
*X_val.shape).astype(theano.config.floatX)
target_output.tag.test_value = np.random.rand(
*y_val.shape).astype(theano.config.floatX)
# Cost = mean squared error
cost = T.mean((l_out.get_output(input) - target_output)**2)
# Use NAG for training
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)
# Theano functions for training, getting output, and computing cost
train = theano.function([input, target_output],
cost, updates=updates, on_unused_input='warn',
allow_input_downcast=True)
y_pred = theano.function(
[input], l_out.get_output(input), on_unused_input='warn',
allow_input_downcast=True)
compute_cost = theano.function(
[input, target_output], cost, on_unused_input='warn',
allow_input_downcast=True)
# Train the net
def run_training():
costs = np.zeros(N_ITERATIONS)
for n in range(N_ITERATIONS):
X, y = gen_data()
# you should use your own training data mask instead of mask_val
costs[n] = train(X, y)
if not n % 10:
cost_val = compute_cost(X_val, y_val)
print "Iteration {} validation cost = {}".format(n, cost_val)
plt.plot(costs)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()
def plot_estimates():
X, y = gen_data()
y_predictions = y_pred(X)
ax = plt.gca()
ax.plot(y_predictions[0,:,0], label='estimate')
ax.plot(y[0,:,0], label='ground truth')
# ax.plot(X[0,:,0], label='aggregate')
ax.legend()
plt.show()
run_training()
plot_estimates()
| mit |
zmr/namsel | accuracy_test.py | 1 | 2139 | #encoding: utf-8
import cPickle as pickle
from classify import load_cls, label_chars
from cv2 import GaussianBlur
from feature_extraction import get_zernike_moments, get_hu_moments, \
extract_features, normalize_and_extract_features
from functools import partial
import glob
from multiprocessing.pool import Pool
import numpy as np
import os
from sklearn.externals import joblib
from sobel_features import sobel_features
from transitions import transition_features
from fast_utils import fnormalize, ftrim
cls = load_cls('logistic-cls')
# Load testing sets
print 'Loading test data'
tsets = pickle.load(open('datasets/testing/training_sets.pkl', 'rb'))
scaler = joblib.load('zernike_scaler-latest')
print 'importing classifier'
print cls.get_params()
print 'scoring ...'
keys = tsets.keys()
keys.sort()
all_samples = []
## Baseline accuracies for the data in tsets
baseline = [0.608, 0.5785123966942148, 0.4782608695652174, 0.7522123893805309,
0.6884057971014492, 0.5447154471544715, 0.9752066115702479,
0.9830508474576272]
def test_accuracy(t, clsf=None):
'''Get accuracy score for a testset t'''
if clsf:
cls = clsf
else:
global cls
y = tsets[t][:,0]
x = tsets[t][:,1:]
x3 = []
for j in x:
j = ftrim(j.reshape((32,16)).astype(np.uint8))
x3.append(normalize_and_extract_features(j))
pred = cls.predict(x3)
s = 0
for i, p in enumerate(pred):
if float(p) == y[i]:
s += 1.0
else:
pass
print 'correct', label_chars[y[i]], '||', label_chars[p], t #, max(cls.predict_proba(x3[i])[0])
score = s / len(y)
return score
def test_all(clsf=None):
'''Run accuracy tests for all testsets'''
print 'starting tests. this will take a moment'
test_accuracy(keys[0], clsf)
test_all = partial(test_accuracy, clsf=clsf)
p = Pool()
all_samples = p.map(test_all, keys)
for t, s in zip(keys, all_samples):
print t, s
return np.mean(all_samples)
if __name__ == '__main__':
print test_all()
| mit |
ephes/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
aguirrea/lucy | tests/lfootGraph.py | 1 | 6007 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Andrés Aguirre Dorelo
#
# MINA/INCO/UDELAR
#
# module for finding the steps in the tutors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import glob
import ntpath
from parser.BvhImport import BvhImport
import matplotlib.pyplot as plt
from configuration.LoadSystemConfiguration import LoadSystemConfiguration
import numpy as np
from scipy.signal import argrelextrema
from collections import Counter
sysConf = LoadSystemConfiguration()
BVHDir = os.getcwd() + sysConf.getDirectory("CMU mocap Files")
Y_THREADHOLD = 11 #TODO calculate this as the average of the steps_highs
X_THREADHOLD = 36
def firstMax(values1, values2):
res=0
for i in range(len(values1)-2):
        if values1[i] < values1[i+1] and values1[i+1] > values1[i+2]: #i+1 is a local maximum
            if (values1[i] - values2[i]) > Y_THREADHOLD:
                res=i+1
        elif values1[i] < values1[i+1] < values1[i+2]: #i is a local maximum
            if (values1[i] - values2[i]) > Y_THREADHOLD:
res=i
return res
def find_nearest(a, a0):
"Element in nd array `a` closest to the scalar value `a0`"
idx = np.abs(a - a0).argmin()
return a.flat[idx]
for filename in glob.glob(os.path.join(BVHDir, '*.bvh')):
print "transforming: " + filename + " ..."
parser = BvhImport(filename)
x_,y_,z_ = parser.getNodePositionsFromName("lFoot")
y1 = []
y2 = []
x1 = []
x2 = []
for key, value in y_.iteritems():
y1.append(value)
x1.append(key)
x_,y_,z_ = parser.getNodePositionsFromName("rFoot")
for key, value in y_.iteritems():
y2.append(value)
x2.append(key)
maxLfootIndexes = [x for x in argrelextrema(np.array(y1), np.greater)[0]]
maxRfootIndexes = [x for x in argrelextrema(np.array(y2), np.greater)[0]]
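    # Step detection heuristic: a local maximum of one foot only counts as a
    # step candidate if that foot is at least Y_THREADHOLD higher than the
    # other foot at the same frame; candidates within X_THREADHOLD frames of
    # an already accepted one keep only the higher of the two peaks.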
stepsLfootIndexes = []
for i in range(len(maxLfootIndexes)):
index = maxLfootIndexes[i]
if y1[index] - y2[index] > Y_THREADHOLD: #one foot is up and the other is in the floor
if len(stepsLfootIndexes)>0:
                if abs(index - find_nearest(np.array(stepsLfootIndexes), index)) > X_THREADHOLD: #avoid max near an existing point
stepsLfootIndexes.append(index)
print "appeend L"
else:
if y1[find_nearest(np.array(stepsLfootIndexes), index)] < y1[index]: #check if the exiting near max is a local maximun
print "remove L", find_nearest(np.array(stepsLfootIndexes), index), "from: ", stepsLfootIndexes
stepsLfootIndexes.remove(find_nearest(np.array(stepsLfootIndexes), index))
print "remove L"
stepsLfootIndexes.append(index)
print "appeend L"
else:
stepsLfootIndexes.append(index)
print "appeend L"
stepsRfootIndexes = []
for i in range(len(maxRfootIndexes)):
index = maxRfootIndexes[i]
if y2[index] - y1[index] > Y_THREADHOLD: #one foot is up and the other is in the floor
if len(stepsRfootIndexes)>0:
                if abs(index - find_nearest(np.array(stepsRfootIndexes),index)) > X_THREADHOLD: #avoid max near an existing point
stepsRfootIndexes.append(index)
print "appeend R"
else:
if y2[find_nearest(np.array(stepsRfootIndexes), index)] < y2[index]: #check if the exiting near max is a local maximun
print "remove R", find_nearest(np.array(stepsRfootIndexes), index), "from: ", stepsRfootIndexes, "index: ", index
stepsRfootIndexes.remove(find_nearest(np.array(stepsRfootIndexes), index))
print "remove R"
stepsRfootIndexes.append(index)
print "appeend R"
else:
stepsRfootIndexes.append(index)
print "appeend R"
if stepsLfootIndexes[0] < stepsRfootIndexes[0]:
if len(stepsLfootIndexes) > 2:
testPoint = stepsLfootIndexes[1]
while(y1[testPoint]>y2[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: ", stepsLfootIndexes[1], "end: ", end
else:
end = len(y1)
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: -----", "end: ", end
else:
if len(stepsRfootIndexes) > 2:
testPoint = stepsRfootIndexes[1]
while(y2[testPoint]>y1[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: ", stepsRfootIndexes[1], "end: ", end
else:
end = len(y2)
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: -----", "end: ", end
plt.plot(x1, y1,'ro')
plt.plot(x1, y2,'g')
plt.show()
| gpl-3.0 |
otmaneJai/Zipline | zipline/utils/tradingcalendar_bmf.py | 17 | 7576 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime, \
get_open_and_closes
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
# Universal confraternization
conf_universal = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(conf_universal)
# Sao Paulo city birthday
aniversario_sao_paulo = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aniversario_sao_paulo)
# Carnival Monday
carnaval_segunda = rrule.rrule(
rrule.MONTHLY,
byeaster=-48,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_segunda)
# Carnival Tuesday
carnaval_terca = rrule.rrule(
rrule.MONTHLY,
byeaster=-47,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_terca)
# Passion of the Christ
sexta_paixao = rrule.rrule(
rrule.MONTHLY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(sexta_paixao)
# Corpus Christi
corpus_christi = rrule.rrule(
rrule.MONTHLY,
byeaster=60,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(corpus_christi)
tiradentes = rrule.rrule(
rrule.MONTHLY,
bymonth=4,
bymonthday=21,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(tiradentes)
# Labor day
dia_trabalho = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(dia_trabalho)
# Constitutionalist Revolution
constitucionalista = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=9,
cache=True,
dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(constitucionalista)
# Independency day
independencia = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
bymonthday=7,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(independencia)
# Our Lady of Aparecida
aparecida = rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=12,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aparecida)
# All Souls' day
finados = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(finados)
# Proclamation of the Republic
proclamacao_republica = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=15,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(proclamacao_republica)
# Day of Black Awareness
consciencia_negra = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=20,
cache=True,
dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(consciencia_negra)
# Christmas Eve
vespera_natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(vespera_natal)
# Christmas
natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(natal)
# New Year Eve
ano_novo = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=31,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo)
# New Year Eve on saturday
ano_novo_sab = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=30,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo_sab)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# World Cup 2014 Opening
non_trading_days.append(datetime(2014, 6, 12, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Ash Wednesday
quarta_cinzas = rrule.rrule(
rrule.MONTHLY,
byeaster=-46,
cache=True,
dtstart=start,
until=end
)
def get_early_closes(start, end):
    # In Bovespa the only "early close" rule is Ash Wednesday, which is really a late 1:00 PM open.
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
early_close_rules = []
early_close_rules.append(quarta_cinzas)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
# only "early close" event in Bovespa actually is a late start
# as the market only opens at 1pm
open_hour = 13 if day in quarta_cinzas else 10
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=open_hour,
minute=00),
tz='America/Sao_Paulo').tz_convert('UTC')
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=16),
tz='America/Sao_Paulo').tz_convert('UTC')
return market_open, market_close
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
kylerbrown/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
nasseralkmim/SaPy | sapy/plotter.py | 1 | 4743 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Line3D
from matplotlib.lines import Line2D
import numpy as np
def window(name):
return plt.figure(name)
def show():
plt.show()
return None
def undeformed(model):
"""Plot the undeformed structure according to the dimension
"""
if model.ndm == 2:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
label2d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
if model.ndm == 3:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
label3d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
def deformed(model, U):
"""Plot the deformed structure according to the dimension
"""
CON = model.CON
XYZ = np.copy(model.XYZ)
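    # shift every nodal coordinate by its computed displacement
    # (plotted at true scale; no magnification factor is applied)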
for n in range(model.nn):
for d in range(model.ndf[n]):
dof = model.DOF[n, d]
XYZ[n, d] += U[dof]
if model.ndm == 2:
deformed = window('Deformed')
axes = deformed.add_subplot(111, aspect='equal')
geo2d(XYZ, CON, axes, 'tomato')
geo2d(model.XYZ, model.CON, axes, 'black')
label2d(XYZ, CON, axes)
deformed.tight_layout()
if model.ndm == 3:
deformed = window('Deformed')
axes = deformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
geo3d(XYZ, CON, axes, 'tomato')
label3d(XYZ, CON, axes)
deformed.tight_layout()
def geo3d(XYZ, CON, axes, color):
"""Plot the 3d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
# draw nodes
for node, xyz in enumerate(XYZ):
axes.scatter(xyz[0], xyz[1], xyz[2], c='k', alpha=1, marker='s')
# draw edges
for ele, con in enumerate(CON):
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
zs = [XYZ[con[0]][2], XYZ[con[1]][2]]
line = Line3D(xs, ys, zs, linewidth=1.0, color=color)
axes.add_line(line)
def label3d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], xyz[2], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(ele), color='g', size=10)
def geo2d(XYZ, CON, axes, color):
"""Plot the 2d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
# draw nodes
for xyz in XYZ:
axes.scatter(xyz[0], xyz[1], c='k', alpha=1, marker='s')
# draw edges
for con in CON:
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
line = Line2D(xs, ys, linewidth=1.0, color=color)
axes.add_line(line)
def label2d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(ele), color='g', size=10)
def axialforce(model, Q):
"""Plot axial force
"""
if model.ndm == 2:
axial = window('Axial')
axes = axial.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
axial2d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
if model.ndm == 3:
axial = window('Axial')
axes = axial.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
axial3d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
def axial2d(XYZ, CON, Q, axes):
"""Plot text with axial force value
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(np.round_(Q[ele], 1)), color='g', size=10)
def axial3d(XYZ, CON, Q, axes):
"""Plot text with axial force value for 3d plot
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(np.round_(Q[ele], 1)), color='g', size=10)
| gpl-3.0 |
hennersz/pySpace | basemap/doc/users/figures/omerc.py | 6 | 1065 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# setup oblique mercator basemap.
# width is width of map projection region in meters (xmax-xmin)
# height is height of map projection region in meters (ymax-ymin)
# lon_0, lat_0 are the central longitude and latitude of the projection.
# lat_1,lon_1 and lat_2,lon_2 are two pairs of points that define
# the projection centerline.
# Map projection coordinates are automatically rotated to true north.
# To avoid this, set no_rot=True.
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
m = Basemap(height=16700000,width=12000000,
resolution='l',area_thresh=1000.,projection='omerc',\
lon_0=-100,lat_0=15,lon_2=-120,lat_2=65,lon_1=-50,lat_1=-55)
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(-180.,181.,20.))
m.drawmapboundary(fill_color='aqua')
plt.title("Oblique Mercator Projection")
plt.show()
| gpl-3.0 |
jlegendary/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
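    # (the boundary w[0]*x + w[1]*y + intercept = 0 solved for y gives this line)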
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ephes/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
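    Examples
    --------
    A minimal sketch of a call (the dictionary and sample below are
    illustrative only):
    >>> import numpy as np
    >>> D = np.eye(4)                      # 4 unit-norm atoms of dimension 4
    >>> X = np.array([[1., 0., 0., 0.5]])  # a single sample to encode
    >>> code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=2)
    >>> code.shape
    (1, 4)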
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
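            # default for 'lars' / 'omp': target roughly n_features / 10
            # nonzero coefficients, clipped to the range [1, n_components]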
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
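    Examples
    --------
    A minimal illustrative run on random data; only the shapes of the
    returned factors are shown, as the values depend on the input.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(12, 8)
    >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
    ...                                          max_iter=10, random_state=0)
    >>> code.shape, dictionary.shape
    ((12, 5), (5, 8))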
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
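    Examples
    --------
    A minimal illustrative run over random mini-batches; only the shapes of
    the returned factors are shown.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 8)
    >>> code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
    ...                                         n_iter=10, random_state=0)
    >>> code.shape, dictionary.shape
    ((20, 5), (5, 8))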
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
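    Examples
    --------
    A small illustrative sketch with a fixed, row-normalized random
    dictionary; only the shape of the transformed data is shown.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> D = rng.randn(4, 6)
    >>> D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]
    >>> coder = SparseCoder(D, transform_algorithm='threshold',
    ...                     transform_alpha=0.5)
    >>> coder.transform(rng.randn(3, 6)).shape
    (3, 4)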
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
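    Examples
    --------
    A brief illustrative fit on random data; only the shapes of the learned
    dictionary and of the transformed data are shown.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(12, 8)
    >>> dico = DictionaryLearning(n_components=5, alpha=1, max_iter=10,
    ...                           random_state=0).fit(X)
    >>> dico.components_.shape
    (5, 8)
    >>> dico.transform(X).shape
    (12, 5)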
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
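    Examples
    --------
    A brief illustrative sketch of incremental fitting with ``partial_fit``
    on random mini-batches; only the shape of the learned atoms is shown.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> dico = MiniBatchDictionaryLearning(n_components=5, alpha=1,
    ...                                    n_iter=5, random_state=0)
    >>> for _ in range(3):
    ...     dico = dico.partial_fit(rng.randn(10, 8))
    >>> dico.components_.shape
    (5, 8)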
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
| bsd-3-clause |
melqkiades/yelp | source/python/topicmodeling/external/topicensemble/unsupervised/nmf.py | 2 | 1622 | import numpy as np
from sklearn import decomposition
import logging as log
# --------------------------------------------------------------
class SklNMF:
"""
Wrapper class backed by the scikit-learn package NMF implementation.
"""
def __init__( self, max_iters = 100, init_strategy = "random" ):
        self.max_iters = max_iters
self.init_strategy = init_strategy
self.W = None
self.H = None
def apply( self, X, k = 2, init_W = None, init_H = None ):
"""
Apply NMF to the specified document-term matrix X.
"""
self.W = None
self.H = None
random_seed = np.random.randint( 1, 100000 )
if not (init_W is None or init_H is None):
model = decomposition.NMF( init="custom", n_components=k, max_iter=self.max_iters, random_state = random_seed )
self.W = model.fit_transform( X, W=init_W, H=init_H )
else:
model = decomposition.NMF( init=self.init_strategy, n_components=k, max_iter=self.max_iters, random_state = random_seed )
self.W = model.fit_transform( X )
self.H = model.components_
def rank_terms( self, topic_index, top = -1 ):
"""
Return the top ranked terms for the specified topic, generated during the last NMF run.
"""
if self.H is None:
raise ValueError("No results for previous run available")
# NB: reverse
top_indices = np.argsort( self.H[topic_index,:] )[::-1]
# truncate if necessary
if top < 1 or top > len(top_indices):
return top_indices
return top_indices[0:top]
def generate_partition( self ):
if self.W is None:
raise ValueError("No results for previous run available")
return np.argmax( self.W, axis = 1 ).flatten().tolist()
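# A minimal illustrative usage sketch on a small random nonnegative matrix;
# shapes and helper calls only, since the factor values depend on the random
# seed drawn inside ``apply``.
if __name__ == "__main__":
    X_demo = np.random.RandomState(0).rand(6, 10)
    nmf = SklNMF(max_iters=50, init_strategy="random")
    nmf.apply(X_demo, k=3)
    print(nmf.W.shape)                   # (6, 3): document-topic weights
    print(nmf.H.shape)                   # (3, 10): topic-term weights
    print(nmf.rank_terms(0, top=5))      # indices of the top-5 terms of topic 0
    print(nmf.generate_partition()[:6])  # hard topic assignment per document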
| lgpl-2.1 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tri/triplot.py | 8 | 3150 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
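    A minimal call sketch (illustrative only; it assumes a pyplot backend is
    available and lets :class:`~matplotlib.tri.Triangulation` build a
    Delaunay triangulation from random points)::

        import numpy as np
        import matplotlib.pyplot as plt

        x, y = np.random.rand(2, 10)
        fig, ax = plt.subplots()
        lines, markers = ax.triplot(x, y, 'ko-')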
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
| apache-2.0 |
joshgabriel/dft-crossfilter | CompleteApp/crossfilter_app/old_mains/old_main.py | 3 | 10263 | # main.py that controls the whole app
# to run: just run bokeh serve --show crossfilter_app in the benchmark-view repo
from random import random
import os
from bokeh.layouts import column
from bokeh.models import Button
from bokeh.models.widgets import Select, MultiSelect, Slider
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
#### CROSSFILTER PART ##### >>> Module load errors throwing up how to do a relative import ?
from crossview.crossfilter.models import CrossFilter
#from benchmark.loader import load
#### DATA INPUT FROM REST API ######
#from benchmark.loader import load
#### DATA INPUT STRAIGHT FROM PANDAS for test purposes ####
import pandas as pd
import numpy as np
##### PLOTTING PART -- GLOBAL FIGURE CREATION ########
# create a plot and style its properties
## gloabl data interface to come from REST API
vasp_data = pd.read_csv('../benchmark/data/francesca_data_head.csv')
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location='below')
#p.border_fill_color = 'black'
#p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
#### FORMAT OF DATA SENT TO WIDGET #######
# add a text renderer to out plot (no data yet)
r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt",
text_baseline="middle", text_align="center")
r2 = p.circle(x=[], y=[])
i = 0
ds = r.data_source
ds2 = r2.data_source
##### WIDGET RESPONSES IN THE FORM OF CALLBACKS ######
# create a callback that will add a number in a random location
def callback():
global i
# BEST PRACTICE --- update .data in one step with a new dict
new_data = dict()
new_data['x'] = ds.data['x'] + [random()*70 + 15]
new_data['y'] = ds.data['y'] + [random()*70 + 15]
new_data['text_color'] = ds.data['text_color'] + [RdYlBu3[i%3]]
new_data['text'] = ds.data['text'] + [str(i)]
ds.data = new_data
i = i + 1
#### The make crossfilter callback
#### make data loading as easy as possible for now straight from
#### the benchmark data csv file not from the API with the decorators
#### TO DO after we see that the crossfilter and new bokeh play nicely
##########: integrate with API and uncomment the decorators and data loader
#@bokeh_app.route("/bokeh/benchmark/")
#@object_page("benchmark")
#### RENDERERS OF WIDGETS #####
def make_bokeh_crossfilter(axis='k-point'):
"""The root crossfilter controller"""
# Loading the dft data head as a
# pandas dataframe
new_data = dict()
# new_data = load("./benchmark/data/francesca_data_head")
# use a straight pandas dataframe for now instead and follow the
# BEST PRACTICE described above basically clean up the data object on each callback.
# data that will be given back on the callback
new_data = vasp_data # our data that will be replaced by the API
global p
p = CrossFilter.create(df=new_data)
print (type(p))
# dont know what Crossfilter class really returns in terms of data but for testnig purposes lets
# return something that is compatible with the new_data dictionary return in the
# vanilla example through the global object ds.data
# for example the x - y coordinates on the plots correspond to mins on the data set in k-point and value fields
# new_data['x'] = ds2.data['x'] + list(data[axis])
# new_data['y'] = ds2.data['y'] + list(data['value'])
# other stuff default as in vanilla callback()
# for test purposes to see actually what coordinate is getting plotted
    # it is always going to be the same because only one min exists in the dataset
    # it's at x = 6, y = -12
    # SUCCESS: learnt how to create a custom callback that loads a CSV file and does something with it
# print ("New data from crossfilter", new_data)
# finally assign to ds.data
# ds2.data = new_data
def make_wflow_crossfilter(tags={'element_widget':['Cu', 'Pd', 'Mo'], 'code_widget':['VASP'], 'ExchCorr':['PBE']}):
"""
demo crossfilter based on pure pandas dataframes that serves a data processing
workflow that selects inputs from widgets
args:
tags: dict of selections by upto 3 widgets
returns:
dictionary of crossfiltered dataframes that can further be processed down the workflow
"""
## Actual widget controlled inputs ##
# elements = tags['element']
# exchanges = tags['ExchCorr']
# propys = tags['code_widget']
## Demo user inputs for testing selects everything in the test csv : max data load ##
elements = np.unique(vasp_data['element'])
exchanges = np.unique(vasp_data['exchange'])
propys = ['B','dB','a0']
# final dictionary of crossfiltered dataframes
crossfilts = {}
# crossfiltering part - playing the role of the "Crossfilter class in bokeh.models"
for pr in propys:
for el in elements:
for ex in exchanges:
# crossfilter down to exchange and element
elems = vasp_data[vasp_data['element']==el]
exchs = elems[elems['exchange']==ex]
# separate into properties, energy, kpoints
p = exchs[exchs['property']==pr]
e = exchs[exchs['property']=='e0']
##### *** Accuracy calculation based on default standards *** #####
# choose reference from dict
ref_e = expt_ref_prb[el][pr]
ref_w = wien_ref[el][pr]
# calculate percent errors on property - ACCURACY CALCULATION based on default standards
props = [v for v in p['value'] ]
percs_wien = [ (v - ref_w) / ref_w * 100 for v in p['value']]
percs_prb = [ (v - ref_e) / ref_e * 100 for v in p['value']]
kpts = [ k for k in p['k-point']]
kpts_atom = [ k**3 for k in p['k-point'] ]
##### *** Accuracy calculation based on default standards *** #####
##### *** Calculate prec_sigma of energy *** #####
energy = [ v for v in e['value']]
end= len(energy) - 1
prec_sigma = [ v - energy[end] for v in energy]
# make data frame of kpoints, energy, percent errors on property
if kpts and energy and props:
NAME = '_'.join([el,ex,pr])
Rdata =\
pd.DataFrame({'Kpoints_size':kpts, 'Kpoints_atom_density':kpts_atom, 'Energy':energy, 'Prec_Sigma':prec_sigma , pr:props, 'percent_error_wien':percs_wien, 'percent_error_expt':percs_prb })
crossfilts[NAME] = Rdata
    return crossfilts
def calculate_prec(cross_df, automate= False):
"""
function that calculates the prec_inf using R
and returns a fully contructed plottable dataframe
Args:
cross_df: pandas dataframe containing the data
automate: bool, a To do feature to automatically calculate the best fit
Returns:
dataframe contining the R added precision values to be
received most always by the plotting commander.
"""
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
import rpy2.rinterface as rin
stats = importr('stats')
base = importr('base')
    # activate R environment in python
rpy2.robjects.numpy2ri.activate()
pandas2ri.activate()
    # read in necessary elements of the crossfiltered dataframe
df = pd.DataFrame({'x': cross_df['Kpoints_atom_density'],
'y': cross_df['Energy']})
ro.globalenv['dataframe']=df
### *** R used to obtain the fit on the data to calculate prec_inf *** ###
# perform regression - bokeh widgets can be used here to provide the inputs to the nls regression
# some python to R translation of object names via the pandas - R dataframes
y = df['y']
x = df['x']
l = len(y) - 1 # needed because R indexes list from 1 to len(list)
# ***WIDGET inputs*** # OR AUTOMATE
# the slider inputs on starting point or can be automated also
l1 = 3
l2 = 0
fitover = rin.SexpVector(list(range(l1,l-l2)), rin.INTSXP)
# numeric entry widget for 'b' is plausible for user to choose best starting guess
start_guess = {'a': y[l], 'b': 5}
start=pandas2ri.py2ri(pd.DataFrame(start_guess,index=start_guess))
# drop down list selection of model
model = 'y~a*x/(b+x)'
# Minimize function with weights and selection
m = \
    stats.nls(model, start = start, algorithm = "port", subset = fitover, weights = x**2, data=base.as_symbol('dataframe'))
# Estimation of goodness of fit
g = stats.cor(y[l1:l-l2],stats.predict(m))
# Report summary of fit, values and error bars
print( base.summary(m).rx2('coefficients') )
# Extrapolation value is given by a
a = stats.coef(m)[1]
# Calculation of precision
prec = abs(y-a)
    # test print-outs of the data? how to render onto html like Shiny if necessary?
print("We learn that the converged value is: {0} and best precision achieved in the measurement is {1}".format(a, min(abs(prec))))
cross_df['Energy_Prec_Inf'] = prec
# close the R environments
rpy2.robjects.numpy2ri.deactivate()
pandas2ri.deactivate()
return (cross_df)
def make_widgets():
"""
main function that will control the rendering of UI widgets
"""
pass
#### WIDGET CREATIONS ####
# OLD VANILLA
# add a button widget and configure with the call back
# button_basic = Button(label="Press Me")
# button_basic.on_click(callback)
#make_bokeh_crossfilter()
# create a button for Select button for input
#menu = [("Bulk Modulus", "B"), ("B'", "dB"), ("Lattice Constant", "a0")]
#select_property = Select(name="Selection", options=menu, value="B")
#select_property.on_click(make_bokeh_crossfilter(axis=value))
# create a button for make crossfilter app
button_crossfilter = Button(label="Make Crossfilter")
button_crossfilter.on_click(make_bokeh_crossfilter)
#create a button for crossfilter_workflwo
button_w_crossfilter = Button(label="Make Crossfilter Workflow")
button_w_crossfilter.on_click(make_wflow_crossfilter)
# put the button and plot in a layout and add to the document
curdoc().add_root(column(button_crossfilter, button_w_crossfilter, p))
| mit |
Tuyki/TT_RNN | MNISTSeq.py | 1 | 14227 | __author__ = "Yinchong Yang"
__copyright__ = "Siemens AG, 2018"
__licencse__ = "MIT"
__version__ = "0.1"
"""
MIT License
Copyright (c) 2018 Siemens AG
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
We first sample MNIST digits to form sequences of random lengths.
The sequence is labeled as one if it contains a zero, and is labeled zero otherwise.
This simulates a high dimensional sequence classification task, such as predicting therapy decision
and survival of patients based on their historical clinical event information.
We train plain LSTM and Tensor-Train LSTM for this task.
After the training, we apply Layer-wise Relevance Propagation to identify the digit(s) that
have influenced the classification.
Apparently, we would expect the LRP algorithm would assign high relevance value to the zero(s)
in the sequence.
These experiments turn out to be successful, which demonstrates that
i) the LSTM and TT-LSTM can indeed learn the mapping from a zero to the sequence class, and that
ii) both LSTMs have no problem in storing the zero pattern over a period of time, because the
classifier is deployed only at the last hidden state, and that
iii) the implementation of the LRP algorithm, complex as it is, is also correct, in that
the zeros are assigned high relevance scores.
Especially the experiments with the plain LSTM serve as a simulation study supporting our submission of
“Yinchong Yang, Volker Tresp, Marius Wunderle, Peter A. Fasching,
Explaining Therapy Predictions with Layer-wise Relevance Propagation in Neural Networks, at IEEE ICHI 2018”.
The original LRP for LSTM from the repository:
https://github.com/ArrasL/LRP_for_LSTM
which we modified and adjusted for keras models.
Feel free to experiment with the hyper parameters and suggest other sequence classification tasks.
Have fun ;)
"""
import pickle
import sys
import numpy as np
from numpy import newaxis as na
import keras
from keras.layers.recurrent import Recurrent
from keras import backend as K
from keras.engine import InputSpec
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine.topology import Layer
from TTLayer import *
from TTRNN import TT_LSTM
def make_seq(n, x, y, maxlen=32, seed=123):
np.random.seed(seed)
lens = np.random.choice(range(2, maxlen), n)
seqs = np.zeros((n, maxlen, 28**2))
labels = np.zeros(n)
digits_label = np.zeros((n, maxlen), dtype='int32')-1
ids = np.zeros((n, maxlen), dtype='int64')-1
for i in range(n):
digits_inds = np.random.choice(range(x.shape[0]), lens[i])
ids[i, -lens[i]::] = digits_inds
seqs[i, -lens[i]::, :] = x[digits_inds]
digits_label[i, -lens[i]::] = y[digits_inds]
class_inds = y[digits_inds]
if True:
# option 1: is there any 0 in the sequence?
labels[i] = (0 in class_inds)
else:
# option 2: even number of 0 -> label=0, odd number of 0 -> label=1
labels[i] = len(np.where(class_inds == 0)[0]) % 2 == 1
return [seqs, labels, digits_label, ids]
# From: https://github.com/ArrasL/LRP_for_LSTM
def lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False):
"""
LRP for a linear layer with input dim D and output dim M.
Args:
- hin: forward pass input, of shape (D,)
- w: connection weights, of shape (D, M)
- b: biases, of shape (M,)
- hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!)
- Rout: relevance at layer output, of shape (M,)
- bias_nb_units: number of lower-layer units onto which the bias/stabilizer contribution is redistributed
- eps: stabilizer (small positive number)
- bias_factor: for global relevance conservation set to 1.0, otherwise 0.0 to ignore bias redistribution
Returns:
- Rin: relevance at layer input, of shape (D,)
"""
sign_out = np.where(hout[na, :] >= 0, 1., -1.) # shape (1, M)
numer = (w * hin[:, na]) + \
((bias_factor * b[na, :] * 1. + eps * sign_out * 1.) * 1. / bias_nb_units) # shape (D, M)
denom = hout[na, :] + (eps * sign_out * 1.) # shape (1, M)
message = (numer / denom) * Rout[na, :] # shape (D, M)
Rin = message.sum(axis=1) # shape (D,)
# Note: local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D
# global network relevance conservation if bias_factor==1.0 (can be used for sanity check)
if debug:
print("local diff: ", Rout.sum() - Rin.sum())
return Rin
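# Illustrative sanity check of the conservation property described in the
# lrp_linear docstring: with bias_factor=1.0 and bias_nb_units=D the total
# relevance should be preserved through a single linear layer. The helper
# name and the small random sizes are arbitrary choices for this sketch.
def _lrp_linear_conservation_check(D=4, M=3, seed=0):
    rng = np.random.RandomState(seed)
    hin = rng.randn(D)
    w = rng.randn(D, M)
    b = rng.randn(M)
    hout = np.dot(w.T, hin) + b  # single incoming layer, as assumed above
    Rout = rng.rand(M)
    Rin = lrp_linear(hin, w, b, hout, Rout, bias_nb_units=D,
                     eps=1e-6, bias_factor=1.0)
    assert np.allclose(Rin.sum(), Rout.sum())
_lrp_linear_conservation_check()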
def sigmoid(x):
x = x.astype('float128')
return 1. / (1. + np.exp(-x))
# Modified from https://github.com/ArrasL/LRP_for_LSTM
def lstm_lrp(l, d, train_data = True):
if train_data:
x_l = X_tr[l]
y_l = Y_tr[l]
z_l = Z_tr[l]
# d_l = d_tr[l]
else:
x_l = X_te[l]
y_l = Y_te[l]
z_l = Z_te[l]
# d_l = d_te[l]
# calculate the FF pass in LSTM for every time step
pre_gates = np.zeros((MAXLEN, d*4))
gates = np.zeros((MAXLEN, d * 4))
h = np.zeros((MAXLEN, d))
c = np.zeros((MAXLEN, d))
for t in range(MAXLEN):
z = np.dot(x_l[t], Ws)
if t > 0:
z += np.dot(h[t-1], Us)
z += b
pre_gates[t] = z
z0 = z[0:d]
z1 = z[d:2*d]
z2 = z[2*d:3*d]
z3 = z[3 * d::]
i = sigmoid(z0)
f = sigmoid(z1)
c[t] = f * c[t-1] + i * np.tanh(z2)
o = sigmoid(z3)
h[t] = o * np.tanh(c[t])
gates[t] = np.concatenate([i, f, np.tanh(z2), o])
# check: z_l[12] / h[-1][12]
Rh = np.zeros((MAXLEN, d))
Rc = np.zeros((MAXLEN, d))
Rg = np.zeros((MAXLEN, d))
Rx = np.zeros((MAXLEN, 28**2))
bias_factor = 0
Rh[MAXLEN-1] = lrp_linear(hin=z_l,
w=Dense_w,
b=np.array(Dense_b),
hout=np.dot(z_l, Dense_w)+Dense_b,
Rout=np.array([y_l]),
bias_nb_units=len(z_l),
eps=eps,
bias_factor=bias_factor)
for t in reversed(range(MAXLEN)):
# t = MAXLEN-1
# print t
Rc[t] += Rh[t]
# Rc[t] = Rh[t]
if t > 0:
Rc[t-1] = lrp_linear(gates[t, d: 2 * d] * c[t - 1], # gates[t , 2 *d: 3 *d ] *c[ t -1],
np.identity(d),
np.zeros((d)),
c[t],
Rc[t],
2*d,
eps,
bias_factor,
debug=False)
Rg[t] = lrp_linear(gates[t, 0:d] * gates[t, 2*d:3*d], # h_input: i + g
np.identity(d), # W
np.zeros((d)), # b
c[t], # h_output
Rc[t], # R_output
2 * d,
eps,
bias_factor,
debug=False)
# foo = np.dot(x_l[t], Ws[:,2*d:3*d]) + np.dot(h[t-1], Us[:, 2*d:3*d]) + b[2*d:3*d]
Rx[t] = lrp_linear(x_l[t],
Ws[:,2*d:3*d],
b[2*d:3*d],
pre_gates[t, 2*d:3*d],
Rg[t],
d + 28 ** 2,
eps,
bias_factor,
debug=False)
if t > 0:
Rh[t-1] = lrp_linear(h[t-1],
Us[:,2*d:3*d],
b[2*d:3*d],
pre_gates[t, 2 * d:3 * d],
Rg[t],
d + 28**2,
eps,
bias_factor,
debug=False)
# hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False
# Rx[np.where(d_l==-1.)[0]] *= 0
return Rx
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Model, Input
from keras.layers import Dense, GRU, LSTM, Dropout, Masking
from keras.optimizers import *
from keras.regularizers import l2
from sklearn.metrics import *
# Script configurations ###################################################################
seed=111111
use_TT = True # whether use Tensor-Train or plain RNNs
# Prepare the data ########################################################################
# Load the MNIST data and build sequences:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)
MAXLEN = 32 # max length of the sequences
X_tr, Y_tr, d_tr, idx_tr = make_seq(n=10000, x=x_train, y=y_train, maxlen=MAXLEN, seed=seed)
X_te, Y_te, d_te, idx_te = make_seq(n=1000, x=x_test, y=y_test, maxlen=MAXLEN, seed=seed+1)
# Define the model ######################################################################
if use_TT:
# TT settings
tt_input_shape = [7, 7, 16]
tt_output_shape = [4, 4, 4]
tt_ranks = [1, 4, 4, 1]
rnn_size = 64
X = Input(shape=X_tr.shape[1::])
X_mask = Masking(mask_value=0.0, input_shape=X_tr.shape[1::])(X)
if use_TT:
Z = TT_LSTM(tt_input_shape=tt_input_shape, tt_output_shape=tt_output_shape, tt_ranks=tt_ranks,
return_sequences=False, recurrent_dropout=.5)(X_mask)
Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)
else:
Z = LSTM(units=rnn_size, return_sequences=False, recurrent_dropout=.5)(X_mask) # dropout=.5,
Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)
rnn_model = Model(X, Out)
rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model and save the results ######################################################
rnn_model.fit(X_tr, Y_tr, epochs=50, batch_size=32, validation_split=.2, verbose=2)
Y_hat = rnn_model.predict(X_tr, verbose=2).reshape(-1)
train_acc = (np.round(Y_hat) == Y_tr).mean()
Y_pred = rnn_model.predict(X_te, verbose=2).reshape(-1)
(np.round(Y_pred) == Y_te).mean()
pred_acc = (np.round(Y_pred) == Y_te).mean()
# Collect all hidden layers ################################################################
if use_TT:
# Reconstruct the fully connected input-to-hidden weights:
from keras.initializers import constant
_tt_output_shape = np.copy(tt_output_shape)
_tt_output_shape[0] *= 4
fc_w = rnn_model.get_weights()[0]
fc_layer = TT_Layer(tt_input_shape=tt_input_shape, tt_output_shape=_tt_output_shape, tt_ranks=tt_ranks,
kernel_initializer=constant(value=fc_w), use_bias=False)
fc_input = Input(shape=(X_tr.shape[2],))
fc_output = fc_layer(fc_input)
fc_model = Model(fc_input, fc_output)
fc_model.compile('sgd', 'mse')
fc_recon_mat = fc_model.predict(np.identity(X_tr.shape[2]))
# Reconstruct the entire LSTM:
fc_Z = LSTM(units=np.prod(tt_output_shape), return_sequences=False, dropout=.5, recurrent_dropout=.5,
weights=[fc_recon_mat, rnn_model.get_weights()[2], rnn_model.get_weights()[1]])(X_mask)
else:
fc_Z = LSTM(units=rnn_size, return_sequences=False, dropout=.5, recurrent_dropout=.5,
weights=rnn_model.get_weights()[0:3])(X_mask)
fc_Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-3),
weights=rnn_model.get_weights()[3::])(fc_Z)
fc_rnn_model = Model(X, fc_Out)
fc_rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',
metrics=['accuracy'])
fc_rnn_model.evaluate(X_te, Y_te, verbose=2)
# Calculate the LRP: #########################################################################
fc_Z_model = Model(X, fc_Z)
fc_Z_model.compile('sgd', 'mse')
Y_hat_fc = fc_rnn_model.predict(X_tr)
Y_pred_fc = fc_rnn_model.predict(X_te)
Ws = fc_rnn_model.get_weights()[0]
Us = fc_rnn_model.get_weights()[1]
b = fc_rnn_model.get_weights()[2]
Dense_w = fc_rnn_model.get_weights()[3]
Dense_b = fc_rnn_model.get_weights()[4]
Z_tr = fc_Z_model.predict(X_tr)
Z_te = fc_Z_model.predict(X_te)
eps = 1e-4
is_number_flag = np.where(d_te != -1)
# All relevance scores of the test sequences
lrp_te = np.vstack([lstm_lrp(i, rnn_size, False).sum(1) for i in range(X_te.shape[0])])
lrp_auroc = roc_auc_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),
lrp_te[is_number_flag].reshape(-1))
lrp_auprc = average_precision_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),
lrp_te[is_number_flag].reshape(-1))
# The reported results:
print(pred_acc)
print(lrp_auroc)
print(lrp_auprc)
| mit |