repo_name (string, 7–60 chars) | path (string, 6–134 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 1.04k–149k chars) | license (12 classes)
---|---|---|---|---|---|
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py | 1 | 4546 | import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()]) | bsd-3-clause |
winklerand/pandas | pandas/tests/test_errors.py | 9 | 1147 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
with catch_warnings(record=True):
try:
raise ParserError()
except pd.parser.CParserError:
pass
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/frame/test_query_eval.py | 11 | 42389 | # -*- coding: utf-8 -*-
from __future__ import print_function
import operator
import pytest
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import DataFrame, Series, Index, MultiIndex, date_range
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
from pandas.core.computation import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
if not _NUMEXPR_INSTALLED:
pytest.skip("cannot query engine numexpr when numexpr not "
"installed")
class TestCompat(object):
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query('A>0')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query('A>0', engine=None)
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine=None)
assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query('A>0', engine='python')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='python')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query('A>0', engine='numexpr')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
pytest.raises(ImportError,
lambda: df.query('A>0', engine='numexpr'))
pytest.raises(ImportError,
lambda: df.eval('A+1', engine='numexpr'))
class TestDataFrameEval(TestData):
def test_ops(self):
# test ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [4, 4000]:
df = DataFrame(1, index=range(n), columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__')]:
base = (DataFrame(np.tile(m.values, n) # noqa
.reshape(n, -1),
columns=list('abcd')))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result, expected)
# these are commutative
if op in ['+', '*']:
result = getattr(df, op)(m)
assert_frame_equal(result, expected)
# these are not
elif op in ['-', '/']:
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1 - np.isnan(df.iloc[0:25]))
result = (1 - np.isnan(df)).iloc[0:25]
assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']})
msg = "expr must be a string to be evaluated"
with tm.assert_raises_regex(ValueError, msg):
df.query(lambda x: x.B == "b")
with tm.assert_raises_regex(ValueError, msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({'A': [1, 2, 3]})
msg = "expr cannot be an empty string"
with tm.assert_raises_regex(ValueError, msg):
df.query('')
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(object):
def test_query_with_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser,
engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser,
engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne(cls.engine)
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
ops = '==', '!=', '<', '>', '<=', '>='
for op in ops:
with pytest.raises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with pytest.raises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2 # noqa
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assert_raises_regex(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assert_raises_regex(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
assert_frame_equal(result, expected)
df = DataFrame({'index': a,
'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine,
parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
df.loc[::2, 0] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
assert_frame_equal(result, expected)
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
cls.frame = TestData().frame
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval('x + 1', engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPython, cls).setup_class()
cls.engine = cls.parser = 'python'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryStrings(object):
def test_str_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
pytest.raises(NotImplementedError, df.query, ex,
engine=engine, parser=parser,
local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_list_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
tm.skip_if_no_ne(engine)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
tm.skip_if_no_ne(engine=engine)
import operator as opr
a = Series(np.random.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
tm.skip_if_no_ne(engine)
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US' # noqa
r = df.query('Symbol == @symb', parser=parser, engine=engine)
assert_frame_equal(e, r)
class TestDataFrameEvalNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne()
def setup_method(self, method):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self):
res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self):
res = self.frame.eval('a[a < 1] + b', engine=self.engine,
parser=self.parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
def test_invalid_type_for_operator_raises(self):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
ops = '+', '-', '*', '/'
for op in ops:
with tm.assert_raises_regex(TypeError,
"unsupported operand type\(s\) "
"for .+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=self.engine,
parser=self.parser)
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameEvalNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameEvalPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
@classmethod
def setup_class(cls):
cls.engine = cls.parser = 'python'
| mit |
JT5D/scikit-learn | examples/plot_multilabel.py | 9 | 4299 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
pl.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
# Convert list of tuples to a class indicator matrix first
Y_indicator = LabelBinarizer().fit(Y).transform(Y)
X = CCA(n_components=2).fit(X, Y_indicator).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
pl.subplot(2, 2, subplot)
pl.title(title)
zero_class = np.where([0 in y for y in Y])
one_class = np.where([1 in y for y in Y])
pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
pl.xticks(())
pl.yticks(())
pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
pl.xlabel('First principal component')
pl.ylabel('Second principal component')
pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/odl/util/graphics.py | 1 | 15419 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Functions for graphical output."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
from odl.util.testutils import run_doctests
from odl.util.utility import is_real_dtype
__all__ = ('show_discrete_data',)
def _safe_minmax(values):
"""Calculate min and max of array with guards for nan and inf."""
# Nan and inf guarded min and max
minval = np.min(values[np.isfinite(values)])
maxval = np.max(values[np.isfinite(values)])
return minval, maxval
def _colorbar_ticks(minval, maxval):
"""Return the ticks (values show) in the colorbar."""
return [minval, (maxval + minval) / 2., maxval]
def _digits(minval, maxval):
"""Digits needed to comforatbly display values in [minval, maxval]"""
if minval == maxval:
return 3
else:
return min(10, max(2, int(1 + abs(np.log10(maxval - minval)))))
def _colorbar_format(minval, maxval):
"""Return the format string for the colorbar."""
return '%.{}f'.format(_digits(minval, maxval))
def _axes_info(grid, npoints=5):
result = []
min_pt = grid.min()
max_pt = grid.max()
for axis in range(grid.ndim):
xmin = min_pt[axis]
xmax = max_pt[axis]
points = np.linspace(xmin, xmax, npoints)
indices = np.linspace(0, grid.shape[axis] - 1, npoints, dtype=int)
tick_values = grid.coord_vectors[axis][indices]
# Do not use corner point in case of a partition, use outer corner
tick_values[[0, -1]] = xmin, xmax
format_str = '{:.' + str(_digits(xmin, xmax)) + 'f}'
tick_labels = [format_str.format(f) for f in tick_values]
result += [(points, tick_labels)]
return result
def show_discrete_data(values, grid, title=None, method='',
force_show=False, fig=None, **kwargs):
"""Display a discrete 1d or 2d function.
Parameters
----------
values : `numpy.ndarray`
The values to visualize
grid : `TensorGrid` or `RectPartition`
Grid of the values
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
'plot' : graph plot
'scatter' : scattered 2d points
(2nd axis <-> value)
2d methods:
'imshow' : image plot with coloring according to value,
including a colorbar.
'scatter' : cloud of scattered 3d points
(3rd axis <-> value)
'wireframe', 'plot_wireframe' : surface plot
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
The figure to show in. Expected to be of the same "style" as the figure
given by this function. The most common use case is that fig is the
return value from an earlier call to this function.
Default: New figure
interp : {'nearest', 'linear'}, optional
Interpolation method to use.
Default: 'nearest'
axis_labels : string, optional
Axis labels, default: ['x', 'y']
update_in_place : bool, optional
Update the content of the figure in place. Intended for faster real
time plotting, typically ~5 times faster.
This is only performed for ``method == 'imshow'`` with real data and
``fig != None``. Otherwise this parameter is treated as False.
Default: False
axis_fontsize : int, optional
Fontsize for the axes. Default: 16
kwargs : {'figsize', 'saveto', ...}
Extra keyword arguments passed on to display method
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
matplotlib.pyplot.plot : Show graph plot
matplotlib.pyplot.imshow : Show data as image
matplotlib.pyplot.scatter : Show scattered 3d points
"""
# Importing pyplot takes ~2 sec, only import when needed.
import matplotlib.pyplot as plt
args_re = []
args_im = []
dsp_kwargs = {}
sub_kwargs = {}
arrange_subplots = (121, 122) # horizontal arrangement
# Create axis labels which remember their original meaning
axis_labels = kwargs.pop('axis_labels', ['x', 'y'])
values_are_complex = not is_real_dtype(values.dtype)
figsize = kwargs.pop('figsize', None)
saveto = kwargs.pop('saveto', None)
interp = kwargs.pop('interp', 'nearest')
axis_fontsize = kwargs.pop('axis_fontsize', 16)
# Check if we should and can update the plot in place
update_in_place = kwargs.pop('update_in_place', False)
if (update_in_place and
(fig is None or values_are_complex or values.ndim != 2 or
(values.ndim == 2 and method not in ('', 'imshow')))):
update_in_place = False
if values.ndim == 1: # TODO: maybe a plotter class would be better
if not method:
if interp == 'nearest':
method = 'step'
dsp_kwargs['where'] = 'mid'
elif interp == 'linear':
method = 'plot'
else:
method = 'plot'
if method == 'plot' or method == 'step' or method == 'scatter':
args_re += [grid.coord_vectors[0], values.real]
args_im += [grid.coord_vectors[0], values.imag]
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
elif values.ndim == 2:
if not method:
method = 'imshow'
if method == 'imshow':
args_re = [np.rot90(values.real)]
args_im = [np.rot90(values.imag)] if values_are_complex else []
extent = [grid.min()[0], grid.max()[0],
grid.min()[1], grid.max()[1]]
if interp == 'nearest':
interpolation = 'nearest'
elif interp == 'linear':
interpolation = 'bilinear'
else:
interpolation = 'none'
dsp_kwargs.update({'interpolation': interpolation,
'cmap': 'bone',
'extent': extent,
'aspect': 'auto'})
elif method == 'scatter':
pts = grid.points()
args_re = [pts[:, 0], pts[:, 1], values.ravel().real]
args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag]
if values_are_complex else [])
sub_kwargs.update({'projection': '3d'})
elif method in ('wireframe', 'plot_wireframe'):
method = 'plot_wireframe'
x, y = grid.meshgrid
args_re = [x, y, np.rot90(values.real)]
args_im = ([x, y, np.rot90(values.imag)] if values_are_complex
else [])
sub_kwargs.update({'projection': '3d'})
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
else:
raise NotImplementedError('no method for {}d display implemented'
''.format(values.ndim))
# Additional keyword args are passed on to the display method
dsp_kwargs.update(**kwargs)
if fig is not None:
# Reuse figure if given as input
if not isinstance(fig, plt.Figure):
raise TypeError('`fig` {} not a matplotlib figure'.format(fig))
if not plt.fignum_exists(fig.number):
# If figure does not exist, user either closed the figure or
# is using IPython, in this case we need a new figure.
fig = plt.figure(figsize=figsize)
updatefig = False
else:
# Set current figure to given input
fig = plt.figure(fig.number)
updatefig = True
if values.ndim > 1 and not update_in_place:
# If the figure is larger than 1d, we can clear it since we
# don't reuse anything. Keeping it causes performance problems.
fig.clf()
else:
fig = plt.figure(figsize=figsize)
updatefig = False
if values_are_complex:
# Real
if len(fig.axes) == 0:
# Create new axis if needed
sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs)
sub_re.set_title('Real part')
sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_re.set_ylabel('value')
else:
sub_re = fig.axes[0]
display_re = getattr(sub_re, method)
csub_re = display_re(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 2:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_re, maxval_re = _safe_minmax(values.real)
else:
minval_re, maxval_re = kwargs['clim']
ticks_re = _colorbar_ticks(minval_re, maxval_re)
format_re = _colorbar_format(minval_re, maxval_re)
plt.colorbar(csub_re, orientation='horizontal',
ticks=ticks_re, format=format_re)
# Imaginary
if len(fig.axes) < 3:
sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs)
sub_im.set_title('Imaginary part')
sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_im.set_ylabel('value')
else:
sub_im = fig.axes[2]
display_im = getattr(sub_im, method)
csub_im = display_im(*args_im, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 4:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_im, maxval_im = _safe_minmax(values.imag)
else:
minval_im, maxval_im = kwargs['clim']
ticks_im = _colorbar_ticks(minval_im, maxval_im)
format_im = _colorbar_format(minval_im, maxval_im)
plt.colorbar(csub_im, orientation='horizontal',
ticks=ticks_im, format=format_im)
else:
if len(fig.axes) == 0:
# Create new axis object if needed
sub = plt.subplot(111, **sub_kwargs)
sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub.set_ylabel('value')
try:
# For 3d plots
sub.set_zlabel('z')
except AttributeError:
pass
else:
sub = fig.axes[0]
if update_in_place:
import matplotlib as mpl
imgs = [obj for obj in sub.get_children()
if isinstance(obj, mpl.image.AxesImage)]
if len(imgs) > 0 and updatefig:
imgs[0].set_data(args_re[0])
csub = imgs[0]
# Update min-max
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
csub.set_clim(minval, maxval)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow':
# Add colorbar
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
ticks = _colorbar_ticks(minval, maxval)
format = _colorbar_format(minval, maxval)
if len(fig.axes) < 2:
# Create colorbar if none seems to exist
plt.colorbar(mappable=csub, ticks=ticks, format=format)
elif update_in_place:
# If it exists and we should update it
csub.colorbar.set_clim(minval, maxval)
csub.colorbar.set_ticks(ticks)
csub.colorbar.set_ticklabels([format % tick for tick in ticks])
csub.colorbar.draw_all()
# Fixes overlapping stuff at the expense of potentially squashed subplots
if not update_in_place:
fig.tight_layout()
if title is not None:
if not values_are_complex:
# Do not overwrite title for complex values
plt.title(title)
fig.canvas.manager.set_window_title(title)
if updatefig or plt.isinteractive():
# If we are running in interactive mode, we can always show the fig
# This causes an artifact, where users of `CallbackShow` without
# interactive mode only see the figure after the second iteration.
plt.show(block=False)
if not update_in_place:
plt.draw()
plt.pause(0.0001)
else:
try:
sub.draw_artist(csub)
fig.canvas.blit(fig.bbox)
fig.canvas.update()
fig.canvas.flush_events()
except AttributeError:
plt.draw()
plt.pause(0.0001)
if force_show:
plt.show()
if saveto is not None:
fig.savefig(saveto)
return fig
if __name__ == '__main__':
run_doctests()
| gpl-3.0 |
DistrictDataLabs/django-data-product | irisfinder/views.py | 1 | 1948 | from django.shortcuts import render
import datetime
from models import Iris, SVMModels
from forms import UserIrisData
import sklearn
from sklearn import svm
from sklearn.cross_validation import train_test_split
import numpy as np
from django.conf import settings
import cPickle
import scipy
from pytz import timezone
import random
# Create your views here.
def predict(request):
data = {
"app_name": "irisfinder",
"random_number": random.randint(0, 10000)
}
if request.method == "GET":
form = UserIrisData()
data.update({"form": form, "submit": True})
elif request.method == "POST":
form = UserIrisData(request.POST)
sepal_length = request.POST.get("sepal_length")
sepal_width = request.POST.get("sepal_width")
petal_length = request.POST.get("petal_length")
petal_width = request.POST.get("petal_width")
if request.POST.get('submit'):
user_data = Iris(user_data=True,
sepal_length=sepal_length,
sepal_width=sepal_width,
petal_length=petal_length,
petal_width=petal_width)
user_data.save()
model_object = SVMModels.objects.order_by("-run_date").first()
model = cPickle.loads(model_object.model_pickle)
prediction = model.predict([sepal_length, sepal_width, petal_length, petal_width])
item_pk = user_data.pk
species = prediction[0]
data.update({"form": form, "verify": True, "item_pk": item_pk,
"species": species, "prediction": prediction[0]})
elif request.POST.get('verified'):
user_data = Iris.objects.get(pk=int(request.POST.get("item_pk")))
user_data.species = request.POST.get("species")
user_data.save()
return render(request, "predict.html", context=data) | apache-2.0 |
nvoron23/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will fit the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This selects only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1)  # index by the 'dept' column
#evaluate the relationship between the various parameters with the Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
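# Hedged sketch (not part of the original test suite; the function name is
# hypothetical): the basic fit/transform usage exercised by the tests above,
# on the same S-curve generator used in test_transform.
def demo_isomap_embedding(n_samples=300):
    X, _ = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
    iso = manifold.Isomap(n_neighbors=10, n_components=2)
    X_2d = iso.fit_transform(X)
    # One embedded row per sample, two output dimensions.
    assert X_2d.shape == (n_samples, 2)
    return X_2d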
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/linear_model/logistic.py | 9 | 67760 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
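# Hedged example (illustrative; the helper name is hypothetical): shows how
# _intercept_dot splits off the trailing intercept entry and returns
# y * (X.dot(w) + c).
def _example_intercept_dot():
    import numpy as np
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w_with_intercept = np.array([0.5, -0.25, 2.0])  # last entry is the bias c
    w, c, yz = _intercept_dot(w_with_intercept, X, y)
    assert w.shape == (2,) and c == 2.0
    assert np.allclose(yz, y * (X.dot(w) + c))
    return yz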
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
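# Hedged check (illustrative; the helper name is hypothetical): validates the
# analytic gradient returned above against a central finite difference of
# _logistic_loss (defined just below).
def _example_check_logistic_gradient():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)
    w = rng.randn(3)
    _, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0)
    eps = 1e-6
    approx = np.empty_like(w)
    for i in range(w.size):
        e = np.zeros_like(w)
        e[i] = eps
        f_plus = _logistic_loss(w + e, X, y, alpha=1.0)
        f_minus = _logistic_loss(w - e, X, y, alpha=1.0)
        approx[i] = (f_plus - f_minus) / (2 * eps)
    assert np.allclose(grad, approx, atol=1e-4)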
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
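# Hedged example (illustrative; the helper name is hypothetical): the class
# probabilities returned by _multinomial_loss form a row-stochastic matrix.
def _example_multinomial_probabilities():
    import numpy as np
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 6, 4, 3
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = rng.randn(n_classes * (n_features + 1))  # includes per-class intercepts
    sample_weight = np.ones(n_samples)
    loss, p, _ = _multinomial_loss(w, X, Y, 1.0, sample_weight)
    assert p.shape == (n_samples, n_classes)
    assert np.allclose(p.sum(axis=1), 1.0)
    return loss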
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
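# Hedged check (illustrative; the helper name is hypothetical): the
# Hessian-vector product callable returned above can be compared against a
# finite difference of the multinomial gradient along a direction v.
def _example_check_multinomial_hessp():
    import numpy as np
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 10, 3, 3
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = rng.randn(n_classes * (n_features + 1))
    sw = np.ones(n_samples)
    _, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sw)
    v = rng.randn(w.size)
    eps = 1e-5
    _, grad_plus, _ = _multinomial_loss_grad(w + eps * v, X, Y, 1.0, sw)
    _, grad_minus, _ = _multinomial_loss_grad(w - eps * v, X, Y, 1.0, sw)
    finite_diff = (grad_plus - grad_minus) / (2 * eps)
    assert np.allclose(hessp(v), finite_diff, atol=1e-4)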
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
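# Hedged usage sketch (illustrative; the helper name is hypothetical):
# computing a short regularization path on a toy binary problem with the
# function defined above.
def _example_logistic_regression_path():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(np.int64)
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.1, 1.0, 10.0], fit_intercept=True, solver='lbfgs')
    # One coefficient vector (with a trailing intercept) per value of C.
    assert len(coefs) == 3 and coefs[0].shape == (3,)
    return coefs, Cs, n_iter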
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the cross-
entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The newton-cg, sag
and lbfgs solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
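# Hedged usage sketch (illustrative; the helper name is hypothetical): the
# basic fit / predict / predict_proba cycle for the estimator defined above.
def _example_logistic_regression_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = (X[:, 0] - X[:, 2] > 0).astype(np.int64)
    clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X, y)
    proba = clf.predict_proba(X[:5])
    # Probabilities over the two classes sum to one for every sample.
    assert np.allclose(proba.sum(axis=1), 1.0)
    return clf.predict(X[:5])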
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is as an int, then a grid of Cs values are chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
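# Editor's addition: a minimal, hedged usage sketch. It assumes the ``fit``
# method above belongs to scikit-learn's LogisticRegressionCV; the toy data
# and parameter values below are illustrative only, not part of the library.
def _example_logistic_regression_cv():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegressionCV
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = LogisticRegressionCV(Cs=10, cv=3, solver='lbfgs', refit=True)
    clf.fit(X, y)
    # C_ holds the selected regularization strength(s); scores_ maps each
    # class label to its (n_folds, len(Cs)) grid of cross-validation scores.
    return clf.C_, clf.scores_[1].shape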
| bsd-3-clause |
google-research/google-research | smu/parser/smu_utils_lib_test.py | 1 | 35529 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'),
899650)
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'),
899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
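# Editor's sketch (not part of the original tests): how the flat connectivity
# string used above maps onto atom pairs. The (0,1), (0,2), ..., (1,2), ...
# ordering is inferred from the expected bond lists in CreateBondTopologyTest;
# the real decoding lives in smu_utils_lib.create_bond_topology.
def _decode_connectivity_sketch(atoms, matrix):
  bonds = []
  idx = 0
  for i in range(len(atoms)):
    for j in range(i + 1, len(atoms)):
      order = int(matrix[idx])
      idx += 1
      if order:
        bonds.append((i, j, order))
  return bonds
# e.g. _decode_connectivity_sketch('CNFF', '111000') == [(0, 1, 1), (0, 2, 1), (0, 3, 1)]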
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=['name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1',
'name2', 'stoich2', 'btid2', 'shortconfid2', 'confid2'],
data=[
['x07_c2n2o2fh3.224227.004',
'c2n2o2fh3', 224227, 4, 224227004,
'x07_c2n2o2fh3.224176.005',
'c2n2o2fh3', 224176, 5, 224176005],
['x07_c2n2o2fh3.260543.005',
'c2n2o2fh3', 260543, 5, 260543005,
'x07_c2n2o2fh3.224050.001',
'c2n2o2fh3', 224050, 1, 224050001],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
# This is just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
# This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
    # and x07_stage1.dat is the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by = 123
self.duplicate_conformer.duplicate_of.extend([111, 222])
def test_two_stage2(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage2_conformer)
def test_two_stage1(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage1_conformer)
def test_two_duplicates(self):
duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)
duplicate_conformer2.duplicate_of[:] = [333, 444]
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.duplicate_conformer, duplicate_conformer2)
self.assertIsNone(got_conflict)
self.assertEqual(123, got_conf.duplicated_by)
self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)
def test_stage2_stage1(self):
# Add a duplicate to stage1 to make sure it is copied
self.stage1_conformer.duplicate_of.append(999)
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [999])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_energy(self):
self.stage2_conformer.properties.initial_geometry_energy.value = -1.23
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
    # The stage2 value should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)
def test_stage2_stage1_conflict_error_codes(self):
self.stage2_conformer.properties.errors.error_nstat1 = 999
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
999, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_missing_geometry(self):
self.stage2_conformer.ClearField('optimized_geometry')
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, False
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_no_conflict_minus1(self):
# If stage2 contains a -1, we keep that (stricter error checking later on)
self.stage2_conformer.properties.initial_geometry_energy.value = -1.0
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)
def test_stage2_stage1_no_conflict_approx_equal(self):
self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage1_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage1_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))
def test_multiple_initial_geometries(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_multiple_bond_topologies(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_different_bond_topologies(self):
self.stage1_conformer.bond_topologies[0].atoms[0] = (
dataset_pb2.BondTopology.ATOM_H)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage1_conformer)
class ConformerErrorTest(absltest.TestCase):
def test_stage1_no_error(self):
conformer = get_stage1_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage1_error(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_frequencies = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_no_error(self):
conformer = get_stage2_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_1_expected_field(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_rotational_modes = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_0_expected_field(self):
conformer = get_stage2_conformer()
# This field is 0 to indicate no error. Why the discrepancy? Who knows!
conformer.properties.errors.error_nsvg09 = 1
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_nstat1_is_3(self):
    # This is the other bizarre case. nstat1 of 3 is still considered success.
conformer = get_stage2_conformer()
conformer.properties.errors.error_nstat1 = 3
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
class FilterConformerByAvailabilityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = dataset_pb2.Conformer()
properties = self.conformer.properties
# A STANDARD field
properties.single_point_energy_pbe0d3_6_311gd.value = 1.23
# A COMPLETE field
properties.homo_pbe0_aug_pc_1.value = 1.23
# An INTERNAL_ONLY field
properties.nuclear_repulsion_energy.value = 1.23
def test_standard(self):
smu_utils_lib.filter_conformer_by_availability(self.conformer,
[dataset_pb2.STANDARD])
self.assertTrue(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertFalse(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
def test_complete_and_internal_only(self):
smu_utils_lib.filter_conformer_by_availability(
self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])
self.assertFalse(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertTrue(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertTrue(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
class ConformerToStandardTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_field_filtering(self):
# Check that the field which should be filtered starts out set
self.assertTrue(self.conformer.properties.HasField(
'single_point_energy_hf_6_31gd'))
got = smu_utils_lib.conformer_to_standard(self.conformer)
# Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(
got.properties.HasField('single_point_energy_hf_6_31gd'))
def test_remove_error_conformer(self):
self.conformer.properties.errors.error_frequencies = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
def test_remove_duplicate(self):
self.conformer.duplicated_by = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
class DetermineFateTest(parameterized.TestCase):
def test_duplicate_same_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
def test_duplicate_different_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1000
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),
(5, dataset_pb2.Conformer.FATE_DISASSOCIATED),
(4, dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),
(6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))
def test_geometry_failures(self, nstat1, expected_fate):
conformer = get_stage1_conformer()
conformer.properties.errors.error_nstat1 = nstat1
self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))
def test_no_result(self):
conformer = get_stage1_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,
smu_utils_lib.determine_fate(conformer))
def test_calculation_errors(self):
conformer = get_stage2_conformer()
# This is a random choice of an error to set. I just need some error.
conformer.properties.errors.error_atomic_analysis = 999
self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR,
smu_utils_lib.determine_fate(conformer))
def test_success(self):
conformer = get_stage2_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,
smu_utils_lib.determine_fate(conformer))
class ToBondTopologySummaryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_dup_same(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_same_topology, 1)
def test_dup_diff(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_different_topology, 1)
def test_geometry_failed(self):
self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_failed_geometry_optimization, 1)
def test_missing_calculation(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_kept_geometry, 1)
self.assertEqual(got[0].count_missing_calculation, 1)
def test_calculation_with_error(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_with_error, 0)
self.assertEqual(got[0].count_detected_match_with_error, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_with_error, 1)
self.assertEqual(got[1].count_detected_match_with_error, 0)
def test_calculation_success(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_success, 0)
self.assertEqual(got[0].count_detected_match_success, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_success, 1)
self.assertEqual(got[1].count_detected_match_success, 0)
class LabeledSmilesTester(absltest.TestCase):
def test_atom_labels(self):
mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)
self.assertIsNotNone(mol)
smiles_before = Chem.MolToSmiles(mol)
self.assertEqual(
smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')
# Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
for atom in mol.GetAtoms():
self.assertEqual(atom.GetAtomMapNum(), 0)
self.assertEqual(Chem.MolToSmiles(mol), smiles_before)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
fyffyt/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
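# Editor's addition (hedged sketch): AdaBoostRegressor.staged_predict yields
# the ensemble's prediction after each boosting stage, which makes it easy to
# see how much the extra estimators help. Printing the training MSE every 50
# stages is illustrative only and not part of the original example.
from sklearn.metrics import mean_squared_error
for n_stages, y_staged in enumerate(regr_2.staged_predict(X), start=1):
    if n_stages % 50 == 0:
        print("stage %d: training MSE = %.4f"
              % (n_stages, mean_squared_error(y, y_staged)))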
| bsd-3-clause |
cyliustack/sofa | bin/sofa_analyze.py | 1 | 50661 | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % (64, rand(), rand())
def get_top_k_events(cfg, df, topk):
topk_events=[]
gby = df.groupby(['name'])
df_agg = gby.aggregate(np.sum)
df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False)
#memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
if cfg.verbose:
print("Top %d Events: "%topk)
print(df_agg_sorted[['duration']][0:topk])
eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist()
return eventName
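# Editor's sketch (illustrative only, never called): the groupby/sum/sort
# pattern used by get_top_k_events above, shown on a tiny hand-made trace with
# the same 'name'/'duration' columns.
def _example_top_k_events():
    toy = pd.DataFrame({'name': ['gemm', 'copy', 'gemm', 'relu'],
                        'duration': [3.0, 1.0, 2.0, 0.5]})
    agg = toy.groupby('name').agg({'duration': 'sum'})
    top = agg.sort_values(by='duration', ascending=False).head(2)
    return top.index.tolist()  # ['gemm', 'copy']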
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
if len(features) > 0:
pfv = potato_pb2.PerformanceFeatureVector()
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
#print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
pfv.name.append(name)
pfv.value.append(value)
#print('Wait for response from POTATO server...')
myhostname = socket.gethostname()
channel = grpc.insecure_channel(potato_server)
stub = potato_pb2_grpc.HintStub(channel)
request = potato_pb2.HintRequest( hostname = myhostname,
pfv = pfv)
response = stub.Hint(request)
hint = response.hint
docker_image = response.docker_image
else:
hint = 'There is no pfv to get hints.'
docker_image = 'NA'
return hint, docker_image
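# Editor's sketch (illustrative only): get_hint() expects a two-column
# 'name'/'value' DataFrame plus a "host:port" string for the POTATO gRPC
# service. The address below is a placeholder; the call only succeeds when
# such a service is actually running.
def _example_get_hint():
    pfv = pd.DataFrame({'name': ['gpu_time', 'num_gpus'],
                        'value': [12.3, 4]},
                       columns=['name', 'value'])
    return get_hint('localhost:50051', pfv)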
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
sample_time = (1 / float(cfg.sys_mon_rate))
while t < t_end:
t = t + sample_time
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - sample_time
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_idl = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(sample_time*int(ratios[1])/100.0)
mp_sys.append(sample_time*int(ratios[2])/100.0)
mp_idl.append(sample_time*int(ratios[3])/100.0)
mp_iow.append(sample_time*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_idl = np.asarray(mp_idl)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
else:
total_elapsed_time['idl'] += sample_time
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
mp_idl.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
if num_gpus > 0:
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
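# Editor's sketch (illustrative only, never called): the per-window labelling
# rule at the heart of concurrency_breakdown(): pick whichever of
# usr/sys/gpu/iow dominated the window, or call the window idle when nothing
# exceeds the idle threshold.
def _classify_window_sketch(elapsed, sample_time, idle_threshold_pct):
    dominator = max(elapsed, key=elapsed.get)
    if elapsed[dominator] > sample_time * idle_threshold_pct / 100.0:
        return dominator
    return 'idl'
# e.g. _classify_window_sketch({'usr': 0.04, 'sys': 0.01, 'gpu': 0.08, 'iow': 0.0},
#                              sample_time=0.1, idle_threshold_pct=10) -> 'gpu'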
def payload_sum(df):
print((len(df)))
class Event:
def __init__(self, name, ttype, timestamp, duration):
self.name = name
self.ttype = ttype # 0 for begin, 1 for end
self.timestamp = timestamp
self.duration = duration
def __repr__(self):
return repr((self.name, self.ttype, self.timestamp, self.duration))
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('SM & MEM & ENCODE/DECODE Profiling')
if cfg.spotlight_gpu:
if cfg.roi_end == 0 :
            print_warning(cfg, 'spotlight_gpu has no effect.')
else:
cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
df_nvsmi = df_nvsmi[ cond1 & cond2 ]
sm_start = df_nvsmi.iloc[0].timestamp
sm_end = df_nvsmi.iloc[-1].timestamp
SM_time = sm_end - sm_start
result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
result = result.astype(int)
gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
if cfg.nvsmi_data:
gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
else:
gpu_enc_util = 0
gpu_dec_util = 0
sm = df_nvsmi['event'] == int(0)
mem = df_nvsmi['event'] == int(1)
enc = df_nvsmi['event'] == int(2)
dec = df_nvsmi['event'] == int(3)
gpunum = list(set(df_nvsmi['deviceId']))
res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
for i in gpunum:
gpuid = df_nvsmi['deviceId'] == int(i)
gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
res = pd.concat([res, gpu_tmp])
sm_q = pd.concat([sm_q, sm_tmp])
mem_q = pd.concat([mem_q, mem_tmp])
res.index.name = 'gpu_id'
sm_q.index.name = 'gpu_id'
mem_q.index.name = 'gpu_id'
if not cfg.cluster_ip and cfg.verbose:
print('GPU Utilization (%):')
print(res)
print('\nGPU SM Quartile (%):')
print(sm_q)
print('\nGPU MEM Quartile (%):')
print(mem_q)
print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
int(gpu_sm_util),
df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
int(gpu_mem_util),
]},
columns=['name','value'])
features = pd.concat([features, df])
return features
def gpu_profile(logdir, cfg, df_gpu, features):
if cfg.verbose:
print_title('GPU Profiling')
print('Per-GPU time (s):')
groups = df_gpu.groupby("deviceId")["duration"]
gpu_time = 0
for key, item in groups:
gpuid = int(float(key))
per_gpu_time = groups.get_group(key).sum()
if cfg.verbose:
print("[%d]: %lf" % (gpuid, per_gpu_time))
gpu_time = gpu_time + per_gpu_time
num_gpus = len(groups)
kernel_time = 0
grouped_df = df_gpu.groupby("copyKind")["duration"]
for key, item in grouped_df:
if key == 0:
kernel_time = grouped_df.get_group(key).sum()
nccl_time = 0
grouped_df = df_gpu.groupby("name")["duration"]
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("nccl") != -1:
nccl_time = nccl_time + grouped_df.get_group(key).sum()
features = comm_profile(logdir, cfg, df_gpu, features)
get_top_k_events(cfg, df_gpu, 10)
df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def strace_profile(logdir, cfg, df, features):
print_title('STRACE Profiling:')
return features
def net_profile(logdir, cfg, df, features):
if not cfg.cluster_ip:
print_title("Network Profiling:")
grouped_df = df.groupby("name")["duration"]
net_time = 0
n_packets = 0
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("network:tcp:") != -1:
net_time = net_time + grouped_df.get_group(key).sum()
n_packets = n_packets + 1
#print(("total network time (s) = %.3lf" % net_time))
#print(("total amount of network packets = %d" % n_packets))
# total network packet
packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
# total network traffic
packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
# ================ change pandas table columns and index name ====
rename_index = packet_sum_matrix.index.tolist()
rename_index2 = packet_num_matrix.index.tolist()
rename_columns = packet_sum_matrix.columns.tolist()
rename_columns2 = packet_num_matrix.columns.tolist()
def zero(s):
if s[0:2] == '00':
s = s[2]
elif (s[0] == '0') and (s[1] != '0'):
s = s[1:3]
return(s)
def check_str(rename_list):
rename_list_new = []
for j in rename_list:
j = str(int(j))
a = j[-9:-6]
b = j[-6:-3]
c = j[-3:]
j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_list_new.append(j)
return(rename_list_new)
def check_str2(rename_list):
rename_columns_2 = []
for i in rename_list:
i = str(int(i[0]))
a = i[-9:-6]
b = i[-6:-3]
c = i[-3:]
i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_columns_2.append(i)
return(rename_columns_2)
rename_index_new = check_str(rename_index)
rename_index_new = dict(zip(rename_index, rename_index_new))
rename_index2_new = check_str2(rename_index2)
rename_index2_final = list(set(rename_index2_new))
rename_index2_final.sort(key=rename_index2_new.index)
rename_columns_new = check_str(rename_columns)
rename_columns_new = dict(zip(rename_columns, rename_columns_new))
rename_columns2_new = check_str(rename_columns2)
rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
# rename here
packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
if cfg.verbose:
print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
network_value = []
src = []
dst = []
final = []
for index in packet_sum_matrix.index:
for column in packet_sum_matrix.columns:
src.append(index)
dst.append(column)
network_value.append(packet_sum_matrix[column][index])
record = list(zip(src, dst, network_value))
record.sort(key=lambda tup:tup[2], reverse=True)
for src, dst, value in record:
if value == 0:
pass
else:
item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
final.append(item)
summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary.to_csv(logdir + 'netrank.csv',
mode='w',
header=True,
index=False)
df = pd.DataFrame({'name':['net_time'],
'value':[net_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
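# Editor's sketch (illustrative only): the src/dst traffic matrix built by
# net_profile() is a plain pandas groupby + unstack; the small integers below
# are made-up stand-ins for the encoded pkt_src/pkt_dst addresses.
def _example_traffic_matrix():
    toy = pd.DataFrame({'pkt_src': [1, 1, 2],
                        'pkt_dst': [2, 2, 1],
                        'payload': [100, 50, 70]})
    return toy.groupby(['pkt_src', 'pkt_dst'])['payload'].sum().unstack(
        level=1, fill_value=0)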
def convertbyte(B):
B = int(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{} Bytes'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
def convertbytes(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0:.2f} B/s'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB/s'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB/s'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB/s'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB/s'.format(B/TB)
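# Editor's note (hedged usage sketch): convertbyte()/convertbytes() turn raw
# byte counts into human-readable strings; the expected values below follow
# directly from the thresholds defined above.
def _example_convert_helpers():
    assert convertbyte(512) == '512 Bytes'
    assert convertbyte(1536) == '1.50 KB'
    assert convertbytes(3 * 1024 ** 2) == '3.00 MB/s'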
def netbandwidth_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('Network Bandwidth Profiling:')
tx = df['event'] == float(0)
rx = df['event'] == float(1)
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
first_line = lines[0]
last_line = lines[-1]
tx_begin = first_line.split(',')[1]
rx_begin = first_line.split(',')[2]
tx_end = last_line.split(',')[1]
rx_end = last_line.split(',')[2]
tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
if not cfg.cluster_ip:
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
if cfg.verbose:
print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
print('Amount of tx : %s' % convertbyte(tx_amount))
print('Amount of rx : %s' % convertbyte(rx_amount))
print('Bandwidth Quartile :')
print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
#network chart part
all_time = df[tx]['timestamp'].tolist()
all_tx = df[tx]['bandwidth'].tolist()
all_rx = df[rx]['bandwidth'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
plt.legend(loc='upper right')
plt.title("Network Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Bandwidth (bytes)", fontsize=16)
fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
if not cfg.cluster_ip and cfg.verbose:
print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def blktrace_latency_profile(logdir, cfg, df, features):
with open('%s/btt.txt' % logdir) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '==================== All Devices ====================' in line:
start = i
if '==================== Device Merge Information ====================' in line:
end = i
break
bttoutput_result = lines[start:end]
df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
time = df_offset['time'].tolist()
start_b = df_offset['start'].tolist()
end_b = df_offset['end'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
plt.legend(loc='upper right')
plt.title("Block Offset Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Block Number", fontsize=16)
fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
if cfg.verbose:
print_title('Storage Profiling:')
        print('Blktrace Latency (s):')
for btt in bttoutput_result:
print(btt[:-1])
blktrace_latency = df['event'] == 'C'
blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def diskstat_profile(logdir, cfg, df, features):
#diskstat_dev = list(set(df['dev']))
diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
diskstat_r_avg = df.groupby('dev')['d_read'].mean()
diskstat_w_avg = df.groupby('dev')['d_write'].mean()
diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
diskstat_iops = df.groupby('dev')['iops'].mean()
diskstat_wait = df.groupby('dev')['await_time'].mean()
diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
diskstat_r_iops, diskstat_w_iops, diskstat_iops,
diskstat_wait], axis=1, sort=False)
diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
diskstat_table.columns = diskstat_columns
diskstat_dev = diskstat_table.index.format()
final_table = pd.DataFrame(columns=diskstat_columns)
for j, dev in enumerate(diskstat_dev):
tmp_list = []
for i in diskstat_columns[:-4]:
tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
for i in diskstat_columns[-4:-1]:
tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1])
tmp_table = pd.DataFrame([tuple(tmp_list)],
columns=diskstat_columns,
index=[dev])
final_table = pd.concat([final_table, tmp_table])
if cfg.verbose:
print_title('DISKSTAT Profiling:')
print('Disk Throughput Quartile :')
print(final_table.T)
df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def cpu_profile(logdir, cfg, df):
if cfg.verbose:
print_title('CPU Profiling:')
print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
grouped_df = df.groupby("deviceId")["duration"]
total_exec_time = 0
for key, item in grouped_df:
print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum())))
total_exec_time = total_exec_time + grouped_df.get_group(key).sum()
print("total execution time (s) = %.3lf" % total_exec_time)
cpu_detail_profile_df = df[['timestamp','duration','name']]
cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False)
cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100
cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']]
print(cpu_detail_profile_df[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
_,_,_,_,_,_,df['si'],df['so'],df['bi'],df['bo'],df['in'],df['cs'],_,_,_,_,_=df['name'].str.split('|').str
for col_name in ('si','so','bi','bo','in','cs'):
df[col_name] = df[col_name].str[3:]
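    # Layout assumption (inferred from the split/strip above, not verified): each 'name'
    # entry packs 17 '|'-separated vmstat fields, with fields 6-11 (0-based) holding
    # si, so, bi, bo, in and cs as 'xx=<value>' tokens, so the leading three characters
    # ('xx=') are stripped before converting to float.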
vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float)
vm_bi = vmstat_traces['bi'].mean()
vm_bo = vmstat_traces['bo'].mean()
vm_cs = vmstat_traces['cs'].mean()
vm_in = vmstat_traces['in'].mean()
if cfg.verbose:
print_title('VMSTAT Profiling:')
        print('average bi/s: %d' % int(vm_bi))
        print('average bo/s: %d' % int(vm_bo))
        print('average cs/s: %d' % int(vm_cs))
        print('average in/s: %d' % int(vm_in))
df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
'value':[vm_bi, vm_bo, vm_cs, vm_in] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def mpstat_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('MPSTAT Profiling:')
num_cores = int(df['deviceId'].max() + 1)
df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
_,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str
df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
df["t_USR"] = df['dt_all'] * df['USR']/100.0
df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
dfs=[]
for i in range(num_cores):
dfs.append(df.loc[df['deviceId'] == float(i)])
for index,dff in enumerate(dfs):
df_summary.iloc[index]['USR'] = dff['t_USR'].sum()
df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum()
df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum()
df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum()
df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum()
if not cfg.cluster_ip and cfg.verbose:
print('CPU Utilization (%):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
int(100.0*df_summary.iloc[i]['SYS']/t_sum),
int(100.0*df_summary.iloc[i]['IDL']/t_sum),
int(100.0*df_summary.iloc[i]['IOW']/t_sum),
int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
if not cfg.cluster_ip and cfg.verbose:
print('CPU Time (s):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
df_summary.iloc[i]['USR'],
df_summary.iloc[i]['SYS'],
df_summary.iloc[i]['IDL'],
df_summary.iloc[i]['IOW'],
df_summary.iloc[i]['IRQ'] ))
total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
if not cfg.cluster_ip and cfg.verbose:
print('Active CPU Time (s): %.3lf' % total_cpu_time)
print('Active CPU ratio (%%): %3d' % cpu_util)
df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
'value':[num_cores, cpu_util] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def sofa_analyze(cfg):
print_main_progress('SOFA analyzing...')
filein = []
df_cpu = pd.DataFrame([], columns=cfg.columns)
df_gpu = pd.DataFrame([], columns=cfg.columns)
df_net = pd.DataFrame([], columns=cfg.columns)
df_mpstat = pd.DataFrame([], columns=cfg.columns)
df_vmstat = pd.DataFrame([], columns=cfg.columns)
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
df_blktrace = pd.DataFrame([], columns=cfg.columns)
df_diskstat = pd.DataFrame([], columns=cfg.columns)
df_nvsmi = pd.DataFrame([], columns=cfg.columns)
iter_summary = None
logdir = cfg.logdir
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
filein_gpu = logdir + "gputrace.csv"
filein_cpu = logdir + "cputrace.csv"
filein_net = logdir + "nettrace.csv"
filein_vmstat = logdir + "vmstat.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_strace = logdir + "strace.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
filein_blktrace = logdir + "blktrace.csv"
filein_diskstat = logdir + "diskstat_vector.csv"
if os.path.isfile('%s/nvlink_topo.txt' % logdir):
with open(logdir + 'nvlink_topo.txt') as f:
lines = f.readlines()
if len(lines) > 0:
title = lines[0]
num_gpus = 1
for word in title.split():
if re.match(r'GPU', word) != None :
num_gpus = num_gpus + 1
print_info(cfg,'# of GPUs: ' + str(num_gpus) )
edges = []
if len(lines) >= num_gpus+1:
for i in range(num_gpus):
connections = lines[1+i].split()
for j in range(len(connections)):
if connections[j] == 'NV1' or connections[j] == 'NV2':
edges.append((i,j-1))
#print('%d connects to %d' % (i, j-1))
ring_found = False
G = nx.DiGraph(edges)
# Try to find ring with its length of num_gpus
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus:
if cfg.verbose:
print('One of the recommended ring having length of %d' % len(cycle))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Try to find ring with its length of num_gpus/2
if not ring_found:
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus/2:
print(("One of the recommended ring having length of %d" % len(cycle) ))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
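                # Illustrative outcome (hypothetical topology): for four GPUs whose NVLink
                # matrix yields the cycle [0, 2, 3, 1], sofa_hints/xring_order.txt would
                # contain "export CUDA_VISIBLE_DEVICES=0,2,3,1", i.e. an ordering hint
                # derived from the detected ring, not a guaranteed optimal placement.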
# Construct Performance Features
features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
if not df_nvsmi.empty and cfg.spotlight_gpu:
state = 0
sm_high = 0
trigger = 10
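            # Reading of the loop below (behaviour as written; the thresholds are the
            # author's): sm_high counts nvsmi samples on GPU 0 whose utilization value
            # ('duration') is >= 50, saturating at 'trigger'; samples below 10 decrement
            # it. The region of interest opens once 10 such samples have accumulated and
            # closes again when the counter drains back to 0.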
for i in range(len(df_nvsmi)):
if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
if df_nvsmi.iloc[i].duration >= 50:
sm_high = min(trigger, sm_high + 1)
if df_nvsmi.iloc[i].duration < 10:
sm_high = max(0, sm_high - 1)
if state == 0 and sm_high == trigger:
state = 1
cfg.roi_begin = df_nvsmi.iloc[i].timestamp
elif state == 1 and sm_high == 0:
state = 0
cfg.roi_end = df_nvsmi.iloc[i].timestamp
#print('sm_high=%d state=%d' % (sm_high, state))
if cfg.roi_end - cfg.roi_begin < 0:
cfg.roi_end = 0
cfg.roi_begin = 0
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_cpu = pd.read_csv(filein_cpu)
if not df_cpu.empty:
if cfg.verbose:
cpu_profile(logdir, cfg, df_cpu)
if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
df_cpu, swarms = hsg_v2(cfg, df_cpu)
except IOError as e:
df_cpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_cpu)
try:
df_strace = pd.read_csv(filein_strace)
if not df_strace.empty:
features = strace_profile(logdir, cfg, df_strace, features)
except IOError as e:
df_strace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_strace)
try:
df_net = pd.read_csv(filein_net)
if not df_net.empty:
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
if not df_bandwidth.empty:
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
try:
df_blktrace = pd.read_csv(filein_blktrace)
if not df_blktrace.empty:
features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
except IOError as e:
df_blktrace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_blktrace)
try:
df_diskstat = pd.read_csv(filein_diskstat)
if not df_diskstat.empty:
features = diskstat_profile(logdir, cfg, df_diskstat, features)
except IOError as e:
df_diskstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_diskstat)
try:
df_vmstat = pd.read_csv(filein_vmstat)
if not df_vmstat.empty:
features = vmstat_profile(logdir, cfg, df_vmstat, features)
except IOError as e:
df_vmstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_vmstat)
try:
df_mpstat = pd.read_csv(filein_mpstat)
if not df_mpstat.empty:
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_gpu = pd.read_csv(filein_gpu)
if not df_gpu.empty:
features = gpu_profile(logdir, cfg, df_gpu, features)
except IOError:
df_gpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)
try:
if len(df_mpstat)>0:
            df_nvsmi = df_nvsmi.append(df_mpstat.iloc[0])  # append() is not in-place; keep the result
features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
except IOError as e:
print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")
if cfg.enable_aisi:
selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)
if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
print_title('Final Performance Features')
print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
if cfg.spotlight_gpu:
try:
print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value)
except:
                print_warning(cfg, 'elapsed_hotspot_time is not defined.')
if cfg.potato_server:
if cfg.potato_server.find(':') == -1:
cfg.potato_server = cfg.potato_server + ':50051'
hint, docker_image = get_hint(cfg.potato_server, features)
df_report = pd.read_json(hint, orient='table')
file_potato_report = cfg.logdir + 'potato_report.html'
# Export report to HTML file.
df_report.to_html(file_potato_report )
with open(file_potato_report, 'a') as f:
f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')
print_title('POTATO Feedback')
print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
value = df_report.iloc[i]['Value']
ref_value = df_report.iloc[i]['ReferenceValue']
print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))
print('\n')
print_hint('General Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
print('\n')
print_hint('Framework-specific Optimization Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric == 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
#print(df_report[['Metric', 'Value', 'Reference Value']])
#print(df_report[['Suggestion']])
#print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
print('\n')
        print_hint('Please re-launch the KubeFlow Jupyter notebook to apply the suggested images or resources if necessary.')
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print('\n\n')
print('Complete!!')
def cluster_analyze(cfg):
if cfg.verbose:
print_title('Cluster Network Profiling :')
cluster = cfg.cluster_ip.split(',')
summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
all = []
for i, ip in enumerate(cluster):
features = pd.DataFrame({'name':['elapsed_time'],
'value':[cfg.elapsed_time]},
columns=['name','value'])
node = 'node ' + str(i)
if cfg.verbose:
print('node ' + str(i) + ' is ' + ip)
logdir = tmp_dir[0:-1] + '-' + ip + '/'
filein_net = logdir + "nettrace.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
try:
df_net = pd.read_csv(filein_net)
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_mpstat = pd.read_csv(filein_mpstat)
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
sm = int(features[features['name'] == 'gpu_sm_util']['value'])
mem = int(features[features['name'] == 'gpu_mem_util']['value'])
cpu = int(features[features['name'] == 'cpu_util']['value'])
sm_mem_cpu = [sm, mem, cpu]
compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
net_tmp = pd.read_csv(logdir + "netrank.csv")
summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
# for bandwidth report
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
band_tmp = pd.concat([band_tmp, rx_pd])
summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
if cfg.verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print('Ranked Network Traffic : \n', summary_net, '\n')
print('Cluster Bandwidth Quartile: \n', summary_band)
print_title('Cluster Computation Profiling:')
print(summary_compute)
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tseries/tests/test_frequencies.py | 9 | 25284 | from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
#All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
def test_non_datetimeindex(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
def test_legacy_offset_warnings(self):
for k, v in compat.iteritems(frequencies._rule_aliases):
with tm.assert_produces_warning(FutureWarning):
result = frequencies.get_offset(k)
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))
assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro()))
assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli()))
assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano()))
assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro()))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
hanteng/babel | scripts/geoname_cldr.py | 1 | 2479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Discrimination knows no bounds; turning back is the shore. Keystrokes rise and fall; feelings real and illusory.
# url_target="https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv"
import csv
import pandas as pd
import codecs
def export_to_csv(df, ex_filename, sep=','):
if sep==',':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_ALL, na_rep='{na}', encoding='utf-8') #+'.csv'
if sep=='\t':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_NONE, na_rep='{na}', encoding='utf-8') #+'.tsv' , escapechar="'", quotechar=""
def import_from_babel_cldr():
from babel import Locale
#staring from the en-US to retrieve keys
locale = Locale('en', 'US')
completelist_territories = locale.territories.keys()
completelist_languages = locale.languages.keys()
#intiate the output dataframe from this
df_cldr=pd.DataFrame.from_dict(locale.territories, orient="index")
df_cldr.index.name='geocode'
df_cldr.columns = ['name_en']
df_cldr.sort_index(inplace=True)
for i_lang in completelist_languages:
#print(i_lang)
try:
locale = Locale.parse(i_lang)
df=pd.DataFrame.from_dict(locale.territories, orient="index")
df.columns = ['name_{0}'.format(i_lang)]
df.sort_index(inplace=True)
df_cldr=df_cldr.join(df)
except:
pass
return df_cldr
###################### MAIN ########################
import os
path_script=os.path.dirname(os.path.abspath(__file__))
#print path_script
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Fetch and generate the country and territory names in languages that are supported by the Unicode CLDR 25.""")
parser.add_argument("-o", "--output", dest="outputpath", default="geoname_CLDR25_babel.csv",
help="write data to a csv file or a tsv file", metavar="OUTPUTPATH")
args = parser.parse_args()
fn = args.outputpath
#print fn
df_cldr=import_from_babel_cldr()
if fn[-3:]=='csv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep=',')
elif fn[-3:]=='tsv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep='\t')
else:
print ("Only csv and tsv formats can be generated. Sorry.")
| bsd-3-clause |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py | 1 | 2628 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Addaboost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| mit |
NZRS/content-analysis | netflix.py | 2 | 3126 | from bs4 import BeautifulSoup
from urllib2 import quote
import unicodedata
import requests
import json
import glob
import pandas as pd
from collections import Counter
movie_list = []
for page in glob.glob('*.html'):
with open(page, 'r+') as f:
my_page = f.read()
my_soup = BeautifulSoup(my_page)
for div in my_soup.find_all('div', class_='lockup'):
try:
movie_list.append(div.img.get('alt'))
except:
movie_list.append('movie could not be extracted from page')
movie_list = ['movie could not be extracted from page' if movie is None else movie for movie in movie_list]
movie_list2 = []
for movie in movie_list:
try:
movie = quote(movie)
movie_list2.append(movie)
except:
try:
movie = unicodedata.normalize('NFKC', movie).encode('ascii','ignore')
movie = quote(movie)
movie_list2.append(movie)
except:
print movie
movie_list2.append('movie could not be processed')
# Later sections iterate over 'all_movies', so use that name consistently here.
all_movies = {}
for movie in movie_list2:
    try:
        query_url = 'http://www.omdbapi.com/?t=' + movie + '&y=&plot=full&r=json'
        response = requests.get(query_url)
        my_dict = json.loads(response.text)
        all_movies[movie] = my_dict
    except:
        all_movies[movie] = 'No response'
print movie
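# Illustrative request built by the loop above (hypothetical title):
#   http://www.omdbapi.com/?t=The%20Matrix&y=&plot=full&r=json
# Each JSON response is stored per title; failed lookups are recorded as 'No response'.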
# movies/single year shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
if len(v['Year']) == 4:
try:
years_dict[v['Year']] += 1
except:
years_dict[v['Year']] = 1
continue
except:
counter += 1
continue
print counter
my_frame = pd.DataFrame.from_dict(years_dict, orient = 'index')
my_frame.to_csv('single_years.csv')
counter=0
score_dict = {}
for k,v in all_movies.iteritems():
try:
if v['imdbRating'] != 'N/A':
score_dict[v['Title']] = v['imdbRating']
except:
counter +=1
continue
print counter
score_dict2 ={}
for title, score in score_dict.iteritems():
try:
score_dict2[title] = float(score)
except:
print score
score_dict = score_dict2
average_score = (sum(score_dict.values()))/len(score_dict)
# Assumption: the unfinished line below was meant to capture the 25 highest-rated titles.
top_25 = sorted(score_dict.items(), key=lambda kv: kv[1], reverse=True)[:25]
print average_score
years = []
country = []
language =[]
actors = []
for movie, results in all_movies.iteritems():
try:
years.append(results['Year'])
except:
continue
try:
country.append(results['Country'])
except:
continue
try:
language.append(results['Language'])
except:
continue
try:
for actor in results['Actors'].split(','):
actors.append(actor)
except:
continue
# Ongoing shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
print v['Year'][4]
except:
continue
# Languages
lang_list = []
for lang in language:
for x in lang.split(','):
lang_list.append(x)
lang_list
Counter(lang_list)
Counter(lang_list).most_common(10)
| agpl-3.0 |
einarhuseby/arctic | tests/integration/test_arctic.py | 4 | 6898 | from datetime import datetime as dt, timedelta as dtd
from mock import patch
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import pytest
import time
import numpy as np
from arctic.arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException, QuotaExceededException
from ..util import get_large_ts
def test_connect_to_Arctic_string(mongo_host):
arctic = Arctic(mongo_host=mongo_host)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_connect_to_Arctic_connection(mongodb, mongo_host):
arctic = Arctic(mongodb)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_simple(library):
sym = 'symbol'
data = get_large_ts(100)
library.write(sym, data)
orig = dt.now()
    time.sleep(1)  # let the wall clock move on so the second write gets a later timestamp
data2 = get_large_ts(100)
library.write(sym, data2, prune_previous_version=False)
# Get the timeseries, it should be the same
read2 = library.read(sym).data
assert_frame_equal(read2, data2)
# Ensure we can get the previous version
read = library.read(sym, as_of=orig).data
assert_frame_equal(read, data)
def test_indexes(arctic):
c = arctic._conn
arctic.initialize_library("library", VERSION_STORE, segment='month')
chunk = c.arctic.library.index_information()
assert chunk == {u'_id_': {u'key': [(u'_id', 1)], u'ns': u'arctic.library', u'v': 1},
u'symbol_1_parent_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1),
(u'parent', 1),
(u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_1_sha_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_hashed': {u'background': True,
u'key': [(u'symbol', u'hashed')],
u'ns': u'arctic.library',
u'v': 1}}
snapshots = c.arctic.library.snapshots.index_information()
assert snapshots == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.snapshots',
u'v': 1},
u'name_1': {u'background': True,
u'key': [(u'name', 1)],
u'ns': u'arctic.library.snapshots',
u'unique': True,
u'v': 1}}
versions = c.arctic.library.versions.index_information()
assert versions == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1__id_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'_id', -1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1_version_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'version', -1)],
u'ns': u'arctic.library.versions',
u'unique': True,
u'v': 1}}
version_nums = c.arctic.library.version_nums.index_information()
assert version_nums == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.version_nums',
u'v': 1},
u'symbol_1': {u'background': True,
u'key': [(u'symbol', 1)],
u'ns': u'arctic.library.version_nums',
u'unique': True,
u'v': 1}}
def test_delete_library(arctic, library, library_name):
mongo = arctic._conn
# create a library2 library too - ensure that this isn't deleted
arctic.initialize_library('user.library2', VERSION_STORE, segment='month')
library.write('asdf', get_large_ts(1))
assert 'TEST' in mongo.arctic_test.collection_names()
assert 'TEST.versions' in mongo.arctic_test.collection_names()
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
arctic.delete_library(library_name)
assert 'TEST' not in mongo.arctic_user.collection_names()
assert 'TEST.versions' not in mongo.arctic_user.collection_names()
with pytest.raises(LibraryNotFoundException):
arctic[library_name]
with pytest.raises(LibraryNotFoundException):
arctic['arctic_{}'.format(library_name)]
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
def test_quota(arctic, library, library_name):
thing = list(range(100))
library._arctic_lib.set_quota(10)
assert arctic.get_quota(library_name) == 10
assert library._arctic_lib.get_quota() == 10
library.write('thing', thing)
with pytest.raises(QuotaExceededException):
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
with pytest.raises(QuotaExceededException):
arctic.check_quota(library_name)
def test_check_quota(arctic, library, library_name):
with patch('arctic.arctic.logger.info') as info:
arctic.check_quota(library_name)
assert info.call_count == 1
def test_default_mongo_retry_timout():
now = time.time()
with pytest.raises(LibraryNotFoundException):
Arctic('unresolved-host', serverSelectionTimeoutMS=0)['some.lib']
assert time.time() - now < 1.
| lgpl-2.1 |
BlueBrain/NEST | testsuite/manualtests/cross_check_test_mip_corrdet.py | 13 | 2594 | # -*- coding: utf-8 -*-
#
# cross_check_test_mip_corrdet.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Script to check correlation_detector.
# Calculates spike cross correlation function of both spike trains in
# spike_detector-0-0-3.gdf. The file is generated after running the
# testscript testsuite/unittests/test_mip_corrdet.sli
#
# Author: Helias
# Date: 08-04-07
#
from scipy import *
from matplotlib.pylab import * # for plot
# Auto- and crosscorrelation functions for spike trains.
#
# A time bin of size tbin is centered around the time difference it
# represents If the correlation function is calculated for tau in
# [-tau_max, tau_max], the pair events contributing to the left-most
# bin are those for which tau in [-tau_max-tbin/2, tau_max+tbin/2) and
# so on.
# correlate two spike trains with each other
# assumes spike times to be ordered in time
# tau > 0 means spike2 is later than spike1
#
# tau_max: maximum time lag in ms correlation function
# tbin: bin size
# spike1: first spike train [tspike...]
# spike2: second spike train [tspike...]
#
def corr_spikes_sorted(spike1, spike2, tbin, tau_max, h):
tau_max_i = int(tau_max/h)
tbin_i = int(tbin/h)
cross = zeros(int(2*tau_max_i/tbin_i+1), 'd')
j0 = 0
for spki in spike1:
j = j0
while j < len(spike2) and spike2[j] - spki < -tau_max_i - tbin_i/2.0:
j += 1
j0 = j
while j < len(spike2) and spike2[j] - spki < tau_max_i + tbin_i/2.0:
cross[int((spike2[j] - spki + tau_max_i + 0.5*tbin_i)/tbin_i)] += 1.0
j += 1
return cross
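# Illustrative usage (hypothetical spike times, given in steps of the resolution h,
# which is what the comparisons against tau_max/h inside the function assume):
#   cross = corr_spikes_sorted([1000, 2000, 3000], [1050, 2100, 2900],
#                              tbin=10.0, tau_max=100.0, h=0.1)
# returns a 21-bin histogram (2*int(tau_max/h)/int(tbin/h) + 1 bins), with the
# centre bin counting near-coincident pairs (|tau| < tbin/2).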
def main():
# resolution
h = 0.1
tau_max = 100.0 # ms correlation window
t_bin = 10.0 # ms bin size
# read input from spike detector
spikes = load('spike_detector-0-0-3.gdf')
sp1 = spikes[find(spikes[:,0] == 4), 1]
sp2 = spikes[find(spikes[:,0] == 5), 1]
cross = corr_spikes_sorted(sp1, sp2, t_bin, tau_max, h)
print cross
print sum(cross)
main()
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command.
# Raw image data is given on STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporary file and the program is called with that temporary file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
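# Illustrative sketch (not part of the generated defaults above): a few of the
# documented options turned into explicit settings. The chosen values are
# arbitrary examples; the msgpack packer/unpacker pair comes from the Session
# docstring above and needs the msgpack package, so it is left commented out.
c.ZMQTerminalIPythonApp.confirm_exit = False
c.ZMQTerminalInteractiveShell.kernel_timeout = 30
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'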
| mit |
ofgulban/scikit-image | doc/examples/filters/plot_rank_mean.py | 7 | 1525 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element are used to
  compute the average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
  (here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
  level situated inside g-s0 and g+s1 (here g-500 and g+500).
The percentile and usual mean give similar results here: both filters smooth the
complete image (background and details). The bilateral mean exhibits a high
filtering rate for continuous areas (i.e. the background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = data.coins()
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| bsd-3-clause |
tienjunhsu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for GUI building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
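# Illustrative usage sketch (not part of the original module): plotting a
# pandas DataFrame in a stand-alone PlotWindow. The function name, the local
# pandas import and the random data are assumptions for this example only.
def demoPlotWindow():
    import pandas as pd
    app = QApplication(sys.argv)
    win = PlotWindow()
    df = pd.DataFrame({'a': np.random.rand(50).cumsum(),
                       'b': np.random.rand(50).cumsum()})
    win.plot(df)          # delegates to DataFrame.plot on the embedded axes
    win.show()
    app.exec_()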
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
igabriel85/dmon-adp | adpformater/adpformater.py | 1 | 1615 | import pandas as pd
class DataFormatter():
def __init__(self, dataloc):
self.dataloc = dataloc
def aggJsonToCsv(self):
return "CSV file"
def expTimestamp(self):
return "Expand metric timestamp"
def window(self):
return "Window metrics"
def pivot(self):
return "Pivot values"
def addID(self):
return "Add new ID as index"
def removeID(self):
return "Remove selected column as index"
def renameHeader(self):
return "Rename headers"
def normalize(self):
return "Normalize data"
def denormalize(self):
return "Denormalize data"
input_table = pd.read_csv("metrics.csv")
for index, row in input_table.iterrows():
input_table = input_table.append([row]*9)
input_table = input_table.sort_values(['row ID'])
input_table = input_table.reset_index(drop=True)
for index, rows in input_table.iterrows():
    if int(index) > 59:
        print "Index too big!"
    time = rows[0].split(", ", 1)  # in Knime, the timestamp is in row(55), the last one
timeHour = time[1].split(":", 2)
timeHourSeconds = timeHour[2].split(".", 1)
timeHourSecondsDecimal = timeHour[2].split(".", 1)
timeHourSecondsDecimal[0] = str(index)
if len(timeHourSecondsDecimal[0]) == 1:
timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0]
decimal = '.'.join(timeHourSecondsDecimal)
timeHour[2] = decimal
timenew = ':'.join(timeHour)
time[1] = timenew
finalString = ', '.join(time)
input_table.set_value(index, 'row ID', finalString)
input_table.to_csv('out.csv')
| apache-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/sql.py | 7 | 58343 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
If not None, produces FutureWarning if 'sqlite' and
raises a ValueError if anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
    notes about its functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
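# Illustrative sketch (not part of pandas itself): read_sql in DBAPI fallback
# mode with a plain sqlite3 connection and a parameterised (qmark style)
# query. The helper name and the table contents are assumptions for this
# example only.
def _example_read_sql_fallback():
    import sqlite3
    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE demo (name TEXT, value REAL)')
    con.executemany('INSERT INTO demo VALUES (?, ?)',
                    [('a', 1.0), ('b', 2.5)])
    return read_sql('SELECT * FROM demo WHERE value > ?', con, params=(1.5,))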
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
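# Illustrative sketch (not part of pandas itself): to_sql in sqlite3 fallback
# mode, replacing any existing table and forcing TEXT storage for one column
# via the string form of `dtype` documented above. The helper name and the
# sample data are assumptions for this example only.
def _example_to_sql_fallback():
    import sqlite3
    con = sqlite3.connect(':memory:')
    df = DataFrame({'code': ['007', '042'], 'value': [1.5, 2.5]})
    to_sql(df, 'demo', con, if_exists='replace', dtype={'code': 'TEXT'})
    return has_table('demo', con)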
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
else it just return con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isnull(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
Subclasses Should define read_sql and to_sql
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle the database abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
If provided, this MetaData object is used instead of a newly
created. This allows to specify database flavor specific
arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally, specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
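# Illustrative usage sketch (added for exposition, not part of the original
# module): driving SQLDatabase through a SQLAlchemy engine. The in-memory
# engine, table name and column names below are assumptions for the example.
def _example_sqldatabase_roundtrip():
    import pandas as pd
    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///:memory:")
    pandas_sql = SQLDatabase(engine)
    frame = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    # Write the frame, then read it back as a whole table and via a query.
    pandas_sql.to_sql(frame, "example_table", if_exists="replace", index=False)
    whole = pandas_sql.read_table("example_table")
    subset = pandas_sql.read_query("SELECT a FROM example_table WHERE a > 1")
    return whole, subset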
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
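# Quick illustration (added for exposition, not in the original source): the
# escaping above doubles embedded quotes and wraps the identifier in double
# quotes, which is how SQLite expects arbitrary identifiers to be quoted.
def _example_sqlite_name_escaping():
    assert _get_valid_sqlite_name("plain") == '"plain"'
    assert _get_valid_sqlite_name("spaces are kept") == '"spaces are kept"'
    assert _get_valid_sqlite_name('with "quotes"') == '"with ""quotes"""'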
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
Return a list of SQL statement that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
            create_stmts.append(
                "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
                " ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
        frame : DataFrame
        name : string
            Name of SQL table
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally, specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
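# Illustrative sketch (added for exposition): exercising the sqlite3 fallback
# directly. The throwaway in-memory connection and table name are assumptions
# for the example; user code normally goes through read_sql/to_sql instead.
def _example_sqlite_fallback():
    import sqlite3
    import pandas as pd

    con = sqlite3.connect(":memory:")
    pandas_sql = SQLiteDatabase(con)
    frame = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
    pandas_sql.to_sql(frame, "example_table", if_exists="replace", index=False)
    assert pandas_sql.has_table("example_table")
    return pandas_sql.read_query("SELECT * FROM example_table")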
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
dtype : dict of column name to SQL type, default None
        Optionally, specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
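# Small usage sketch (not in the original file): get_schema only builds the
# CREATE TABLE statement and does not touch the database. The sqlite3
# connection and the dtype override are assumptions made for illustration.
def _example_get_schema():
    import sqlite3
    import pandas as pd

    frame = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
    con = sqlite3.connect(":memory:")
    # With a DBAPI2 (sqlite3) connection, dtype values are plain SQL strings.
    return get_schema(frame, "example_table", keys="id", con=con,
                      dtype={"name": "TEXT"})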
| agpl-3.0 |
rboyes/KerasScripts | CSVTrainer.py | 1 | 5321 | import os
import datetime
import sys
import time
import string
import random
import pandas as pd
import numpy as np
import gc
if(len(sys.argv) < 5):
print('Usage: CSVTrainer.py train.csv validation.csv model.h5 log.txt')
sys.exit(1)
trainingName = sys.argv[1]
validationName = sys.argv[2]
modelName = sys.argv[3]
logName = sys.argv[4]
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import *
import keras.preprocessing.image as image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.models import Model, load_model
from keras import regularizers
from keras import backend as K
from keras.utils.data_utils import get_file
from sklearn.metrics import accuracy_score
from keras.applications import resnet50
def readCSV(fileList):
namesDataFrame = pd.read_csv(fileList)
flatten = lambda l: [item for sublist in l for item in sublist]
labels = sorted(list(set(flatten([l.split(' ') for l in namesDataFrame['tags'].values]))))
labelMap = {l: i for i, l in enumerate(labels)}
numberOfLabels = len(labels)
numberOfImages = len(namesDataFrame)
fileNames = []
y = np.zeros((numberOfImages, numberOfLabels), np.float32)
for index in range(0, numberOfImages):
inputImage = image.img_to_array(image.load_img(namesDataFrame.iloc[index][0]))
fileNames.append(namesDataFrame.iloc[index][0])
tags = namesDataFrame.iloc[index][1]
for t in tags.split(' '):
y[index, labelMap[t]] = 1.0
return (fileNames, y, labelMap)
print('Loading images..........', end = '',flush = True)
(trainingFileNames, trainY, trainingLabelMap) = readCSV(trainingName)
(validationFileNames, validationY, validationLabelMap) = readCSV(validationName)
print('done.', flush = True)
if len(trainingLabelMap) != len(validationLabelMap):
print("Label maps for training and validation are not equal")
sys.exit(1)
numberOfTrainingImages = len(trainingFileNames)
numberOfValidationImages = len(validationFileNames)
numberOfChannels = 3
nx = 256
ny = 256
batchSize = 25
lossName = 'binary_crossentropy'
activationName = 'sigmoid'
resnetModel = resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(numberOfChannels, nx, ny))
print('The number of layers in the resnet model = %d' % (len(resnetModel.layers)))
bottleneckTrainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckValidationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckTrainingGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(trainingFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckValidationGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(validationFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckTrainingFeatures = resnetModel.predict_generator(bottleneckTrainingGenerator, numberOfTrainingImages)
bottleneckValidationFeatures = resnetModel.predict_generator(bottleneckValidationGenerator, numberOfValidationImages)
newTop = Sequential()
newTop.add(Flatten(input_shape = bottleneckTrainingFeatures.shape[1:]))
newTop.add(Dense(512, activation='relu'))
newTop.add(Dropout(0.5))
newTop.add(Dense(len(trainingLabelMap), activation=activationName, name='predictions'))
newTop.compile(loss=lossName, optimizer=Adam(lr=1.0E-3))
print('Fitting predicted features...', flush = True)
newTop.fit(bottleneckTrainingFeatures, trainY, validation_data = (bottleneckValidationFeatures, validationY), verbose = 1, batch_size = batchSize, nb_epoch = 25)
print('Done.', flush = True)
finalModel = Model(input = resnetModel.input, output = newTop(resnetModel.output))
print('The number of layers in the final model = %d' % (len(finalModel.layers)))
for layer in finalModel.layers[:(len(resnetModel.layers) - 21)]:
layer.trainable = False
finalModel.compile(loss=lossName,optimizer=SGD(lr=1e-4, momentum=0.9))
print(finalModel.summary())
# Could add vertical_flip = True
trainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0, rotation_range = 40, zoom_range = 0.15, horizontal_flip = True,
width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.1)
validationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
trainingGenerator = trainingDataGenerator.flow_from_filenames(trainingFileNames, trainY, batch_size = batchSize, target_size = (nx, ny))
validationGenerator = validationDataGenerator.flow_from_filenames(validationFileNames, validationY, batch_size = batchSize, target_size = (nx, ny))
csvLogger = CSVLogger(logName, append=True)
checkPointer = ModelCheckpoint(filepath=modelName, verbose = 1, save_best_only = True)
finalModel.fit_generator(trainingGenerator, numberOfTrainingImages, 50, validation_data = validationGenerator,
nb_val_samples = numberOfValidationImages, callbacks = [checkPointer, csvLogger])
| apache-2.0 |
saimn/astropy | astropy/visualization/wcsaxes/frame.py | 8 | 10649 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self.data = None
self.pixel = None
self.world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.transform.inverted().transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = value
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data[:,0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data[:,0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams['axes.linewidth']
self._color = rcParams['axes.edgecolor']
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor=rcParams['axes.facecolor'], edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = self.spine_class(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose()
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
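# Exposition-only sketch (not part of astropy): the minimal contract for a
# concrete frame is to list its sides in ``spine_names`` and fill in their
# ``data`` arrays in ``update_spines``. The diagonal frame below is a made-up
# example mirroring the rectangular frames defined further down.
class _ExampleDiagonalFrame(BaseFrame):
    """A toy frame consisting of a single spine along the axes diagonal."""

    spine_names = 'd'

    def update_spines(self):
        xmin, xmax = self.parent_axes.get_xlim()
        ymin, ymax = self.parent_axes.get_ylim()
        self['d'].data = np.array(([xmin, ymin], [xmax, ymax]))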
class RectangularFrame1D(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'bt'
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000,
transform=self.parent_axes.transData)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
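# Usage sketch (added for exposition): EllipticalFrame is normally passed to
# WCSAxes through the ``frame_class`` keyword, e.g. for all-sky plots. The
# WCS parameters below are illustrative assumptions, not a tested recipe.
def _example_elliptical_frame():
    import matplotlib.pyplot as plt
    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['GLON-AIT', 'GLAT-AIT']
    wcs.wcs.crval = [0., 0.]
    wcs.wcs.crpix = [180.5, 90.5]
    wcs.wcs.cdelt = [-1., 1.]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=wcs, frame_class=EllipticalFrame)
    ax.coords.grid(color='lightgray')
    return fig, ax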
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/io/json/table_schema.py | 12 | 5184 | """
Table Schema builders
http://specs.frictionlessdata.io/json-table-schema/
"""
from pandas.core.dtypes.common import (
is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype,
is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_categorical_dtype, is_period_dtype, is_string_dtype
)
def as_json_table_type(x):
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : array or dtype
Returns
-------
t : str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
============== =================
Pandas type Table Schema type
============== =================
int64 integer
float64 number
bool boolean
datetime64[ns] datetime
timedelta64[ns] duration
    object          string
    categorical     any
    ============== =================
"""
if is_integer_dtype(x):
return 'integer'
elif is_bool_dtype(x):
return 'boolean'
elif is_numeric_dtype(x):
return 'number'
elif (is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or
is_period_dtype(x)):
return 'datetime'
elif is_timedelta64_dtype(x):
return 'duration'
elif is_categorical_dtype(x):
return 'any'
elif is_string_dtype(x):
return 'string'
else:
return 'any'
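# Quick illustration (added for exposition) of the dtype mapping documented in
# the docstring above; the inputs are arbitrary examples.
def _example_as_json_table_type():
    import numpy as np
    import pandas as pd

    assert as_json_table_type(np.dtype('int64')) == 'integer'
    assert as_json_table_type(np.dtype('datetime64[ns]')) == 'datetime'
    assert as_json_table_type(pd.Series(['a', 'b'])) == 'string'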
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if all(name is not None for name in data.index.names):
return data
data = data.copy()
if data.index.nlevels > 1:
names = [name if name is not None else 'level_{}'.format(i)
for i, name in enumerate(data.index.names)]
data.index.names = names
else:
data.index.name = data.index.name or 'index'
return data
def make_field(arr, dtype=None):
dtype = dtype or arr.dtype
if arr.name is None:
name = 'values'
else:
name = arr.name
field = {'name': name,
'type': as_json_table_type(dtype)}
if is_categorical_dtype(arr):
if hasattr(arr, 'categories'):
cats = arr.categories
ordered = arr.ordered
else:
cats = arr.cat.categories
ordered = arr.cat.ordered
field['constraints'] = {"enum": list(cats)}
field['ordered'] = ordered
elif is_period_dtype(arr):
field['freq'] = arr.freqstr
elif is_datetime64tz_dtype(arr):
if hasattr(arr, 'dt'):
field['tz'] = arr.dt.tz.zone
else:
field['tz'] = arr.tz.zone
return field
def build_table_schema(data, index=True, primary_key=None, version=True):
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
    primary_key : bool or None, default None
column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
Notes
-----
See `_as_json_table_type` for conversion types.
    Timedeltas are converted to ISO8601 duration format with
    9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
"""
if index is True:
data = set_default_names(data)
schema = {}
fields = []
if index:
if data.index.nlevels > 1:
for level in data.index.levels:
fields.append(make_field(level))
else:
fields.append(make_field(data.index))
if data.ndim > 1:
for column, s in data.iteritems():
fields.append(make_field(s))
else:
fields.append(make_field(data))
schema['fields'] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema['primaryKey'] = [data.index.name]
else:
schema['primaryKey'] = data.index.names
elif primary_key is not None:
schema['primaryKey'] = primary_key
if version:
schema['pandas_version'] = '0.20.0'
return schema
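# Exposition-only sketch: this is the same schema that DataFrame.to_json
# embeds when called with orient='table'; the frame below is an arbitrary
# example for illustration.
def _example_build_table_schema():
    import pandas as pd

    df = pd.DataFrame({'A': [1, 2, 3],
                       'B': pd.date_range('2016-01-01', periods=3)},
                      index=pd.Index(range(3), name='idx'))
    schema = build_table_schema(df)
    serialised = df.to_json(orient='table')
    return schema, serialised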
| mit |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar argument.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
  detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
  diff - the n-th difference of an array
  eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
  rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
  cohere - the coherence using average periodogram
  csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
  psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
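# Tiny usage sketch (added for exposition): with the pylab namespace assembled
# above, a basic figure needs only the Matlab-style commands listed in the
# module docstring. Guarded so it only runs when executed directly.
if __name__ == '__main__':
    _x = np.linspace(0.0, 2 * np.pi, 200)
    plot(_x, np.sin(_x), label='sin(x)')
    xlabel('x [rad]')
    ylabel('amplitude')
    title('pylab quick example')
    legend()
    show()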
| gpl-3.0 |
googleinterns/cabby | cabby/model/datasets.py | 1 | 4391 | # coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import os
import pandas as pd
from sklearn.utils import shuffle
from cabby.geo import regions
from cabby.geo import util as gutil
class RUNDataset:
def __init__(self, data_dir: str, s2level: int, lines: bool = False):
train_ds, valid_ds, test_ds, ds = self.load_data(data_dir, lines=lines)
# Get labels.
map_1 = regions.get_region("RUN-map1")
map_2 = regions.get_region("RUN-map2")
map_3 = regions.get_region("RUN-map3")
logging.info(map_1.polygon.wkt)
logging.info(map_2.polygon.wkt)
logging.info(map_3.polygon.wkt)
unique_cellid_map_1 = gutil.cellids_from_polygon(map_1.polygon, s2level)
unique_cellid_map_2 = gutil.cellids_from_polygon(map_2.polygon, s2level)
unique_cellid_map_3 = gutil.cellids_from_polygon(map_3.polygon, s2level)
unique_cellid = (
unique_cellid_map_1 + unique_cellid_map_2 + unique_cellid_map_3)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.ds = ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
def load_data(self, data_dir: str, lines: bool):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
ds['instructions'] = ds.groupby(
['id'])['instruction'].transform(lambda x: ' '.join(x))
ds = ds.drop_duplicates(subset='id', keep="last")
columns_keep = ds.columns.difference(
['map', 'id', 'instructions', 'end_point', 'start_point'])
ds.drop(columns_keep, 1, inplace=True)
ds = shuffle(ds)
ds.reset_index(inplace=True, drop=True)
dataset_size = ds.shape[0]
logging.info(f"Size of dataset: {ds.shape[0]}")
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
return train_ds, valid_ds, test_ds, ds
class RVSDataset:
def __init__(self, data_dir: str, s2level: int, region: str, lines: bool = True):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
logging.info(f"Size of dataset before removal of duplication: {ds.shape[0]}")
ds = pd.concat([ds.drop(['geo_landmarks'], axis=1), ds['geo_landmarks'].apply(pd.Series)], axis=1)
lengths = ds.end_point.apply(lambda x: x if len(x) == 3 else "").tolist()
ds['end_osmid'] = ds.end_point.apply(lambda x: x[1])
ds['start_osmid'] = ds.start_point.apply(lambda x: x[1])
ds['end_pivot'] = ds.end_point
ds['end_point'] = ds.end_point.apply(lambda x: x[3])
ds['start_point'] = ds.start_point.apply(lambda x: x[3])
ds = ds.drop_duplicates(subset=['end_osmid', 'start_osmid'], keep='last')
logging.info(f"Size of dataset after removal of duplication: {ds.shape[0]}")
dataset_size = ds.shape[0]
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
# Get labels.
active_region = regions.get_region(region)
unique_cellid = gutil.cellids_from_polygon(active_region.polygon, s2level)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
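# Usage sketch (added for exposition, not in the original module): both dataset
# classes expect a directory containing a ``dataset.json`` file. The path,
# region name and S2 level below are illustrative assumptions only.
def _example_load_rvs_dataset():
  dataset = RVSDataset(
    data_dir='/path/to/rvs_data', s2level=14, region='Manhattan')
  logging.info(f"Train size: {dataset.train.shape[0]}, "
               f"number of cell labels: {len(dataset.unique_cellid)}")
  return dataset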
| apache-2.0 |
georgid/sms-tools | lectures/7-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 2 | 2997 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
sriki18/scipy | scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
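# Exposition-only sketch (not part of scipy): a pure-Python version of the
# ring-buffer LFSR update that the compiled ``_max_len_seq_inner`` helper is
# assumed to perform; each step emits one register bit and XORs the tapped
# bits back into the register. With taps [3], state [1, 1, 1, 1], nbits=4 and
# length=15 it reproduces the sequence shown in the docstring below.
def _lfsr_reference(taps, state, nbits, length):
    state = list(state)
    seq = []
    idx = 0
    for _ in range(length):
        feedback = state[idx]
        seq.append(feedback)
        for tap in taps:
            feedback ^= state[(tap + idx) % nbits]
        state[idx] = feedback
        idx = (idx + 1) % nbits
    return seq, state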
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/
m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| bsd-3-clause |
zseder/hunmisc | hunmisc/utils/plotting/matplotlib_simple_xy.py | 1 | 1535 | """
Copyright 2011-13 Attila Zseder
Email: zseder@gmail.com
This file is part of hunmisc project
url: https://github.com/zseder/hunmisc
hunmisc is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import matplotlib.pyplot as plt
from matplotlib import rc
def read_data(istream):
r = [[],[],[],[],[]]
for l in istream:
le = l.strip().split()
[r[i].append(le[i]) for i in xrange(len(le))]
return r
def main():
d = read_data(open(sys.argv[1]))
rc('font', size=14)
ax = plt.subplot(111)
ax.plot(d[0], d[1], label="$M$", linewidth=2)
ax.plot(d[0], d[2], label="$l KL$", linewidth=2)
ax.plot(d[0], d[3], label="$l (H_q+KL)$", linewidth=2)
ax.plot(d[0], d[4], label="$M + l (H_q+KL)$", linewidth=2)
plt.xlabel("Bits")
ax.legend(loc=7)
plt.show()
#plt.savefig("fig.png")
if __name__ == "__main__":
main()
| gpl-3.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <mdroe@stsci.edu>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
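# Editor's note: a minimal, hedged usage sketch of the parser defined above.
# The pattern string is illustrative only; the exact dictionary contents
# (e.g. whether sizes come back as '12' or '12.0') depend on the parse
# actions and are not asserted here.
if __name__ == '__main__':
    demo_props = parse_fontconfig_pattern('serif-12:bold:italic')
    # Expected to be roughly:
    # {'family': ['serif'], 'size': ['12.0'], 'weight': ['bold'], 'slant': ['italic']}
    print(demo_props)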
| agpl-3.0 |
appapantula/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
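# Editor's note (hedged): for the 8x8 digits used below, nudge_dataset is
# expected to return 5 * n_samples rows (the originals plus the four
# one-pixel shifts), with the labels repeated accordingly, e.g.
#   X2, Y2 = nudge_dataset(X, Y)   # X2.shape[0] == 5 * X.shape[0]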
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
idlead/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
abhishekkrthakur/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
ati-ozgur/KDD99ReviewArticle | HelperCodes/create_table_JournalAndArticleCounts.py | 1 | 1930 | import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
#df_journal = df.groupby('journal')["ID"]
dfJournalList = df.groupby(['journal'])['ID'].count().order(ascending=False)
isOdd = (dfJournalList.size % 2 == 1)
if isOdd:
    table_row_length = dfJournalList.size // 2 + 1
else:
    table_row_length = dfJournalList.size // 2
table_content_inside=""
for index in range(table_row_length):
journal_name_1column = dfJournalList.index[index]
journal_count_1column = dfJournalList[index]
second_column_index = index + table_row_length
if(second_column_index < dfJournalList.size):
journal_name_2column = dfJournalList.index[second_column_index]
journal_count_2column = dfJournalList[second_column_index]
else:
journal_name_2column = ""
journal_count_2column = ""
line = "{journal_name_1column} & {journal_count_1column} & {journal_name_2column} & {journal_count_2column} \\\\ \n".format(
journal_name_1column = journal_name_1column
,journal_count_1column = journal_count_1column
,journal_name_2column = journal_name_2column
,journal_count_2column = journal_count_2column
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table*}[!ht]
\\caption{ \\textbf{Journals and Article Counts} }
\\label{table-JournalAndArticleCounts}
\\centering
\\begin{adjustbox}{max width=\\textwidth}
\\normalsize
\\begin{tabular}{llll}
\\toprule
Journal Name & Article Count & Journal Name & Article Count \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{adjustbox}
\\end{table*}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
filename = "../latex/table-JournalAndArticleCounts.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
| mit |
arthurmensch/modl | benchmarks/log.py | 1 | 2179 | import time
import numpy as np
from lightning.impl.primal_cd import CDClassifier
from lightning.impl.sag import SAGAClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from lightning.classification import SAGClassifier
from sklearn.linear_model import LogisticRegression
bunch = fetch_20newsgroups_vectorized(subset="all")
X = bunch.data
y = bunch.target
y[y >= 1] = 1
alpha = 1e-3
n_samples = X.shape[0]
sag = SAGClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
saga = SAGAClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
cd_classifier = CDClassifier(loss='log',
alpha=alpha / 2,
C=1 / n_samples,
tol=1e-10,
max_iter=100,
verbose=1,
random_state=0)
sklearn_sag = LogisticRegression(tol=1e-10, max_iter=1000,
verbose=2, random_state=0,
C=1. / (n_samples * alpha),
solver='sag',
penalty='l2',
fit_intercept=False)
classifiers = [{'name': 'Lightning SAG', 'estimator': sag},
{'name': 'Lightning SAGA', 'estimator': saga},
{'name': 'Sklearn SAG', 'estimator': sklearn_sag},
{'name': 'Lightning CD', 'estimator': cd_classifier},
]
for classifier in classifiers:
    print(classifier['name'])
    clf = classifier['estimator']
    start = time.time()  # Reset the timer so each classifier's fit is timed separately.
    clf.fit(X, y)
    print("Training time", time.time() - start)
print("Accuracy", np.mean(clf.predict(X) == y))
n_nz = np.sum(np.sum(clf.coef_ != 0, axis=0, dtype=bool))
n_nz /= clf.coef_.size
print(clf.coef_)
print('Non-zero', n_nz)
| bsd-2-clause |
hansonrobotics/chatbot | src/chatbot/stats.py | 1 | 3618 | import os
import logging
import pandas as pd
import glob
import re
import datetime as dt
from collections import Counter
logger = logging.getLogger('hr.chatbot.stats')
trace_pattern = re.compile(
r'../(?P<fname>.*), (?P<tloc>\(.*\)), (?P<pname>.*), (?P<ploc>\(.*\))')
def collect_history_data(history_dir, days):
today = dt.datetime.utcnow()
dfs = []
for d in glob.glob('{}/*'.format(history_dir)):
if os.path.isdir(d):
dirname = os.path.basename(d)
dirdate = None
try:
dirdate = dt.datetime.strptime(dirname, '%Y%m%d')
except Exception as ex:
logger.error(ex)
if dirdate and (days == -1 or (today - dirdate).days < days):
for fname in glob.glob('{}/{}/*.csv'.format(history_dir, dirname)):
try:
dfs.append(pd.read_csv(fname))
except Exception as ex:
logger.warn("Reading {} error: {}".format(fname, ex))
if not dfs:
return None
df = pd.concat(dfs, ignore_index=True)
df = df[df.Datetime != 'Datetime'].sort(
['User', 'Datetime']).drop_duplicates()
return df
def history_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
if days == -1:
stats_csv = '{}/full_history.csv'.format(history_dir)
else:
stats_csv = '{}/last_{}_days.csv'.format(history_dir, days)
columns = [u'Datetime', u'Revision', u'User', u'BotName',
u'AnsweredBy', u'Question', u'Answer', u'Rate', u'Trace']
df.to_csv(stats_csv, index=False, columns=columns)
logger.info("Write statistic records to {}".format(stats_csv))
records = len(df)
rates = len(df[df.Rate.notnull()])
good_rates = len(df[df.Rate.isin(['good'])])
bad_rates = len(df[df.Rate.isin(['bad'])])
if records > 0:
csd = float(records - bad_rates) / records
response = {
'customers_satisfaction_degree': csd,
'number_of_records': records,
'number_of_rates': rates,
'number_of_good_rates': good_rates,
'number_of_bad_rates': bad_rates,
}
return response
def playback_history(df):
from client import Client
client = Client(os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC'), test=True)
pattern_column = []
for question in df.Question:
answer = client.ask(question, True)
traces = answer.get('trace')
patterns = []
if traces:
for trace in traces:
match_obj = trace_pattern.match(trace)
if match_obj:
patterns.append(match_obj.group('pname'))
pattern_column.append(patterns)
df.loc[:,'Pattern'] = pd.Series(pattern_column, index=df.index)
return df
def pattern_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
df = playback_history(df)
patterns = sum(df.Pattern, [])
counter = Counter(patterns)
pattern_freq = pd.Series(counter)
pattern_freq.sort(ascending=False)
stats_csv = '{}/pattern_frequency.csv'.format(history_dir)
pattern_freq.to_csv(stats_csv)
logger.info("Write pattern statistic to {}".format(stats_csv))
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), 7)
pattern_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
| mit |
hdmetor/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
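# Editor's sketch (hedged): a minimal call of ridge_regression on random data,
# mirroring the docstring above. Names and numbers are illustrative only.
# >>> rng = np.random.RandomState(0)
# >>> X_demo = rng.randn(20, 5)
# >>> y_demo = np.dot(X_demo, np.ones(5))
# >>> w = ridge_regression(X_demo, y_demo, alpha=1.0, solver='cholesky')
# >>> w.shape
# (5,)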
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer want an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
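# Editor's sketch (hedged): typical use of RidgeCV with the default efficient
# leave-one-out GCV (cv=None). The data and alphas below are illustrative only.
# >>> rng = np.random.RandomState(0)
# >>> X_cv = rng.randn(50, 3)
# >>> y_cv = X_cv[:, 0] - 2 * X_cv[:, 1] + 0.1 * rng.randn(50)
# >>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X_cv, y_cv)
# >>> reg.alpha_ in (0.1, 1.0, 10.0)
# True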
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
apaloczy/ap_tools | utils.py | 1 | 54151 | # Description: General-purpose functions for personal use.
# Author: André Palóczy
# E-mail: paloczy@gmail.com
__all__ = ['seasonal_avg',
'seasonal_std',
'deseason',
'blkavg',
'blkavgdir',
'blkavgt',
'blkapply',
'stripmsk',
'pydatetime2m_arr',
'm2pydatetime_arr',
'npdt2dt',
'dt2sfloat',
'doy2date',
'flowfun',
'cumsimp',
'rot_vec',
'avgdir',
'lon180to360',
'lon360to180',
'bbox2ij',
'xy2dist',
'get_xtrackline',
'get_arrdepth',
'fpointsbox',
'near',
'near2',
'mnear',
'refine',
'denan',
'standardize',
'linear_trend',
'thomas',
'point_in_poly',
'get_mask_from_poly',
'sphericalpolygon_area',
'greatCircleBearing',
'weim',
'smoo2',
'topo_slope',
'curvature_geometric',
'get_isobath',
'angle_isobath',
'isopyc_depth',
'whiten_zero',
'wind2stress',
'gen_dates',
'fmt_isobath',
'float2latex',
'mat2npz',
'bb_map',
'dots_dualcolor']
from os import system
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import path
from mpl_toolkits.basemap import Basemap
from datetime import datetime, timedelta
from dateutil import rrule, parser
from scipy.io import loadmat, savemat
from scipy import signal
from scipy.signal import savgol_filter
from glob import glob
from netCDF4 import Dataset, num2date, date2num
# from pandas import rolling_window # FIXME, new pandas way of doing this is, e.g., arr = Series(...).rolling(...).mean()
from pandas import Timestamp
from gsw import distance
from pygeodesy import Datums, VincentyError
from pygeodesy.ellipsoidalVincenty import LatLon as LatLon
from pygeodesy.sphericalNvector import LatLon as LatLon_sphere
def seasonal_avg(t, F):
"""
USAGE
-----
F_seasonal = seasonal_avg(t, F)
Calculates the seasonal average of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
"""
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].mean() for ft in ftmo])
def seasonal_std(t, F):
"""
USAGE
-----
F_seasonal = seasonal_std(t, F)
Calculates the seasonal standard deviation of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
"""
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].std() for ft in ftmo])
def deseason(t, F):
"""
USAGE
-----
F_nonssn = deseason(t, F)
Removes the seasonal signal of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
Also assumes that F is sampled monthly and only for
complete years (i.e., t.size is a multiple of 12).
"""
Fssn = seasonal_avg(t, F)
nyears = int(t.size/12)
aux = np.array([])
for n in range(nyears):
aux = np.concatenate((aux, Fssn))
return F - aux
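# Editor's note (hedged): quick illustration of deseason() on two complete
# years of monthly data. For a purely seasonal series the residual should be
# ~0 (up to float roundoff):
#   t = np.array([datetime(yr, mo, 15) for yr in (2000, 2001) for mo in range(1, 13)])
#   F = np.tile(np.sin(2*np.pi*np.arange(12)/12.), 2)
#   np.allclose(deseason(t, F), 0.)   # expected True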
def blkavg(x, y, every=2):
"""
Block-averages a variable y(x). Returns its block average
and standard deviation and new x axis.
"""
nx = x.size
xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nx+every, every):
yi = y[i-every:i]
xblk = np.append(xblk, np.nanmean(x[i-every:i]))
yblk = np.append(yblk, np.nanmean(yi))
yblkstd = np.append(yblkstd, np.nanstd(yi))
return xblk, yblk, yblkstd
def blkavgdir(x, ydir, every=2, degrees=False, axis=None):
"""
Block-averages a PERIODIC variable ydir(x). Returns its
block average and new x axis.
"""
nx = x.size
xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nx+every, every):
xblk = np.append(xblk, np.nanmean(x[i-every:i]))
yblk = np.append(yblk, avgdir(ydir[i-every:i], degrees=degrees, axis=axis))
return xblk, yblk
def blkavgt(t, x, every=2):
"""
Block-averages a variable x(t). Returns its block average
and the new t axis.
"""
nt = t.size
units = 'days since 01-01-01'
calendar = 'proleptic_gregorian'
t = date2num(t, units=units, calendar=calendar)
tblk, xblk = np.array([]), np.array([])
for i in range(every, nt+every, every):
xi = x[i-every:i]
tblk = np.append(tblk, np.nanmean(t[i-every:i]))
xblk = np.append(xblk, np.nanmean(xi))
tblk = num2date(tblk, units=units, calendar=calendar)
return tblk, xblk
def blkapply(x, f, nblks, overlap=0, demean=False, detrend=False, verbose=True):
"""
Divides array 'x' in 'nblks' blocks and applies function 'f' = f(x) on
each block.
"""
x = np.array(x)
assert callable(f), "f must be a function"
nx = x.size
ni = int(nx/nblks) # Number of data points in each chunk.
y = np.zeros(ni) # Array that will receive each block.
dn = int(round(ni - overlap*ni)) # How many indices to move forward with
# each chunk (depends on the % overlap).
# Demean/detrend the full record first (removes the lowest frequencies).
    # Then, also demean/detrend each block before applying f().
if demean: x = x - x.mean()
if detrend: x = signal.detrend(x, type='linear')
n=0
il, ir = 0, ni
while ir<=nx:
xn = x[il:ir]
if demean: xn = xn - xn.mean()
if detrend: xn = signal.detrend(xn, type='linear')
y = y + f(xn) # Apply function and accumulate the current bock.
il+=dn; ir+=dn
n+=1
y /= n # Divide by number of blocks actually used.
ncap = nx - il # Number of points left out at the end of array.
if verbose:
print("")
print("Left last %d data points out (%.1f %% of all points)."%(ncap,100*ncap/nx))
if overlap>0:
print("")
print("Intended %d blocks, but could fit %d blocks, with"%(nblks,n))
print('overlap of %.1f %%, %d points per block.'%(100*overlap,dn))
print("")
return y
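# Editor's sketch (hedged): blkapply() can be used for block-averaged
# estimates, e.g. a crude spectral estimate from 50 % overlapping blocks
# (the lambda returns an array with the same length as each block):
#   Sxx = blkapply(x, lambda xi: np.abs(np.fft.fft(xi))**2, nblks=8,
#                  overlap=0.5, demean=True)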
def stripmsk(arr, mask_invalid=False):
if mask_invalid:
arr = np.ma.masked_invalid(arr)
if np.ma.isMA(arr):
msk = arr.mask
arr = arr.data
arr[msk] = np.nan
return arr
def pydatetime2m_arr(pydt_arr):
    pydt_arr = np.array(pydt_arr)
    secperday = 86400.0   # [s/day].
    datenum_offset = 366  # MATLAB datenums start at year 0000, python ordinals at 0001-01-01.
    matdt = []
    for pydt in pydt_arr.tolist():
        # Fraction of the day elapsed since midnight plus the offset ordinal day number.
        dfrac = (pydt - datetime(pydt.year, pydt.month, pydt.day, 0, 0, 0)).total_seconds()/secperday
        matdt.append(pydt.toordinal() + datenum_offset + dfrac)
    return np.array(matdt)
def m2pydatetime_arr(mdatenum_arr):
mdatenum_arr = np.array(mdatenum_arr)
timedt = timedelta(days=366)
pydt = []
for mdt in mdatenum_arr.tolist():
d = datetime.fromordinal(int(mdt))
dfrac = timedelta(days=mdt%1) - timedt
pydt.append(d + dfrac)
return np.array(pydt)
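# Editor's note (hedged): pydatetime2m_arr() and m2pydatetime_arr() are meant
# to be inverses of each other (up to float roundoff), e.g.
#   t0 = datetime(2017, 1, 1, 12, 0, 0)
#   m2pydatetime_arr(pydatetime2m_arr([t0]))[0] == t0   # expected True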
def npdt2dt(tnp):
"""
USAGE
-----
t_datetime = npdt2dt(t_numpydatetime64)
Convert an array of numpy.datetime64 timestamps to datetime.datetime.
"""
return np.array([Timestamp(ti).to_pydatetime() for ti in tnp])
def dt2sfloat(t):
"""
USAGE
-----
t_float = dt2sfloat(t_datetime)
Convert an array of datetime.datetime timestamps to an array of floats
representing elapsed seconds since the first timestamp.
"""
t = np.array(t)
t0 = t[0]
return np.array([(tn - t0).total_seconds() for tn in t])
def doy2date(doy, year=2017):
"""
USAGE
-----
t = doy2date(doy, year=2017)
Convert an array `doy` of decimal yeardays to
an array of datetime.datetime timestamps.
"""
doy = np.array(doy)*86400 # [seconds/day].
tunit = 'seconds since %d-01-01 00:00:00'%year
return np.array([num2date(dn, tunit) for dn in doy])
def flowfun(x, y, u, v, variable='psi', geographic=True):
"""
FLOWFUN Computes the potential PHI and the streamfunction PSI
of a 2-dimensional flow defined by the matrices of velocity
components U and V, so that
           d(PHI)     d(PSI)            d(PHI)     d(PSI)
      u =  ------  -  ------ ,     v =  ------  +  ------
             dx         dy                dy         dx
P = FLOWFUN(x,y,u,v) returns an array P of the same size as u and v,
which can be the velocity potential (PHI) or the streamfunction (PSI)
Because these scalar fields are defined up to the integration constant,
their absolute values are such that PHI[0,0] = PSI[0,0] = 0.
For a potential (irrotational) flow PSI = 0, and the Laplacian
of PSI is equal to the divergence of the velocity field.
A solenoidal (non-divergent) flow can be described by the
streamfunction alone, and the Laplacian of the streamfunction
is equal to the vorticity (curl) of the velocity field.
The units of the grid coordinates are assumed to be consistent
with the units of the velocity components, e.g., [m] and [m/s].
If variable=='psi', the streamfunction (PSI) is returned.
If variable=='phi', the velocity potential (PHI) is returned.
If geographic==True (default), (x,y) are assumed to be
(longitude,latitude) and are converted to meters before
computing (dx,dy).
If geographic==False, (x,y) are assumed to be in meters.
Uses function 'cumsimp()' (Simpson rule summation).
Author: Kirill K. Pankratov, March 7, 1994.
Source: http://www-pord.ucsd.edu/~matlab/stream.htm
Translated to Python by André Palóczy, January 15, 2015.
Modified by André Palóczy on January 15, 2015.
"""
x,y,u,v = map(np.asanyarray, (x,y,u,v))
if not x.shape==y.shape==u.shape==v.shape:
print("Error: Arrays (x, y, u, v) must be of equal shape.")
return
## Calculating grid spacings.
if geographic:
dlat, _ = np.gradient(y)
_, dlon = np.gradient(x)
deg2m = 111120.0 # [m/deg]
dx = dlon*deg2m*np.cos(y*np.pi/180.) # [m]
dy = dlat*deg2m # [m]
else:
dy, _ = np.gradient(y)
_, dx = np.gradient(x)
ly, lx = x.shape # Shape of the (x,y,u,v) arrays.
## Now the main computations.
## Integrate velocity fields to get potential and streamfunction.
## Use Simpson rule summation (function CUMSIMP).
## Compute velocity potential PHI (non-rotating part).
if variable=='phi':
cx = cumsimp(u[0,:]*dx[0,:]) # Compute x-integration constant
cy = cumsimp(v[:,0]*dy[:,0]) # Compute y-integration constant
cx = np.expand_dims(cx, 0)
cy = np.expand_dims(cy, 1)
phiy = cumsimp(v*dy) + np.tile(cx, (ly,1))
phix = cumsimp(u.T*dx.T).T + np.tile(cy, (1,lx))
phi = (phix + phiy)/2.
return phi
## Compute streamfunction PSI (non-divergent part).
if variable=='psi':
cx = cumsimp(v[0,:]*dx[0,:]) # Compute x-integration constant
cy = cumsimp(u[:,0]*dy[:,0]) # Compute y-integration constant
cx = np.expand_dims(cx, 0)
cy = np.expand_dims(cy, 1)
psix = -cumsimp(u*dy) + np.tile(cx, (ly,1))
psiy = cumsimp(v.T*dx.T).T - np.tile(cy, (1,lx))
psi = (psix + psiy)/2.
return psi
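# Minimal usage sketch (added for illustration, not part of the original module; the
# solid-body rotation test field below is an assumption). Relies on the module-level
# numpy import. For this non-divergent field u = -d(PSI)/dy and v = +d(PSI)/dx, so the
# recovered streamfunction should be ~0.5*((x-500)**2 + (y-500)**2) plus a constant.
def _demo_flowfun():
    x, y = np.meshgrid(np.linspace(0., 1000., 101), np.linspace(0., 1000., 101)) # [m].
    u, v = -(y - 500.), (x - 500.)                                               # [m/s].
    return flowfun(x, y, u, v, variable='psi', geographic=False)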
def cumsimp(y):
"""
F = CUMSIMP(Y) Simpson-rule column-wise cumulative summation.
Numerical approximation of a function F(x) such that
Y(X) = dF/dX. Each column of the input matrix Y represents
the value of the integrand Y(X) at equally spaced points
X = 0,1,...size(Y,1).
The output is a matrix F of the same size as Y.
The first row of F is equal to zero and each following row
is the approximation of the integral of each column of matrix
    Y up to the given row.
CUMSIMP assumes continuity of each column of the function Y(X)
and uses Simpson rule summation.
    Similar to the command F = CUMSUM(Y), except for zero first
row and more accurate summation (under the assumption of
continuous integrand Y(X)).
Author: Kirill K. Pankratov, March 7, 1994.
Source: http://www-pord.ucsd.edu/~matlab/stream.htm
Translated to Python by André Palóczy, January 15, 2015.
"""
y = np.asanyarray(y)
## 3-point interpolation coefficients to midpoints.
## Second-order polynomial (parabolic) interpolation coefficients
## from Xbasis = [0 1 2] to Xint = [.5 1.5]
c1 = 3/8.
c2 = 6/8.
c3 = -1/8.
if y.ndim==1:
y = np.expand_dims(y,1)
f = np.zeros((y.size,1)) # Initialize summation array.
squeeze_after = True
elif y.ndim==2:
f = np.zeros(y.shape) # Initialize summation array.
squeeze_after = False
else:
print("Error: Input array has more than 2 dimensions.")
return
if y.size==2: # If only 2 elements in columns - simple average.
f[1,:] = (y[0,:] + y[1,:])/2.
return f
else: # If more than two elements in columns - Simpson summation.
## Interpolate values of y to all midpoints.
f[1:-1,:] = c1*y[:-2,:] + c2*y[1:-1,:] + c3*y[2:,:]
f[2:,:] = f[2:,:] + c3*y[:-2,:] + c2*y[1:-1,:] + c1*y[2:,:]
f[1,:] = f[1,:]*2
f[-1,:] = f[-1,:]*2
## Simpson (1,4,1) rule.
f[1:,:] = 2*f[1:,:] + y[:-1,:] + y[1:,:]
f = np.cumsum(f, axis=0)/6. # Cumulative sum, 6 - denominator from the Simpson rule.
if squeeze_after:
f = f.squeeze()
return f
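# Quick sanity-check sketch (illustrative, not part of the original module): the
# cumulative Simpson sum of y = 2*x on a unit-spaced grid should reproduce x**2.
def _demo_cumsimp():
    x = np.arange(11.)            # Unit spacing, as assumed by cumsimp().
    F = cumsimp(2.*x)             # Approximates the integral of 2t from 0 to x.
    return np.allclose(F, x**2)   # Expected to be True.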
def rot_vec(u, v, angle=-45, degrees=True):
"""
USAGE
-----
u_rot,v_rot = rot_vec(u,v,angle=-45.,degrees=True)
Returns the rotated vector components (`u_rot`,`v_rot`)
from the zonal-meridional input vector components (`u`,`v`).
The rotation is done using the angle `angle` positive counterclockwise
    (trigonometric convention). If `degrees` is set to `True` (default),
    then `angle` is converted to radians.
Example
-------
>>> from matplotlib.pyplot import quiver
>>> from ap_tools.utils import rot_vec
>>> u = -1.
>>> v = -1.
>>> u2,v2 = rot_vec(u,v, angle=-30.)
"""
u,v = map(np.asanyarray, (u,v))
if degrees:
angle = angle*np.pi/180. # Degrees to radians.
u_rot = +u*np.cos(angle) + v*np.sin(angle) # Usually the across-shore component.
v_rot = -u*np.sin(angle) + v*np.cos(angle) # Usually the along-shore component.
return u_rot,v_rot
def avgdir(dirs, degrees=False, axis=None):
"""
USAGE
-----
dirm = avgdir(dirs, degrees=False, axis=None)
Calculate the mean direction of an array of directions 'dirs'.
If 'degrees' is 'False' (default), the input directions must be
in radians. If 'degrees' is 'True', the input directions must be
in degrees.
The direction angle is measured from the ZONAL axis, i.e.,
(0, 90, -90) deg are (Eastward, Northward, Southward).
180 and -180 deg are both Westward.
If 'axis' is 'None' (default) the mean is calculated on the
flattened array. Otherwise, 'axis' is the index of the axis
to calculate the mean over.
"""
dirs = np.array(dirs)
if degrees:
dirs = dirs*np.pi/180 # Degrees to radians.
uxs = np.cos(dirs)
vys = np.sin(dirs)
dirm = np.arctan2(vys.sum(axis=axis), uxs.sum(axis=axis))
if degrees:
dirm = dirm*180/np.pi # From radians to degrees.
return dirm
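# Illustrative sketch (added; the sample directions are an assumption): averaging
# directions that straddle the +-180 deg branch cut, where a plain arithmetic mean
# would wrongly give ~0 deg (eastward) instead of ~180 deg (westward).
def _demo_avgdir():
    dirs = [170., -170., 175., -175.]
    return avgdir(dirs, degrees=True)   # ~ +-180 deg (westward).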
def lon180to360(lon):
"""
Converts longitude values in the range [-180,+180]
to longitude values in the range [0,360].
"""
lon = np.asanyarray(lon)
return (lon + 360.0) % 360.0
def lon360to180(lon):
"""
Converts longitude values in the range [0,360]
to longitude values in the range [-180,+180].
"""
lon = np.asanyarray(lon)
return ((lon + 180.) % 360.) - 180.
def bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True):
"""
USAGE
-----
ilon_start, ilon_end, jlat_start, jlat_end = bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True)
OR
(ilon_start_left, ilon_end_left, jlat_start, jlat_end), (ilon_start_right, ilon_end_right, jlat_start, jlat_end) = ...
... bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True)
Return indices for i,j that will completely cover the specified bounding box. 'lon' and 'lat' are 2D coordinate arrays
(generated by meshgrid), and 'bbox' is a list like [lon_start, lon_end, lat_start, lat_end] describing the desired
longitude-latitude box.
If the specified bbox is such that it crosses the edges of the longitude array, two tuples of indices are returned.
The first (second) tuple traces out the left (right) part of the bbox.
If FIX_IDL is set to 'True' (default), the indices returned correspond to the "short route" around the globe, which
    amounts to assuming that the specified bbox crosses the International Date Line. If FIX_IDL is set to 'False', the
"long route" is used instead.
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> lon = np.arange(-180., 180.25, 0.25)
>>> lat = np.arange(-90., 90.25, 0.25)
>>> lon, lat = np.meshgrid(lon, lat)
>>> h = np.sin(lon) + np.cos(lat)
>>> i0, i1, j0, j1 = bbox2ij(lon, lat, bbox=[-71, -63., 39., 46])
>>> h_subset = h[j0:j1,i0:i1]
>>> lon_subset = lon[j0:j1,i0:i1]
>>> lat_subset = lat[j0:j1,i0:i1]
>>> fig, ax = plt.subplots()
>>> ax.pcolor(lon_subset,lat_subset,h_subset)
>>> plt.axis('tight')
Original function downloaded from http://gis.stackexchange.com/questions/71630/subsetting-a-curvilinear-netcdf-file-roms-model-output-using-a-lon-lat-boundin
Modified by André Palóczy on August 20, 2016 to handle bboxes that
cross the International Date Line or the edges of the longitude array.
"""
lon, lat, bbox = map(np.asanyarray, (lon, lat, bbox))
    # Test whether the wanted bbox crosses the International Date Line (branch cut of the longitude array).
dlon = bbox[:2].ptp()
IDL_BBOX=dlon>180.
IDL_BBOX=np.logical_and(IDL_BBOX, FIX_IDL)
mypath = np.array([bbox[[0,1,1,0]], bbox[[2,2,3,3]]]).T
p = path.Path(mypath)
points = np.vstack((lon.flatten(), lat.flatten())).T
n, m = lon.shape
inside = p.contains_points(points).reshape((n, m))
    # Fix mask if bbox goes through the International Date Line.
if IDL_BBOX:
fcol=np.all(~inside, axis=0)
flin=np.any(inside, axis=1)
fcol, flin = map(np.expand_dims, (fcol, flin), (0, 1))
fcol = np.tile(fcol, (n, 1))
flin = np.tile(flin, (1, m))
inside=np.logical_and(flin, fcol)
print("Bbox crosses the International Date Line.")
ii, jj = np.meshgrid(range(m), range(n))
iiin, jjin = ii[inside], jj[inside]
i0, i1, j0, j1 = min(iiin), max(iiin), min(jjin), max(jjin)
SPLIT_BBOX=(i1-i0)==(m-1) # Test whether the wanted bbox crosses edges of the longitude array.
# If wanted bbox crosses edges of the longitude array, return indices for the two boxes separately.
if SPLIT_BBOX:
Iiin = np.unique(iiin)
ib0 = np.diff(Iiin).argmax() # Find edge of the inner side of the left bbox.
ib1 = ib0 + 1 # Find edge of the inner side of the right bbox.
Il, Ir = Iiin[ib0], Iiin[ib1] # Indices of the columns that bound the inner side of the two bboxes.
print("Bbox crosses edges of the longitude array. Returning two sets of indices.")
return (i0, Il, j0, j1), (Ir, i1, j0, j1)
else:
return i0, i1, j0, j1
def xy2dist(x, y, cyclic=False, datum='WGS84'):
"""
USAGE
-----
d = xy2dist(x, y, cyclic=False, datum='WGS84')
Calculates a distance axis from a line defined by longitudes and latitudes
'x' and 'y', using either the Vicenty formulae on an ellipsoidal earth
(ellipsoid defaults to WGS84) or on a sphere (if datum=='Sphere').
Example
-------
>>> yi, yf = -23.550520, 32.71573800
>>> xi, xf = -46.633309, -117.161084
>>> x, y = np.linspace(xi, xf), np.linspace(yi, yf)
>>> d_ellipse = xy2dist(x, y, datum='WGS84')[-1]*1e-3 # [km].
>>> d_sphere = xy2dist(x, y, datum='Sphere')[-1]*1e-3 # [km].
>>> dd = np.abs(d_ellipse - d_sphere)
>>> dperc = 100*dd/d_ellipse
>>> msg = 'Difference of %.1f km over a %.0f km-long line (%.3f %% difference)'%(dd, d_ellipse, dperc)
>>> print(msg)
"""
if datum!="Sphere":
xy = [LatLon(y0, x0, datum=Datums[datum]) for x0, y0 in zip(x, y)]
else:
xy = [LatLon_sphere(y0, x0) for x0, y0 in zip(x, y)]
d = np.array([xy[n].distanceTo(xy[n+1]) for n in range(len(xy)-1)])
return np.append(0, np.cumsum(d))
def get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10):
"""
USAGE
-----
    lonp, latp = get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10)
Generates a great-circle line with length 2L (with L in km) that is perpendicular to the great-circle line
defined by the input points (lon1, lat1) and (lon2, lat2). The spacing between the points along the output
line is dL km. Assumes a spherical Earth.
"""
km2m = 1e3
L, dL = L*km2m, dL*km2m
nh = int(L/dL)
p1, p2 = LatLon_sphere(lat1, lon1), LatLon_sphere(lat2, lon2)
angperp = p1.initialBearingTo(p2) + 90
angperpb = angperp + 180
pm = p1.midpointTo(p2)
# Create perpendicular line starting from the midpoint.
N = range(1, nh + 1)
pperp = []
_ = [pperp.append(pm.destination(dL*n, angperpb)) for n in N]
pperp.reverse()
pperp.append(pm)
_ = [pperp.append(pm.destination(dL*n, angperp)) for n in N]
lonperp = np.array([p.lon for p in pperp])
latperp = np.array([p.lat for p in pperp])
return lonperp, latperp
def get_arrdepth(arr):
"""
USAGE
-----
arr_depths = get_arrdepth(arr)
Determine number of nested levels in each
element of an array of arrays of arrays...
(or other array-like objects).
"""
arr = np.array(arr) # Make sure first level is an array.
all_nlevs = []
for i in range(arr.size):
nlev=0
wrk_arr = arr[i]
while np.size(wrk_arr)>0:
try:
wrk_arr = np.array(wrk_arr[i])
except Exception:
all_nlevs.append(nlev)
nlev=0
break
nlev+=1
return np.array(all_nlevs)
def fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True):
"""
USAGE
-----
fpts = fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True)
Find points in a rectangle made with 2 ginput points.
"""
fpts = np.array([])
for n in range(nboxes):
box = np.array(fig.ginput(n=2, timeout=0))
try:
xb, yb = box[:,0], box[:,1]
except IndexError:
print("No points selected. Skipping box \# %d."%(n+1))
continue
xl, xr, yd, yu = xb.min(), xb.max(), yb.min(), yb.max()
xbox = np.array([xl, xr, xr, xl, xl])
ybox = np.array([yd, yd, yu, yu, yd])
fxbox, fybox = np.logical_and(x>xl, x<xr), np.logical_and(y>yd, y<yu)
fptsi = np.logical_and(fxbox, fybox)
if return_index:
fptsi = np.where(fptsi)[0]
fpts = np.append(fpts, fptsi)
if plot:
ax.plot(xbox, ybox, 'r', linestyle='solid', marker='o', ms=4)
ax.plot(x[fptsi], y[fptsi], 'r', linestyle='none', marker='+', ms=5)
plt.draw()
fig.show()
else:
            plt.close(fig)
if plot:
plt.draw()
fig.show()
system("sleep %d"%pause_secs)
return fpts
def near(x, x0, npts=1, return_index=False):
"""
USAGE
-----
xnear = near(x, x0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in array 'x'
that are closest to a specified 'x0' point.
    If 'return_index' is True (defaults to False),
    then the indices of the closest points are
    returned, ordered by closeness.
"""
x = list(x)
xnear = []
xidxs = []
for n in range(npts):
idx = np.nanargmin(np.abs(np.array(x)-x0))
xnear.append(x.pop(idx))
if return_index:
xidxs.append(idx)
if return_index: # Sort indices according to the proximity of wanted points.
xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
xnear.sort()
if npts==1:
xnear = xnear[0]
if return_index:
xidxs = xidxs[0]
else:
xnear = np.array(xnear)
if return_index:
return xidxs
else:
return xnear
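# Usage sketch (illustrative; the sample array is an assumption):
def _demo_near():
    x = np.array([0., 1.5, 3., 4.5, 6.])
    xn1 = near(x, 3.2)          # -> 3.0 (single nearest value).
    xn2 = near(x, 3.2, npts=2)  # -> array([3.0, 4.5]).
    return xn1, xn2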
def near2(x, y, x0, y0, npts=1, return_index=False):
"""
USAGE
-----
xnear, ynear = near2(x, y, x0, y0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in arrays 'x' and 'y'
that are closest to a specified '(x0, y0)' point. If
    'return_index' is True (defaults to False), then the
indices of the closest point(s) are returned.
Example
-------
>>> x = np.arange(0., 100., 0.25)
>>> y = np.arange(0., 100., 0.25)
>>> x, y = np.meshgrid(x, y)
>>> x0, y0 = 44.1, 30.9
>>> xn, yn = near2(x, y, x0, y0, npts=1)
>>> print("(x0, y0) = (%f, %f)"%(x0, y0))
>>> print("(xn, yn) = (%f, %f)"%(xn, yn))
"""
x, y = map(np.array, (x, y))
shp = x.shape
xynear = []
xyidxs = []
dx = x - x0
dy = y - y0
dr = dx**2 + dy**2
for n in range(npts):
        xyidx = np.unravel_index(np.nanargmin(dr), shp)
if return_index:
xyidxs.append(xyidx)
xyn = (x[xyidx], y[xyidx])
xynear.append(xyn)
dr[xyidx] = np.nan
if npts==1:
xynear = xynear[0]
if return_index:
xyidxs = xyidxs[0]
if return_index:
return xyidxs
else:
return xynear
def mnear(x, y, x0, y0):
"""
USAGE
-----
xmin,ymin = mnear(x, y, x0, y0)
    Finds the point in a (lons,lats) line
that is closest to a specified (lon0,lat0) point.
"""
x,y,x0,y0 = map(np.asanyarray, (x,y,x0,y0))
point = (x0,y0)
d = np.array([])
for n in range(x.size):
xn,yn = x[n],y[n]
dn = distance((xn,x0),(yn,y0)) # Calculate distance point-wise.
d = np.append(d,dn)
idx = d.argmin()
return x[idx],y[idx]
def refine(line, nref=100, close=True):
"""
USAGE
-----
ref_line = refine(line, nref=100, close=True)
Given a 1-D sequence of points 'line', returns a
new sequence 'ref_line', which is built by linearly
interpolating 'nref' points between each pair of
subsequent points in the original line.
If 'close' is True (default), the first value of
the original line is repeated at the end of the
refined line, as in a closed polygon.
"""
line = np.squeeze(np.asanyarray(line))
if close:
line = np.append(line,line[0])
ref_line = np.array([])
for n in range(line.shape[0]-1):
xi, xf = line[n], line[n+1]
xref = np.linspace(xi,xf,nref)
ref_line = np.append(ref_line, xref)
return ref_line
def point_in_poly(x,y,poly):
"""
USAGE
-----
isinside = point_in_poly(x,y,poly)
Determine if a point is inside a given polygon or not
    Polygon is a list of (x,y) pairs. This function
returns True or False. The algorithm is called
'Ray Casting Method'.
Source: http://pseentertainmentcorp.com/smf/index.php?topic=545.0
"""
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
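# Illustrative check (added; the unit square is an assumption): one point inside
# and one point outside the polygon.
def _demo_point_in_poly():
    square = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]
    return point_in_poly(0.5, 0.5, square), point_in_poly(1.5, 0.5, square)   # (True, False).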
def get_mask_from_poly(xp, yp, poly, verbose=False):
"""
USAGE
-----
mask = get_mask_from_poly(xp, yp, poly, verbose=False)
Given two arrays 'xp' and 'yp' of (x,y) coordinates (generated by meshgrid)
and a polygon defined by an array of (x,y) coordinates 'poly', with
shape = (n,2), return a boolean array 'mask', where points that lie inside
'poly' are set to 'True'.
"""
print('Building the polygon mask...')
jmax, imax = xp.shape
mask = np.zeros((jmax,imax))
for j in range(jmax):
if verbose:
print("Row %s of %s"%(j+1,jmax))
for i in range(imax):
px, py = xp[j,i], yp[j,i]
# Test if this point is within the polygon.
mask[j,i] = point_in_poly(px, py, poly)
return mask
def sphericalpolygon_area(lons, lats, R=6371000.):
"""
USAGE
-----
area = sphericalpolygon_area(lons, lats, R=6371000.)
Calculates the area of a polygon on the surface of a sphere of
radius R using Girard's Theorem, which states that the area of
a polygon of great circles is R**2 times the sum of the angles
between the polygons minus (N-2)*pi, where N is number of corners.
R = 6371000 m (6371 km, default) is a typical value for the mean
radius of the Earth.
Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python
"""
lons, lats = map(np.asanyarray, (lons, lats))
N = lons.size
angles = np.empty(N)
for i in range(N):
phiB1, phiA, phiB2 = np.roll(lats, i)[:3]
LB1, LA, LB2 = np.roll(lons, i)[:3]
# calculate angle with north (eastward)
beta1 = greatCircleBearing(LA, phiA, LB1, phiB1)
beta2 = greatCircleBearing(LA, phiA, LB2, phiB2)
# calculate angle between the polygons and add to angle array
angles[i] = np.arccos(np.cos(-beta1)*np.cos(-beta2) + np.sin(-beta1)*np.sin(-beta2))
return (np.sum(angles) - (N-2)*np.pi)*R**2
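# Rough sanity-check sketch (illustrative, not part of the original module): a
# 1 deg x 1 deg cell at the equator spans roughly (111 km)**2 ~ 1.2e10 m2.
def _demo_sphericalpolygon_area():
    lons = [0., 1., 1., 0.]
    lats = [0., 0., 1., 1.]
    return sphericalpolygon_area(lons, lats)   # ~1.2e10 m2.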
def greatCircleBearing(lon1, lat1, lon2, lat2):
"""
USAGE
-----
angle = greatCircleBearing(lon1, lat1, lon2, lat2)
Calculates the angle (positive eastward) a
great circle passing through points (lon1,lat1)
    and (lon2,lat2) makes with true north.
Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python
"""
lon1, lat1, lon2, lat2 = map(np.asanyarray, (lon1, lat1, lon2, lat2))
dLong = lon1 - lon2
d2r = np.pi/180.
s = np.cos(d2r*lat2)*np.sin(d2r*dLong)
c = np.cos(d2r*lat1)*np.sin(d2r*lat2) - np.sin(lat1*d2r)*np.cos(d2r*lat2)*np.cos(d2r*dLong)
return np.arctan2(s, c)
def weim(x, N, kind='hann', badflag=-9999, beta=14):
"""
Usage
-----
xs = weim(x, N, kind='hann', badflag=-9999, beta=14)
Description
-----------
Calculates the smoothed array 'xs' from the original array 'x' using the specified
window of type 'kind' and size 'N'. 'N' must be an odd number.
Parameters
----------
x : 1D array
Array to be smoothed.
N : integer
Window size. Must be odd.
kind : string, optional
One of the window types available in the numpy module:
hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed.
hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is
discontinuous at the edges, and may produce undesired artifacts.
blackman : Similar to the hann and hamming windows, with sharper ends.
bartlett : Triangular-like. Its end-points are zeroed.
kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter.
For beta=0, the window is rectangular. As beta increases, the window gets narrower.
Refer to the numpy functions for details about each window type.
badflag : float, optional
        The bad data flag. Elements of the input array 'x' holding this value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the kaiser window,
this parameter does nothing.
Returns
-------
xs : 1D array
The smoothed array.
---------------------------------------
André Palóczy Filho (paloczy@gmail.com)
June 2012
==============================================================================================================
"""
###########################################
### Checking window type and dimensions ###
###########################################
kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser']
if ( kind not in kinds ):
raise ValueError('Invalid window type requested: %s'%kind)
if np.mod(N,2) == 0:
raise ValueError('Window size must be odd')
###########################
### Creating the window ###
###########################
    if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required).
        w = np.kaiser(N, beta)
    else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required).
        if kind == 'hann':
            kind = 'hanning' # numpy's Hann window is named hanning().
        w = getattr(np, kind)(N) # Look up the window function by name instead of using eval().
x = np.asarray(x).flatten()
Fnan = np.isnan(x).flatten()
    ln = (N-1)//2 # Integer half-width of the window.
lx = x.size
lf = lx - ln
xs = np.nan*np.ones(lx)
# Eliminating bad data from mean computation.
fbad=x==badflag
x[fbad] = np.nan
for i in range(lx):
if i <= ln:
xx = x[:ln+i+1]
ww = w[ln-i:]
elif i >= lf:
xx = x[i-ln:]
ww = w[:lf-i-1]
else:
xx = x[i-ln:i+ln+1]
ww = w.copy()
f = ~np.isnan(xx) # Counting only NON-NaNs, both in the input array and in the window points.
xx = xx[f]
ww = ww[f]
if f.sum() == 0: # Thou shalt not divide by zero.
xs[i] = x[i]
else:
xs[i] = np.sum(xx*ww)/np.sum(ww)
xs[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array.
return xs
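# Usage sketch (illustrative; the noisy synthetic series below is an assumption):
def _demo_weim():
    t = np.linspace(0., 2*np.pi, 200)
    x = np.sin(t) + 0.3*np.random.randn(t.size)   # Noisy signal.
    return weim(x, 11, kind='hann')               # 11-point Hann-smoothed version.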
def smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14):
"""
Usage
-----
As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14)
Description
-----------
Calculates the smoothed array 'As' from the original array 'A' using the specified
window of type 'kind' and shape ('hei','wid').
Parameters
----------
A : 2D array
Array to be smoothed.
hei : integer
Window height. Must be odd and greater than or equal to 3.
wid : integer
Window width. Must be odd and greater than or equal to 3.
kind : string, optional
One of the window types available in the numpy module:
hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed.
hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is
discontinuous at the edges, and may produce undesired artifacts.
blackman : Similar to the hann and hamming windows, with sharper ends.
bartlett : Triangular-like. Its end-points are zeroed.
kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter.
For beta=0, the window is rectangular. As beta increases, the window gets narrower.
Refer to the numpy functions for details about each window type.
badflag : float, optional
The bad data flag. Elements of the input array 'A' holding this value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the kaiser window,
this parameter does nothing.
Returns
-------
As : 2D array
The smoothed array.
---------------------------------------
André Palóczy Filho (paloczy@gmail.com)
April 2012
==============================================================================================================
"""
###########################################
### Checking window type and dimensions ###
###########################################
kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser']
if ( kind not in kinds ):
raise ValueError('Invalid window type requested: %s'%kind)
if ( np.mod(hei,2) == 0 ) or ( np.mod(wid,2) == 0 ):
raise ValueError('Window dimensions must be odd')
if (hei <= 1) or (wid <= 1):
raise ValueError('Window shape must be (3,3) or greater')
##############################
### Creating the 2D window ###
##############################
    if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required).
        wdw = np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))
    else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required).
        if kind == 'hann':
            kind = 'hanning' # numpy's Hann window is named hanning().
        # Outer product makes a 2D window out of the original 1D windows.
        win = getattr(np, kind) # Look up the window function by name instead of using eval().
        wdw = np.outer(win(hei), win(wid))
A = np.asanyarray(A)
Fnan = np.isnan(A)
imax, jmax = A.shape
As = np.nan*np.ones( (imax, jmax) )
for i in range(imax):
for j in range(jmax):
### Default window parameters.
wupp = 0
wlow = hei
wlef = 0
wrig = wid
            lh = int(np.floor(hei/2))
            lw = int(np.floor(wid/2))
### Default array ranges (functions of the i,j indices).
upp = i-lh
low = i+lh+1
lef = j-lw
rig = j+lw+1
##################################################
### Tiling window and input array at the edges ###
##################################################
# Upper edge.
if upp < 0:
wupp = wupp-upp
upp = 0
# Left edge.
if lef < 0:
wlef = wlef-lef
lef = 0
# Bottom edge.
if low > imax:
ex = low-imax
wlow = wlow-ex
low = imax
# Right edge.
if rig > jmax:
ex = rig-jmax
wrig = wrig-ex
rig = jmax
###############################################
### Computing smoothed value at point (i,j) ###
###############################################
            Ac = A[upp:low, lef:rig].copy() # Copy to avoid overwriting the input array below.
            wdwc = wdw[wupp:wlow, wlef:wrig].copy() # Copy to avoid permanently zeroing the window.
fnan = np.isnan(Ac)
Ac[fnan] = 0; wdwc[fnan] = 0 # Eliminating NaNs from mean computation.
fbad = Ac==badflag
wdwc[fbad] = 0 # Eliminating bad data from mean computation.
a = Ac * wdwc
As[i,j] = a.sum() / wdwc.sum()
As[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array.
return As
def denan(arr):
"""
USAGE
-----
denaned_arr = denan(arr)
Remove the NaNs from an array.
"""
f = np.isnan(arr)
return arr[~f]
def standardize(series):
"""
USAGE
-----
series2 = standardize(series)
Standardizes a series by subtracting its mean value
and dividing by its standard deviation. The result is
a dimensionless series. Inputs can be of type
"np.array", or "Pandas.Series"/"Pandas.TimeSeries".
"""
Mean, Std = series.mean(), series.std()
return (series - Mean)/Std
def linear_trend(series, return_line=True):
"""
USAGE
-----
line = linear_trend(series, return_line=True)
OR
b, a, x = linear_trend(series, return_line=False)
Returns the linear fit (line = b*x + a) associated
with the 'series' array.
Adapted from pylab.detrend_linear.
"""
series = np.asanyarray(series)
x = np.arange(series.size, dtype=np.float_)
C = np.cov(x, series, bias=1) # Covariance matrix.
b = C[0, 1]/C[0, 0] # Angular coefficient.
a = series.mean() - b*x.mean() # Linear coefficient.
line = b*x + a
if return_line:
return line
else:
return b, a, x
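# Usage sketch (illustrative; the synthetic series is an assumption): removing a
# linear trend from a noisy ramp.
def _demo_linear_trend():
    series = 0.5*np.arange(100.) + np.random.randn(100)
    trend = linear_trend(series)    # Fitted line b*x + a, same size as `series`.
    return series - trend           # Detrended series.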
def thomas(A, b):
"""
USAGE
-----
x = thomas(A,b)
Solve Ax = b (where A is a tridiagonal matrix)
using the Thomas Algorithm.
References
----------
For a step-by-step derivation of the algorithm, see
e.g., http://www3.ul.ie/wlee/ms6021_thomas.pdf
"""
# Step 1: Sweep rows from top to bottom,
# calculating gammas and rhos along the way.
N = b.size
gam = [float(A[0,1]/A[0,0])]
rho = [float(b[0]/A[0,0])]
    for i in range(1, N): # Row 0 was handled above when initializing gam and rho.
rho.append(float((b[i] - A[i,i-1]*rho[-1])/(A[i,i] - A[i,i-1]*gam[-1])))
if i<N-1: # No gamma in the last row.
gam.append(float(A[i,i+1]/(A[i,i] - A[i,i-1]*gam[-1])))
# Step 2: Substitute solutions for unknowns
# starting from the bottom row all the way up.
x = [] # Vector of unknowns.
x.append(rho.pop()) # Last row is already solved.
for i in range(N-2, -1, -1):
x.append(float(rho.pop() - gam.pop()*x[-1]))
x.reverse()
return np.array(x)
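# Minimal check sketch (illustrative; the 5x5 tridiagonal system is an assumption):
def _demo_thomas():
    A = np.diag(2.*np.ones(5)) + np.diag(-np.ones(4), 1) + np.diag(-np.ones(4), -1)
    b = np.arange(1., 6.)
    x = thomas(A, b)
    return np.allclose(A.dot(x), b)   # Expected to be True.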
def topo_slope(lon, lat, h):
"""
USAGE
-----
lons, lats, slope = topo_slope(lon, lat, h)
Calculates bottom slope for a topography fields 'h' at
coordinates ('lon', 'lat') using first-order finite differences.
The output arrays have shape (M-1,L-1), where M,L = h.shape().
"""
lon,lat,h = map(np.asanyarray, (lon,lat,h))
deg2m = 1852.*60. # m/deg.
deg2rad = np.pi/180. # rad/deg.
x = lon*deg2m*np.cos(lat*deg2rad)
y = lat*deg2m
# First-order differences, accurate to O(dx) and O(dy),
# respectively.
sx = (h[:,1:] - h[:,:-1]) / (x[:,1:] - x[:,:-1])
sy = (h[1:,:] - h[:-1,:]) / (y[1:,:] - y[:-1,:])
# Finding the values of the derivatives sx and sy
# at the same location in physical space.
sx = 0.5*(sx[1:,:]+sx[:-1,:])
sy = 0.5*(sy[:,1:]+sy[:,:-1])
# Calculating the bottom slope.
slope = np.sqrt(sx**2 + sy**2)
# Finding the lon,lat coordinates of the
# values of the derivatives sx and sy.
lons = 0.5*(lon[1:,:]+lon[:-1,:])
lats = 0.5*(lat[1:,:]+lat[:-1,:])
lons = 0.5*(lons[:,1:]+lons[:,:-1])
lats = 0.5*(lats[:,1:]+lats[:,:-1])
return lons, lats, slope
def curvature_geometric(x, y):
"""
USAGE
-----
k = curvature_geometric(x, y)
Estimates the curvature k of a 2D curve (x,y) using a geometric method.
If your curve is given by two arrays, x and y, you can
approximate its curvature at each point by the reciprocal of the
radius of a circumscribing triangle with that point, the preceding
point, and the succeeding point as vertices. The radius of such a
triangle is one fourth the product of the three sides divided by its
area.
The curvature will be positive for curvature to the left and
negative for curvature to the right as you advance along the curve.
Note that if your data are too closely spaced together or subject
to substantial noise errors, this formula will not be very accurate.
Author: Roger Stafford
Source: http://www.mathworks.com/matlabcentral/newsreader/view_thread/125637
Translated to Python by André Palóczy, January 19, 2015.
"""
x,y = map(np.asanyarray, (x,y))
x1 = x[:-2]; x2 = x[1:-1]; x3 = x[2:]
y1 = y[:-2]; y2 = y[1:-1]; y3 = y[2:]
## a, b, and c are the three sides of the triangle.
a = np.sqrt((x3-x2)**2 + (y3-y2)**2)
b = np.sqrt((x1-x3)**2 + (y1-y3)**2)
c = np.sqrt((x2-x1)**2 + (y2-y1)**2)
## A is the area of the triangle.
A = 0.5*(x1*y2 + x2*y3 + x3*y1 - x1*y3 - x2*y1 - x3*y2)
## The reciprocal of the circumscribed radius, i.e., the curvature.
k = 4.0*A/(a*b*c)
return np.squeeze(k)
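# Sanity-check sketch (illustrative): points sampled counterclockwise on a circle
# of radius R should have curvature ~ +1/R everywhere.
def _demo_curvature_geometric():
    R = 10.
    th = np.linspace(0., 2*np.pi, 200)
    k = curvature_geometric(R*np.cos(th), R*np.sin(th))
    return np.allclose(k, 1./R)   # Expected to be True.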
def get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw):
"""
USAGE
-----
lon_isob, lat_isob = get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw)
Retrieves the 'lon_isob','lat_isob' coordinates of a wanted 'iso'
isobath from a topography array 'topo', with 'lon_topo','lat_topo'
coordinates.
"""
lon, lat, topo = map(np.array, (lon, lat, topo))
fig, ax = plt.subplots()
cs = ax.contour(lon, lat, topo, [iso])
coll = cs.collections[0]
    ## Test all lines to find the longest one.
## This is assumed to be the wanted isobath.
ncoll = len(coll.get_paths())
siz = np.array([])
for n in range(ncoll):
path = coll.get_paths()[n]
siz = np.append(siz, path.vertices.shape[0])
f = siz.argmax()
xiso = coll.get_paths()[f].vertices[:, 0]
yiso = coll.get_paths()[f].vertices[:, 1]
plt.close()
# Smooth the isobath with a moving window.
# Periodize according to window length to avoid losing edges.
if smooth_isobath:
fleft = window_length//2
fright = -window_length//2 + 1
if cyclic:
xl = xiso[:fleft] + 360
xr = xiso[fright:] - 360
yl = yiso[:fleft]
yr = yiso[fright:]
xiso = np.concatenate((xr, xiso, xl))
yiso = np.concatenate((yr, yiso, yl))
# xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME
# yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME
# else:
# xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME
# yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME
return xiso, yiso
def angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw):
"""
USAGE
-----
lon_isob, lat_isob, angle = angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw)
Returns the coordinates ('lon_isob', 'lat_isob') and the angle an isobath
makes with the zonal direction for a topography array 'h' at coordinates
('lon', 'lat'). Defaults to the 100 m isobath.
If 'smooth_isobath'==True, smooths the isobath with a rolling window of type
'win_type' and 'window_length' points wide.
All keyword arguments are passed to 'pandas.rolling_window()'.
If 'plot_map'==True, plots a map showing
    the isobath (and its smoothed version if smooth_isobath==True).
"""
lon, lat, h = map(np.array, (lon, lat, h))
R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius.
deg2rad = np.pi/180. # [rad/deg]
# Extract isobath coordinates
xiso, yiso = get_isobath(lon, lat, h, isobath)
if cyclic: # Add cyclic point.
xiso = np.append(xiso, xiso[0])
yiso = np.append(yiso, yiso[0])
# Smooth the isobath with a moving window.
if smooth_isobath:
xiso = rolling_window(xiso, window=window_length, win_type=win_type, **kw)
yiso = rolling_window(yiso, window=window_length, win_type=win_type, **kw)
# From the coordinates of the isobath, find the angle it forms with the
# zonal axis, using points k+1 and k.
shth = yiso.size-1
theta = np.zeros(shth)
for k in range(shth):
dyk = R*(yiso[k+1]-yiso[k])
dxk = R*(xiso[k+1]-xiso[k])*np.cos(yiso[k]*deg2rad)
theta[k] = np.arctan2(dyk,dxk)
xisom = 0.5*(xiso[1:] + xiso[:-1])
yisom = 0.5*(yiso[1:] + yiso[:-1])
# Plots map showing the extracted isobath.
if plot_map:
fig, ax = plt.subplots()
m = bb_map([lon.min(), lon.max()], [lat.min(), lat.max()], projection='cyl', resolution='h', ax=ax)
m.plot(xisom, yisom, color='b', linestyle='-', zorder=3, latlon=True)
input("Press any key to continue.")
plt.close()
return xisom, yisom, theta
def isopyc_depth(z, dens0, isopyc=1027.75, dzref=1.):
"""
USAGE
-----
hisopyc = isopyc_depth(z, dens0, isopyc=1027.75)
Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc'
    (defaults to 1027.75 kg/m3) from a 3D density array 'dens0' (in kg/m3) with shape
(nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz).
'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which
is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother
    the returned isopycnal depth array 'hisopyc'.
"""
z, dens0 = map(np.asanyarray, (z, dens0))
ny, nx = dens0.shape[1:]
zref = np.arange(z.min(), z.max(), dzref)
if np.ma.isMaskedArray(dens0):
dens0 = np.ma.filled(dens0, np.nan)
hisopyc = np.nan*np.ones((ny,nx))
for j in range(ny):
for i in range(nx):
dens0ij = dens0[:,j,i]
if np.logical_or(np.logical_or(isopyc<np.nanmin(dens0ij), np.nanmax(dens0ij)<isopyc), np.isnan(dens0ij).all()):
continue
else:
dens0ref = np.interp(zref, z, dens0ij) # Refined density profile.
dens0refn = near(dens0ref, isopyc)
fz=dens0ref==dens0refn
try:
hisopyc[j,i] = zref[fz]
except ValueError:
print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (j=%d,i=%d)."%(fz.sum(), j, i))
hisopyc[j,i] = np.nanmedian(zref[fz])
return hisopyc
def whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9):
"""
USAGE
-----
whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9)
Changes to white the color of the 'n' (defaults to 1)
neighboring patches about the zero contour created
by a command like 'cs = ax.contourf(x, y, z)'.
"""
x, y, z = map(np.asanyarray, (x,y,z))
white = (1.,1.,1.)
cslevs = cs.levels
assert 0. in cslevs
f0=np.where(cslevs==0.)[0][0]
f0m, f0p = f0-n, f0+n
c0m, c0p = cslevs[f0m], cslevs[f0p]
ax.contourf(x, y, z, levels=[c0m, c0p], linestyles='none', colors=[white, white], cmap=None, zorder=zorder)
def wind2stress(u, v, formula='large_pond1981-modified'):
"""
USAGE
-----
taux,tauy = wind2stress(u, v, formula='mellor2004')
Converts u,v wind vector components to taux,tauy
wind stress vector components.
"""
rho_air = 1.226 # kg/m3
mag = np.sqrt(u**2+v**2) # m/s
Cd = np.zeros( mag.shape ) # Drag coefficient.
if formula=='large_pond1981-modified':
# Large and Pond (1981) formula
# modified for light winds, as
# in Trenberth et al. (1990).
f=mag<=1.
Cd[f] = 2.18e-3
f=np.logical_and(mag>1.,mag<3.)
Cd[f] = (0.62+1.56/mag[f])*1e-3
f=np.logical_and(mag>=3.,mag<10.)
Cd[f] = 1.14e-3
f=mag>=10.
Cd[f] = (0.49 + 0.065*mag[f])*1e-3
elif formula=='mellor2004':
Cd = 7.5e-4 + 6.7e-5*mag
else:
np.disp('Unknown formula for Cd.')
pass
# Computing wind stress [N/m2]
taux = rho_air*Cd*mag*u
tauy = rho_air*Cd*mag*v
return taux,tauy
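# Usage sketch (illustrative): a 10 m/s zonal wind gives a zonal stress of roughly
# 0.14 N/m2 with the modified Large & Pond (1981) drag coefficient.
def _demo_wind2stress():
    u, v = np.array([10.]), np.array([0.])
    taux, tauy = wind2stress(u, v)
    return taux, tauy   # taux ~ 0.14 N/m2, tauy = 0.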
def gen_dates(start, end, dt='day', input_datetime=False):
"""
Returns a list of datetimes within the date range
from `start` to `end`, at a `dt` time interval.
`dt` can be 'second', 'minute', 'hour', 'day', 'week',
'month' or 'year'.
If `input_datetime` is False (default), `start` and `end`
must be a date in string form. If `input_datetime` is True,
`start` and `end` must be datetime objects.
Note
----
Modified from original function
by Filipe Fernandes (ocefpaf@gmail.com).
Example
-------
>>> from ap_tools.utils import gen_dates
>>> from datetime import datetime
>>> start = '1989-08-19'
>>> end = datetime.utcnow().strftime("%Y-%m-%d")
>>> gen_dates(start, end, dt='day')
"""
DT = dict(second=rrule.SECONDLY,
minute=rrule.MINUTELY,
hour=rrule.HOURLY,
day=rrule.DAILY,
week=rrule.WEEKLY,
month=rrule.MONTHLY,
year=rrule.YEARLY)
dt = DT[dt]
if input_datetime: # Input are datetime objects. No parsing needed.
dates = rrule.rrule(dt, dtstart=start, until=end)
else: # Input in string form, parse into datetime objects.
dates = rrule.rrule(dt, dtstart=parser.parse(start), until=parser.parse(end))
return list(dates)
def fmt_isobath(cs, fontsize=8, fmt='%g', inline=True, inline_spacing=7, manual=True, **kw):
"""
Formats the labels of isobath contours. `manual` is set to `True` by default,
but can be `False`, or a tuple/list of tuples with the coordinates of the labels.
All options are passed to plt.clabel().
"""
isobstrH = plt.clabel(cs, fontsize=fontsize, fmt=fmt, inline=inline, \
inline_spacing=inline_spacing, manual=manual, **kw)
for ih in range(0, len(isobstrH)): # Appends 'm' for meters at the end of the label.
isobstrh = isobstrH[ih]
isobstr = isobstrh.get_text()
isobstr = isobstr.replace('-','') + ' m'
isobstrh.set_text(isobstr)
def float2latex(f, ndigits=1):
"""
USAGE
-----
texstr = float2latex(f, ndigits=1)
Converts a float input into a latex-formatted
string with 'ndigits' (defaults to 1).
Adapted from:
http://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python
"""
float_str = "{0:.%se}"%ndigits
float_str = float_str.format(f)
base, exponent = float_str.split("e")
return "${0} \times 10^{{{1}}}$".format(base, int(exponent))
def mat2npz(matname):
"""
USAGE
-----
mat2npz(matname)
    Extracts variables stored in a .mat file,
and saves them in a .npz file.
"""
d = loadmat(matname)
_ = d.pop('__header__')
_ = d.pop('__globals__')
_ = d.pop('__version__')
npzname = matname[:-4] + '.npz'
np.savez(npzname,**d)
return None
def bb_map(lons, lats, ax, projection='merc', resolution='i', drawparallels=True, drawmeridians=True):
"""
USAGE
-----
m = bb_map(lons, lats, **kwargs)
Returns a Basemap instance with lon,lat bounding limits
inferred from the input arrays `lons`,`lats`.
Coastlines, countries, states, parallels and meridians
are drawn, and continents are filled.
"""
lons,lats = map(np.asanyarray, (lons,lats))
lonmin,lonmax = lons.min(),lons.max()
latmin,latmax = lats.min(),lats.max()
m = Basemap(llcrnrlon=lonmin,
urcrnrlon=lonmax,
llcrnrlat=latmin,
urcrnrlat=latmax,
projection=projection,
resolution=resolution,
ax=ax)
plt.ioff() # Avoid showing the figure.
m.fillcontinents(color='0.9', zorder=9)
m.drawcoastlines(zorder=10)
m.drawstates(zorder=10)
m.drawcountries(linewidth=2.0, zorder=10)
m.drawmapboundary(zorder=9999)
if drawmeridians:
m.drawmeridians(np.arange(np.floor(lonmin), np.ceil(lonmax), 1), linewidth=0.15, labels=[1, 0, 1, 0], zorder=12)
if drawparallels:
m.drawparallels(np.arange(np.floor(latmin), np.ceil(latmax), 1), linewidth=0.15, labels=[1, 0, 0, 0], zorder=12)
plt.ion()
return m
def dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r', marker='o', markersize=5):
"""
USAGE
-----
dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r')
Plots dots colored with a dual-color criterion,
separated by a threshold value.
"""
ax = plt.gca()
# Below-threshold dots.
f=z<=thresh
ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_low, mec=color_low)
# Above-threshold dots.
f=z>thresh
ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_high, mec=color_high)
if __name__=='__main__':
import doctest
doctest.testmod()
| mit |
camallen/aggregation | experimental/condor/animal_EM.py | 2 | 7334 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import csv
import matplotlib.pyplot as plt
import random
import math
import urllib
import matplotlib.cbook as cbook
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
sys.path.append("/home/greg/github/reduction/experimental/classifier")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
from iterativeEM import IterativeEM
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-23']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
big_userList = []
big_subjectList = []
animal_count = 0
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
alreadyDone = []
animals_in_image = {}
animal_index = -1
global_user_list = []
animal_to_image = []
zooniverse_list = []
condor_votes = {}
animal_votes = {}
#subject_vote = {}
results = []
to_sample_from = list(subject_collection.find({"state":"complete"}))
to_sample_from2 = list(subject_collection.find({"classification_count":1,"state":"active"}))
votes = []
sample = random.sample(to_sample_from,100)
#sample.extend(random.sample(to_sample_from2,1000))
# for subject_index,subject in enumerate(sample):
# print "== " + str(subject_index)
# zooniverse_id = subject["zooniverse_id"]
# for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
# if "user_name" in classification:
# user = classification["user_name"]
# else:
# user = classification["user_ip"]
#
# try:
# tt = index(big_userList,user)
# except ValueError:
# bisect.insort(big_userList,user)
for subject_index,subject in enumerate(sample):
print subject_index
zooniverse_id = subject["zooniverse_id"]
annotation_list = []
user_list = []
animal_list = []
#local_users = []
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
found_condor = False
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
animal_type = animal["animal"]
if not(animal_type in ["carcassOrScale","carcass"]):
annotation_list.append((x,y))
#print annotation_list
user_list.append(user)
animal_list.append(animal_type)
if not(user in global_user_list):
global_user_list.append(user)
#local_users.append(user)
if animal_type == "condor":
found_condor = True
except (ValueError,KeyError):
pass
#if there were any markings on the image, use divisive kmeans to cluster the points so that each
    #cluster represents a single animal in the image
if annotation_list != []:
user_identified,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True)
#fix split clusters if necessary
if user_identified != []:
user_identified,clusters = DivisiveKmeans(3).__fix__(user_identified,clusters,annotation_list,user_list,200)
for center,c in zip(user_identified,clusters):
animal_index += 1
#animal_votes.append([])
animal_to_image.append(zooniverse_id)
if not(zooniverse_id in animals_in_image):
animals_in_image[zooniverse_id] = [animal_index]
else:
animals_in_image[zooniverse_id].append(animal_index)
results.append((zooniverse_id,center))
for pt in c:
pt_index = annotation_list.index(pt)
user_index = global_user_list.index(user_list[pt_index])
animal_type = animal_list[annotation_list.index(pt)]
if animal_type == "condor":
votes.append((user_index,animal_index,1))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [1]
else:
animal_votes[animal_index].append(1)
else:
votes.append((user_index,animal_index,0))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [0]
else:
animal_votes[animal_index].append(0)
print "=====---"
#print votes
classify = IterativeEM()
classify.__classify__(votes)
most_likely = classify.__getMostLikely__()
estimates = classify.__getEstimates__()
X = []
Y = []
X2 = []
Y2 = []
#for subject_index,zooniverse_id in enumerate(big_subjectList):
for ii in range(animal_index):
x = np.mean(animal_votes[ii])
y = estimates[ii][1]
X.append(x)
Y.append(y)
if math.fabs(x-y) > 0.3:
zooniverse_id,(centerX,centerY) = results[ii]
print x,y
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)):
urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
plt.plot([centerX,],[centerY,],'o')
plt.show()
# #if ((x < 0.5) and (y > 0.5)) or ((x > 0.5) and (y < 0.5)):
# subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
# print x,y
# print subject["location"]["standard"]
# #print most_likely[subject_index],estimates[subject_index],np.mean(subject_vote[zooniverse_id])
#else:
# print estimates[subject_index],0
plt.plot(X,Y,'.',color="blue")
plt.plot(X2,Y2,'.',color="red")
plt.xlim((-0.05,1.05))
plt.ylim((-0.05,1.05))
plt.show() | apache-2.0 |
ominux/scikit-learn | examples/linear_model/plot_sgd_iris.py | 4 | 2171 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
pl.set_cmap(pl.cm.Paired)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
cs = pl.contourf(xx, yy, Z)
pl.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes, colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i])
pl.title("Decision surface of multi-class SGD")
pl.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
pl.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes, colors):
plot_hyperplane(i, color)
pl.legend()
pl.show()
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
An experimental support for curvilinear grid.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which defines the transform and its inverse. The callables
need take two arguments of array of source coordinates and
should return two target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
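    # Usage sketch (illustrative, not part of the original source): as the
    # docstring above notes, ``aux_trans`` may also be given as a pair of
    # callables instead of a Transform instance, e.g.
    #
    #     def tr(x, y):       # source -> target
    #         return x, y - x
    #
    #     def inv_tr(x, y):   # target -> source
    #         return x, y + x
    #
    #     grid_helper = GridHelperCurveLinear((tr, inv_tr))
    #
    # Both callables take arrays of coordinates and return arrays.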
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new transform (here a simple shear used for testing).
            Resolution is the number of steps to interpolate between each
            input line segment to approximate its path in the curved space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory, ParasiteAxesAuxTrans
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees.
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are slightly different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees.
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are slightly different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
vshtanko/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimensions (in particular for the
euclidean and cityblock metrics).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm (the "euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of each class, are much bigger for the euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
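# Rough numeric illustration of the l1-vs-l2 claim in the docstring
# (illustrative only, not part of the original example): for a sparse vector
# the l1 norm stays close to the l2 norm, while for dense noise the l1/l2
# ratio grows with the dimension, e.g.
#
#     v = np.zeros(2000); v[:120] = 1.0                  # sparse: ~6% non-zero
#     print(np.abs(v).sum() / np.sqrt((v ** 2).sum()))   # ~11
#     w = np.random.rand(2000)                           # dense noise
#     print(np.abs(w).sum() / np.sqrt((w ** 2).sum()))   # ~39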
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
soleneulmer/atmos | indicators_molec.py | 1 | 4324 | # ===================================
# CALCULATES Ioff and Ires
# Indicators described in Molecfit II
#
# Solene 20.09.2016
# ===================================
#
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
# from PyAstronomy import pyasl
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import stats
# from sklearn.metrics import mean_squared_error
# from math import sqrt
# from numpy import linalg as LA
# MOLECFIT
#
file_molecfit = '/home/solene/atmos/For_Solene/1203nm/output/molecfit_crires_solene_tac.fits'
hdu_molecfit = fits.open(file_molecfit)
data_molecfit = hdu_molecfit[1].data
cols_molecfit = hdu_molecfit[1].columns
# cols_molecfit.info()
rawwl_molecfit = data_molecfit.field('mlambda')
wl_molecfit = rawwl_molecfit*10e2
trans_molecfit = data_molecfit.field('mtrans')
cflux_molecfit = data_molecfit.field('cflux')
# TELFIT
#
file_telfit = '/home/solene/atmos/trans_telfit.txt'
wl_telfit, trans_telfit, wl_datatelfit, flux_datatelfit = np.loadtxt(
file_telfit, unpack=True)
# Interpolation
f_molecfit = interp1d(wl_molecfit, cflux_molecfit, kind='cubic')
ftrans_molecfit = interp1d(wl_molecfit, trans_molecfit, kind='cubic')
# f_tapas = interp1d(wlcorr_tapas, trans_tapas)
# **1** BINNED DATA
# 3 delta-lambda = 0.036
# Mean and std deviation of bins on the telluric CORRECTED spectrum
fluxmean_bin_means, bin_edges, binnumber = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
fluxstd_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic=np.std,
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width/2
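# Note: scipy.stats.binned_statistic returns (statistic, bin_edges, binnumber),
# which is why the bin centers are reconstructed from the edges above.
# Minimal sketch of the same pattern (illustrative, with generic x and y):
#     means, edges, _ = stats.binned_statistic(x, y, statistic='mean', bins=50)
#     centers = edges[:-1] + np.diff(edges) / 2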
# **2** Bins where average TRANSMISSION is > 0.99
flux_trans_mean_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, ftrans_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
# cont_bin_means = flux_trans_mean_bin_means[flux_trans_mean_bin_means > 0.99]
ind_cont = np.where(flux_trans_mean_bin_means > 0.99)
ind_out = np.where((flux_trans_mean_bin_means < 0.95) &
(flux_trans_mean_bin_means > 0.1))
# plt.plot(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], 'kx')
# **3** Interpolation of the continuum cubic
# f_cont = interp1d(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], kind='cubic')
# Extrapolation with constant value spline
f_cont = InterpolatedUnivariateSpline(
bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], ext=3)
# bbox=[bin_centers[ind_cont][0], bin_centers[ind_cont][-1]],
# **5** Subtract cont to mean flux
# and Divide offset and std by interpolated continuum mean value
sys_offset = (fluxmean_bin_means - f_cont(bin_centers)) / f_cont(bin_centers)
flux_std = fluxstd_bin_means / f_cont(bin_centers)
# **6** independent WL = Divide by average absorption
absorp_molecfit = 1 - flux_trans_mean_bin_means
sys_offset_final = sys_offset / absorp_molecfit
flux_std_final = flux_std / absorp_molecfit
plt.figure(1)
plt.plot(wl_datatelfit, flux_datatelfit, 'b.-', label='Raw data')
# plt.hlines(flux_bin_means, bin_edges[:-1],
# bin_edges[1:], colors='g', lw=5, label='binned statistic of data')
plt.plot(bin_centers, fluxmean_bin_means, 'rx-', label='Mean binned data')
plt.plot(bin_centers, fluxstd_bin_means, 'kx-', label='Standard deviation binned data')
plt.legend()
plt.figure(2)
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wl_datatelfit, f_molecfit(wl_datatelfit),
'b-', label='Corrected data - Molecfit')
plt.plot(wl_datatelfit, f_cont(wl_datatelfit),
'k-', label='Interpolated Continuum')
plt.plot(sys_offset_final[ind_out], flux_std_final[ind_out], 'kx')
plt.plot(flux_trans_mean_bin_means[ind_out],
sys_offset_final[ind_out], 'kx', label='Ioff vs Transmission')
plt.plot(flux_trans_mean_bin_means[ind_out],
flux_std_final[ind_out], 'r.', label='Ires vs Transmission')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Transmission')
plt.legend(loc=3.)
plt.show()
| mit |
allenlavoie/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 28 | 5024 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
@deprecated(None, 'Please use tf.estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
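# Minimal usage sketch (assumed, not taken from this module): feed a pandas
# DataFrame `df` with hypothetical feature/label columns to an estimator.
#
#     input_fn = pandas_input_fn(
#         x=df[['feature_a', 'feature_b']],
#         y=df['label'],
#         batch_size=32,
#         num_epochs=10,
#         shuffle=True)
#     estimator.train(input_fn=input_fn)  # `estimator` is assumed to exist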
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
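# Illustrative example (assumed):
#     extract_pandas_data(pd.DataFrame({'a': [1, 2], 'b': [True, False]}))
# returns a float ndarray roughly equal to [[1., 1.], [2., 0.]]; a column of
# strings would instead raise ValueError.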
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
sysid/kg | quora/Ensemble_CNN_TD_Quora.py | 1 | 12948 | # coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# In[2]:
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
# In[3]:
train.head()
# In[4]:
test.head()
# In[5]:
print(train.shape)
print(test.shape)
# In[6]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[7]:
train = train.fillna('empty')
test = test.fillna('empty')
# In[8]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[9]:
test.head()
# In[10]:
for i in range(6):
print(train.question1[i])
print(train.question2[i])
print()
# In[17]:
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally remove stop words (true by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\'+-=]", " ", text)
text = re.sub(r"\'s", " 's ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", " cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"\s{2,}", " ", text)
# Shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return(text)
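# Illustrative behaviour (assumed): text_to_wordlist("What's the best way?")
# lowercases the text, expands "'s", replaces characters outside the kept set
# with spaces and collapses repeated whitespace, giving roughly
# "what 's the best way".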
# In[18]:
def process_questions(question_list, questions, question_list_name, dataframe):
# function to transform questions and display progress
for question in questions:
question_list.append(text_to_wordlist(question))
if len(question_list) % 100000 == 0:
progress = len(question_list)/len(dataframe) * 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
# In[19]:
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
# In[35]:
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
# In[36]:
test_question1 = []
process_questions(test_question1, test.question1, 'test_question1', test)
# In[37]:
test_question2 = []
process_questions(test_question2, test.question2, 'test_question2', test)
# # Using Keras
# In[38]:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import datetime, time, json
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Bidirectional, Convolution1D, GRU, MaxPooling1D, Convolution2D
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from collections import defaultdict
# In[39]:
# Count the number of unique words in the questions
word_count = defaultdict(int)
for question in train_question1:
    for word in question.split():
        word_count[word] += 1
print("train_question1 is complete.")
for question in train_question2:
    for word in question.split():
        word_count[word] += 1
print("train_question2 is complete.")
for question in test_question1:
    for word in question.split():
        word_count[word] += 1
print("test_question1 is complete.")
for question in test_question2:
    for word in question.split():
        word_count[word] += 1
print("test_question2 is complete.")
print("Total number of unique words:", len(word_count))
# In[40]:
# Find the length of questions
lengths = []
for question in train_question1:
lengths.append(len(question.split()))
for question in train_question2:
lengths.append(len(question.split()))
# Create a dataframe so that the values can be inspected
lengths = pd.DataFrame(lengths, columns=['counts'])
# In[41]:
lengths.counts.describe()
# In[42]:
np.percentile(lengths.counts, 99.5)
# In[43]:
num_words = 200000
train_questions = train_question1 + train_question2
tokenizer = Tokenizer(nb_words = num_words)
tokenizer.fit_on_texts(train_questions)
print("Fitting is compelte.")
train_question1_word_sequences = tokenizer.texts_to_sequences(train_question1)
print("train_question1 is complete.")
train_question2_word_sequences = tokenizer.texts_to_sequences(train_question2)
print("train_question2 is complete")
# In[44]:
test_question1_word_sequences = tokenizer.texts_to_sequences(test_question1)
print("test_question1 is complete.")
test_question2_word_sequences = tokenizer.texts_to_sequences(test_question2)
print("test_question2 is complete.")
# In[45]:
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# In[46]:
# Pad the questions so that they all have the same length.
max_question_len = 37
train_q1 = pad_sequences(train_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q1 is complete.")
train_q2 = pad_sequences(train_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q2 is complete.")
# In[47]:
test_q1 = pad_sequences(test_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q1 is complete.")
test_q2 = pad_sequences(test_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q2 is complete.")
# In[48]:
y_train = train.is_duplicate
# In[49]:
# Load GloVe to use pretrained vectors
# From this link: https://nlp.stanford.edu/projects/glove/
embeddings_index = {}
with open('glove.840B.300d.txt', encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings:', len(embeddings_index))
# In[50]:
# Need to use 300 for embedding dimensions to match GloVe vectors.
embedding_dim = 300
nb_words = len(word_index)
word_embedding_matrix = np.zeros((nb_words + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# In[66]:
units = 150
dropout = 0.25
nb_filter = 32
filter_length = 3
embedding_dim = 300
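# Architecture overview of the ensemble below: model1/model2 are 1D-CNN
# branches and model3/model4 are time-distributed dense + max-pool branches,
# each pair encoding question1 and question2; modela and modelb merge the
# CNN and dense branches respectively, and the final model merges both and
# ends in a sigmoid duplicate-probability output.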
model1 = Sequential()
model1.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Flatten())
model2 = Sequential()
model2.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Flatten())
model3 = Sequential()
model3.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model3.add(TimeDistributed(Dense(embedding_dim)))
model3.add(BatchNormalization())
model3.add(Activation('relu'))
model3.add(Dropout(dropout))
model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
model4 = Sequential()
model4.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model4.add(TimeDistributed(Dense(embedding_dim)))
model4.add(BatchNormalization())
model4.add(Activation('relu'))
model4.add(Dropout(dropout))
model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
modela = Sequential()
modela.add(Merge([model1, model2], mode='concat'))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modelb = Sequential()
modelb.add(Merge([model3, model4], mode='concat'))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
model = Sequential()
model.add(Merge([modela, modelb], mode='concat'))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(1))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
#sgd = SGD(lr=0.01, decay=5e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[67]:
save_best_weights = 'question_pairs_weights.h5'
t0 = time.time()
callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True),
EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')]
history = model.fit([train_q1, train_q2],
y_train,
batch_size=200,
nb_epoch=100,
validation_split=0.1,
verbose=True,
shuffle=True,
callbacks=callbacks)
t1 = time.time()
print("Minutes elapsed: %f" % ((t1 - t0) / 60.))
# In[68]:
summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ],
'train_acc': history.history['acc'],
'valid_acc': history.history['val_acc'],
'train_loss': history.history['loss'],
'valid_loss': history.history['val_loss']})
# In[69]:
summary_stats
# In[70]:
plt.plot(summary_stats.train_loss)
plt.plot(summary_stats.valid_loss)
plt.show()
# In[71]:
min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss']))
print('Minimum loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss))
min_loss = round(min_loss, 4)
# In[72]:
model.load_weights(save_best_weights)
predictions = model.predict([test_q1, test_q2], verbose = True)
# In[73]:
#Create submission
submission = pd.DataFrame(predictions, columns=['is_duplicate'])
submission.insert(0, 'test_id', test.test_id)
file_name = 'submission_{}.csv'.format(min_loss)
submission.to_csv(file_name, index=False)
# In[74]:
submission.head(10)
| mit |
lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/build_app.py | 9 | 77527 | """
Mac OS X .app build command for distutils
Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller.
"""
from __future__ import print_function
import imp
import sys
import os
import zipfile
import plistlib
import shlex
import shutil
import textwrap
import pkg_resources
import collections
from modulegraph import modulegraph
from py2app.apptemplate.setup import main as script_executable
from py2app.util import mergecopy, make_exec
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules
from modulegraph.modulegraph import SourceModule, Package, Script
from modulegraph import zipio
import macholib.dyld
import macholib.MachOStandalone
import macholib.MachO
from macholib.util import flipwritable
from py2app.create_appbundle import create_appbundle
from py2app.create_pluginbundle import create_pluginbundle
from py2app.util import \
fancy_split, byte_compile, make_loader, imp_find_module, \
copy_tree, fsencoding, strip_files, in_system_path, makedirs, \
iter_platform_files, find_version, skipscm, momc, copy_file, \
copy_resource
from py2app.filters import \
not_stdlib_filter, not_system_filter, has_filename_filter
from py2app import recipes
from distutils.sysconfig import get_config_var, get_config_h_filename
PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK')
PLUGIN_SUFFIXES = {
'.qlgenerator': 'QuickLook',
'.mdimporter': 'Spotlight',
'.xpc': 'XPCServices',
'.service': 'Services',
'.prefPane': 'PreferencePanes',
'.iaplugin': 'InternetAccounts',
'.action': 'Automator',
}
try:
basestring
except NameError:
basestring = str
def rewrite_tkinter_load_commands(tkinter_path):
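    # Rewrites the Tcl/Tk load commands of the _tkinter extension so that
    # they point at the Tcl.framework/Tk.framework copies under
    # /System/Library/Frameworks (of a matching version) instead of a
    # user-installed build, then writes the patched Mach-O headers back.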
print("rewrite_tk", tkinter_path)
m = macholib.MachO.MachO(tkinter_path)
tcl_path = None
tk_path = None
rewrite_map = {}
for header in m.headers:
for idx, name, other in header.walkRelocatables():
if other.endswith('/Tk'):
if tk_path is not None and other != tk_path:
raise DistutilsPlatformError('_tkinter is linked to different Tk paths')
tk_path = other
elif other.endswith('/Tcl'):
if tcl_path is not None and other != tcl_path:
raise DistutilsPlatformError('_tkinter is linked to different Tcl paths')
tcl_path = other
if tcl_path is None or 'Tcl.framework' not in tcl_path:
raise DistutilsPlatformError('_tkinter is not linked a Tcl.framework')
if tk_path is None or 'Tk.framework' not in tk_path:
raise DistutilsPlatformError('_tkinter is not linked a Tk.framework')
system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current']
system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current']
if not tcl_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tcl
ver = os.path.basename(os.path.dirname(tcl_path))
if ver not in system_tcl_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System')
rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,)
if not tk_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tk
ver = os.path.basename(os.path.dirname(tk_path))
if ver not in system_tk_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System')
rewrite_map[tk_path] = '/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,)
if rewrite_map:
print("Relinking _tkinter.so to system Tcl/Tk")
rewroteAny = False
for header in m.headers:
for idx, name, other in header.walkRelocatables():
data = rewrite_map.get(other)
if data:
if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())):
rewroteAny = True
if rewroteAny:
old_mode = flipwritable(m.filename)
try:
with open(m.filename, 'rb+') as f:
for header in m.headers:
f.seek(0)
header.write(f)
f.seek(0, 2)
f.flush()
finally:
flipwritable(m.filename, old_mode)
else:
print("_tkinter already linked against system Tcl/Tk")
def get_zipfile(dist, semi_standalone=False):
if sys.version_info[0] == 3:
if semi_standalone:
return "python%d.%d/site-packages.zip"%(sys.version_info[:2])
else:
return "python%d%d.zip"%(sys.version_info[:2])
return getattr(dist, "zipfile", None) or "site-packages.zip"
def framework_copy_condition(src):
# Skip Headers, .svn, and CVS dirs
return skipscm(src) and os.path.basename(src) != 'Headers'
class PythonStandalone(macholib.MachOStandalone.MachOStandalone):
def __init__(self, appbuilder, *args, **kwargs):
super(PythonStandalone, self).__init__(*args, **kwargs)
self.appbuilder = appbuilder
def copy_dylib(self, src):
dest = os.path.join(self.dest, os.path.basename(src))
if os.path.islink(src):
dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src)))
            # Ensure that the original name also exists, avoids problems when
# the filename is used from Python (see issue #65)
#
# NOTE: The if statement checks that the target link won't
# point to itself, needed for systems like homebrew that
# store symlinks in "public" locations that point to
# files of the same name in a per-package install location.
link_dest = os.path.join(self.dest, os.path.basename(src))
if os.path.basename(link_dest) != os.path.basename(dest):
os.symlink(os.path.basename(dest), link_dest)
else:
dest = os.path.join(self.dest, os.path.basename(src))
return self.appbuilder.copy_dylib(src, dest)
def copy_framework(self, info):
destfn = self.appbuilder.copy_framework(info, self.dest)
dest = os.path.join(self.dest, info['shortname'] + '.framework')
self.pending.append((destfn, iter_platform_files(dest)))
return destfn
def iterRecipes(module=recipes):
for name in dir(module):
if name.startswith('_'):
continue
check = getattr(getattr(module, name), 'check', None)
if check is not None:
yield (name, check)
# A very loosely defined "target". We assume either a "script" or "modules"
# attribute. Some attributes will be target specific.
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
# If modules is a simple string, assume they meant list
m = self.__dict__.get("modules")
if m and isinstance(m, basestring):
self.modules = [m]
def get_dest_base(self):
dest_base = getattr(self, "dest_base", None)
if dest_base: return dest_base
script = getattr(self, "script", None)
if script:
return os.path.basename(os.path.splitext(script)[0])
modules = getattr(self, "modules", None)
assert modules, "no script, modules or dest_base specified"
return modules[0].split(".")[-1]
def validate(self):
resources = getattr(self, "resources", [])
for r_filename in resources:
if not os.path.isfile(r_filename):
raise DistutilsOptionError(
"Resource filename '%s' does not exist" % (r_filename,))
def validate_target(dist, attr, value):
res = FixupTargets(value, "script")
other = {"app": "plugin", "plugin": "app"}
if res and getattr(dist, other[attr]):
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
def FixupTargets(targets, default_attribute):
if not targets:
return targets
try:
targets = eval(targets)
except:
pass
ret = []
for target_def in targets:
if isinstance(target_def, basestring):
# Create a default target object, with the string as the attribute
target = Target(**{default_attribute: target_def})
else:
d = getattr(target_def, "__dict__", target_def)
if default_attribute not in d:
raise DistutilsOptionError(
"This target class requires an attribute '%s'"
% (default_attribute,))
target = Target(**d)
target.validate()
ret.append(target)
return ret
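# Illustrative example (assumed): FixupTargets(["main.py"], "script") yields a
# single Target whose .script attribute is "main.py"; dict-like entries are
# expanded into Target attributes instead.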
def normalize_data_file(fn):
if isinstance(fn, basestring):
fn = convert_path(fn)
return ('', [fn])
return fn
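# Illustrative example (assumed): normalize_data_file("data/config.plist")
# returns ('', ['data/config.plist']); (subdir, [files]) tuples pass through
# unchanged.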
def is_system():
prefix = sys.prefix
if os.path.exists(os.path.join(prefix, ".Python")):
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
return in_system_path(prefix)
def installation_info(version=None):
if version is None:
version = sys.version
if is_system():
return version[:3] + " (FORCED: Using vendor Python)"
else:
return version[:3]
class py2app(Command):
description = "create a Mac OS X application or plugin from Python scripts"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [
("app=", None,
"application bundle to be built"),
("plugin=", None,
"plugin bundle to be built"),
('optimize=', 'O',
"optimization level: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
("includes=", 'i',
"comma-separated list of modules to include"),
("packages=", 'p',
"comma-separated list of packages to include"),
("iconfile=", None,
"Icon file to use"),
("excludes=", 'e',
"comma-separated list of modules to exclude"),
("dylib-excludes=", 'E',
"comma-separated list of frameworks or dylibs to exclude"),
("datamodels=", None,
"xcdatamodels to be compiled and copied into Resources"),
("mappingmodels=", None,
"xcmappingmodels to be compiled and copied into Resources"),
("resources=", 'r',
"comma-separated list of additional data files and folders to include (not for code!)"),
("frameworks=", 'f',
"comma-separated list of additional frameworks and dylibs to include"),
("plist=", 'P',
"Info.plist template file, dict, or plistlib.Plist"),
("extension=", None,
"Bundle extension [default:.app for app, .plugin for plugin]"),
("graph", 'g',
"output module dependency graph"),
("xref", 'x',
"output module cross-reference as html"),
("no-strip", None,
"do not strip debug and local symbols from output"),
#("compressed", 'c',
# "create a compressed zipfile"),
("no-chdir", 'C',
"do not change to the data directory (Contents/Resources) [forced for plugins]"),
#("no-zip", 'Z',
# "do not use a zip file (XXX)"),
("semi-standalone", 's',
"depend on an existing installation of Python " + installation_info()),
("alias", 'A',
"Use an alias to current source file (for development only!)"),
("argv-emulation", 'a',
"Use argv emulation [disabled for plugins]."),
("argv-inject=", None,
"Inject some commands into the argv"),
("emulate-shell-environment", None,
"Emulate the shell environment you get in a Terminal window"),
("use-pythonpath", None,
"Allow PYTHONPATH to effect the interpreter's environment"),
("use-faulthandler", None,
"Enable the faulthandler in the generated bundle (Python 3.3 or later)"),
("verbose-interpreter", None,
"Start python in verbose mode"),
('bdist-base=', 'b',
'base directory for build library (default is build)'),
('dist-dir=', 'd',
"directory to put final built distributions in (default is dist)"),
('site-packages', None,
"include the system and user site-packages into sys.path"),
("strip", 'S',
"strip debug and local symbols from output (on by default, for compatibility)"),
("prefer-ppc", None,
"Force application to run translated on i386 (LSPrefersPPC=True)"),
('debug-modulegraph', None,
'Drop to pdb console after the module finding phase is complete'),
("debug-skip-macholib", None,
"skip macholib phase (app will not be standalone!)"),
("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"),
("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"),
("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"),
("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"),
("include-plugins=", None, "List of plugins to include"),
("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"),
("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"),
("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"),
]
boolean_options = [
#"compressed",
"xref",
"strip",
"no-strip",
"site-packages",
"semi-standalone",
"alias",
"argv-emulation",
#"no-zip",
"use-pythonpath",
"use-faulthandler",
"verbose-interpreter",
"no-chdir",
"debug-modulegraph",
"debug-skip-macholib",
"graph",
"prefer-ppc",
"emulate-shell-environment",
"force-system-tk",
"report-missing-from-imports",
"no-report-missing-conditional-import",
]
def initialize_options (self):
self.app = None
self.plugin = None
self.bdist_base = None
self.xref = False
self.graph = False
self.no_zip = 0
self.optimize = 0
if hasattr(sys, 'flags'):
self.optimize = sys.flags.optimize
self.arch = None
self.strip = True
self.no_strip = False
self.iconfile = None
self.extension = None
self.alias = 0
self.argv_emulation = 0
self.emulate_shell_environment = 0
self.argv_inject = None
self.no_chdir = 0
self.site_packages = False
self.use_pythonpath = False
self.use_faulthandler = False
self.verbose_interpreter = False
self.includes = None
self.packages = None
self.excludes = None
self.dylib_excludes = None
self.frameworks = None
self.resources = None
self.datamodels = None
self.mappingmodels = None
self.plist = None
self.compressed = True
self.semi_standalone = is_system()
self.dist_dir = None
self.debug_skip_macholib = False
self.debug_modulegraph = False
self.prefer_ppc = False
self.filters = []
self.eggs = []
self.qt_plugins = None
self.matplotlib_backends = None
self.extra_scripts = None
self.include_plugins = None
self.force_system_tk = False
self.report_missing_from_imports = False
self.no_report_missing_conditional_import = False
def finalize_options (self):
if not self.strip:
self.no_strip = True
elif self.no_strip:
self.strip = False
self.optimize = int(self.optimize)
if self.argv_inject and isinstance(self.argv_inject, basestring):
self.argv_inject = shlex.split(self.argv_inject)
self.includes = set(fancy_split(self.includes))
self.includes.add('encodings.*')
if self.use_faulthandler:
self.includes.add('faulthandler')
#if sys.version_info[:2] >= (3, 2):
# self.includes.add('pkgutil')
# self.includes.add('imp')
self.packages = set(fancy_split(self.packages))
self.excludes = set(fancy_split(self.excludes))
self.excludes.add('readline')
# included by apptemplate
self.excludes.add('site')
if getattr(self.distribution, 'install_requires', None):
self.includes.add('pkg_resources')
self.eggs = pkg_resources.require(self.distribution.install_requires)
# Setuptools/distribute style namespace packages uses
# __import__('pkg_resources'), and that import isn't detected at the
# moment. Forcefully include pkg_resources.
self.includes.add('pkg_resources')
dylib_excludes = fancy_split(self.dylib_excludes)
self.dylib_excludes = []
for fn in dylib_excludes:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
try:
res = macholib.dyld.dyld_find(fn)
except ValueError:
res = fn
self.dylib_excludes.append(res)
self.resources = fancy_split(self.resources)
frameworks = fancy_split(self.frameworks)
self.frameworks = []
for fn in frameworks:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
res = macholib.dyld.dyld_find(fn)
while res in self.dylib_excludes:
self.dylib_excludes.remove(res)
self.frameworks.append(res)
if not self.plist:
self.plist = {}
if isinstance(self.plist, basestring):
self.plist = plistlib.Plist.fromFile(self.plist)
if isinstance(self.plist, plistlib.Dict):
self.plist = dict(self.plist.__dict__)
else:
self.plist = dict(self.plist)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('bdist_base', 'bdist_base'))
if self.semi_standalone:
self.filters.append(not_stdlib_filter)
if self.iconfile is None and 'CFBundleIconFile' not in self.plist:
# Default is the generic applet icon in the framework
iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app',
'Contents', 'Resources', 'PythonApplet.icns')
if os.path.exists(iconfile):
self.iconfile = iconfile
self.runtime_preferences = list(self.get_runtime_preferences())
self.qt_plugins = fancy_split(self.qt_plugins)
self.matplotlib_backends = fancy_split(self.matplotlib_backends)
self.extra_scripts = fancy_split(self.extra_scripts)
self.include_plugins = fancy_split(self.include_plugins)
if self.datamodels:
print("WARNING: the datamodels option is deprecated, add model files to the list of resources")
if self.mappingmodels:
print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources")
def get_default_plist(self):
# XXX - this is all single target stuff
plist = {}
target = self.targets[0]
version = self.distribution.get_version()
if version == '0.0.0':
try:
version = find_version(target.script)
except ValueError:
pass
if not isinstance(version, basestring):
raise DistutilsOptionError("Version must be a string")
if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))):
raise DistutilsOptionError("Version must be a string")
plist['CFBundleVersion'] = version
name = self.distribution.get_name()
if name == 'UNKNOWN':
base = target.get_dest_base()
name = os.path.basename(base)
plist['CFBundleName'] = name
return plist
def get_runtime(self, prefix=None, version=None):
# XXX - this is a bit of a hack!
# ideally we'd use dylib functions to figure this out
if prefix is None:
prefix = sys.prefix
if version is None:
version = sys.version
version = version[:3]
info = None
if os.path.exists(os.path.join(prefix, ".Python")):
# We're in a virtualenv environment, locate the real prefix
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
try:
fmwk = macholib.dyld.framework_find(prefix)
except ValueError:
info = None
else:
info = macholib.dyld.framework_info(fmwk)
if info is not None:
dylib = info['name']
runtime = os.path.join(info['location'], info['name'])
else:
dylib = 'libpython%s.dylib' % (sys.version[:3],)
runtime = os.path.join(prefix, 'lib', dylib)
return dylib, runtime
def symlink(self, src, dst):
try:
os.remove(dst)
except OSError:
pass
os.symlink(src, dst)
def get_runtime_preferences(self, prefix=None, version=None):
dylib, runtime = self.get_runtime(prefix=prefix, version=version)
yield os.path.join('@executable_path', '..', 'Frameworks', dylib)
if self.semi_standalone or self.alias:
yield runtime
def run(self):
if get_config_var('PYTHONFRAMEWORK') is None:
if not get_config_var('Py_ENABLE_SHARED'):
raise DistutilsPlatformError("This python does not have a shared library or framework")
else:
# Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared
# library build did not read the application environment correctly. The collection of
# if statements below gives a clean error message when py2app is started, instead of
# building a bundle that will give a confusing error message when started.
msg = "py2app is not supported for a shared library build with this version of python"
if sys.version_info[:2] < (2,7):
raise DistutilsPlatformError(msg)
            elif sys.version_info[:2] == (2,7) and sys.version_info[2] < 4:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] < 2:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[2] < 3:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[2] < 1:
                raise DistutilsPlatformError(msg)
raise DistutilsPlatformError(msg)
if hasattr(self.distribution, "install_requires") \
and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
build = self.reinitialize_command('build')
build.build_base = self.bdist_base
build.run()
self.create_directories()
self.fixup_distribution()
self.initialize_plist()
sys_old_path = sys.path[:]
extra_paths = [
os.path.dirname(target.script)
for target in self.targets
]
extra_paths.extend([build.build_platlib, build.build_lib])
self.additional_paths = [
os.path.abspath(p)
for p in extra_paths
if p is not None
]
sys.path[:0] = self.additional_paths
# this needs additional_paths
self.initialize_prescripts()
try:
self._run()
finally:
sys.path = sys_old_path
def iter_datamodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcdatamodel':
basefn = fn
fn += '.xcdatamodel'
destfn = os.path.basename(basefn) + '.mom'
yield fn, os.path.join(resdir, path, destfn)
def compile_datamodels(self, resdir):
for src, dest in self.iter_datamodels(resdir):
print("compile datamodel", src, "->", dest)
self.mkpath(os.path.dirname(dest))
momc(src, dest)
def iter_mappingmodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcmappingmodel':
basefn = fn
fn += '.xcmappingmodel'
destfn = os.path.basename(basefn) + '.cdm'
yield fn, os.path.join(resdir, path, destfn)
def compile_mappingmodels(self, resdir):
for src, dest in self.iter_mappingmodels(resdir):
self.mkpath(os.path.dirname(dest))
mapc(src, dest)
def iter_extra_plugins(self):
for item in self.include_plugins:
if isinstance(item, (list, tuple)):
subdir, path = item
else:
ext = os.path.splitext(item)[1]
try:
subdir = PLUGIN_SUFFIXES[ext]
path = item
except KeyError:
raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,))
yield path, os.path.join(subdir, os.path.basename(path))
def iter_data_files(self):
dist = self.distribution
allres = chain(getattr(dist, 'data_files', ()) or (), self.resources)
for (path, files) in (normalize_data_file(fn) for fn in allres):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
yield fn, os.path.join(path, os.path.basename(fn))
def collect_scripts(self):
# these contains file names
scripts = set()
for target in self.targets:
scripts.add(target.script)
scripts.update([
k for k in target.prescripts if isinstance(k, basestring)
])
if hasattr(target, 'extra_scripts'):
scripts.update(target.extra_scripts)
scripts.update(self.extra_scripts)
return scripts
def get_plist_options(self):
result = dict(
PyOptions=dict(
use_pythonpath=bool(self.use_pythonpath),
site_packages=bool(self.site_packages),
alias=bool(self.alias),
argv_emulation=bool(self.argv_emulation),
emulate_shell_environment=bool(self.emulate_shell_environment),
no_chdir=bool(self.no_chdir),
prefer_ppc=self.prefer_ppc,
verbose=self.verbose_interpreter,
use_faulthandler=self.use_faulthandler,
),
)
if self.optimize:
result['PyOptions']['optimize'] = self.optimize
return result
def initialize_plist(self):
plist = self.get_default_plist()
for target in self.targets:
plist.update(getattr(target, 'plist', {}))
plist.update(self.plist)
plist.update(self.get_plist_options())
if self.iconfile:
iconfile = self.iconfile
if not os.path.exists(iconfile):
iconfile = iconfile + '.icns'
if not os.path.exists(iconfile):
raise DistutilsOptionError("icon file must exist: %r"
% (self.iconfile,))
self.resources.append(iconfile)
plist['CFBundleIconFile'] = os.path.basename(iconfile)
if self.prefer_ppc:
plist['LSPrefersPPC'] = True
self.plist = plist
return plist
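# Illustrative sketch (not part of the original py2app source): the update
# order in initialize_plist() above means later sources win -- defaults,
# then per-target plists, then self.plist, then the generated PyOptions.
# A stand-alone dict-only illustration of that precedence, with made-up keys:
def _plist_precedence_example():
    defaults = {'CFBundleName': 'MyApp', 'CFBundlePackageType': 'APPL'}
    per_target = {'CFBundleShortVersionString': '0.1'}
    user_plist = {'CFBundleName': 'RenamedApp'}
    options = {'PyOptions': {'alias': False}}
    merged = dict(defaults)
    for overlay in (per_target, user_plist, options):
        merged.update(overlay)
    return merged   # CFBundleName comes out as 'RenamedApp'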
def run_alias(self):
self.app_files = []
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_alias_executable(target, target.script, extra_scripts)
self.app_files.append(dst)
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
def collect_recipedict(self):
return dict(iterRecipes())
def get_modulefinder(self):
if self.debug_modulegraph:
debug = 4
else:
debug = 0
return find_modules(
scripts=self.collect_scripts(),
includes=self.includes,
packages=self.packages,
excludes=self.excludes,
debug=debug,
)
def collect_filters(self):
return [has_filename_filter] + list(self.filters)
def process_recipes(self, mf, filters, flatpackages, loader_files):
rdict = self.collect_recipedict()
while True:
for name, check in rdict.items():
rval = check(self, mf)
if rval is None:
continue
# deleting from rdict is safe here because we break out of the iteration below
del rdict[name]
print('*** using recipe: %s ***' % (name,))
if rval.get('packages'):
self.packages.update(rval['packages'])
find_needed_modules(mf, packages=rval['packages'])
for pkg in rval.get('flatpackages', ()):
if isinstance(pkg, basestring):
pkg = (os.path.basename(pkg), pkg)
flatpackages[pkg[0]] = pkg[1]
filters.extend(rval.get('filters', ()))
loader_files.extend(rval.get('loader_files', ()))
newbootstraps = list(map(self.get_bootstrap,
rval.get('prescripts', ())))
if rval.get('includes'):
find_needed_modules(mf, includes=rval['includes'])
if rval.get('resources'):
self.resources.extend(rval['resources'])
for fn in newbootstraps:
if isinstance(fn, basestring):
mf.run_script(fn)
for target in self.targets:
target.prescripts.extend(newbootstraps)
break
else:
break
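# Illustrative sketch (not part of the original py2app source): each recipe
# registered in the recipe dict is a callable check(cmd, mf) that returns
# None when it does not apply, or a dict whose optional keys are the ones
# consumed by process_recipes() above ('packages', 'flatpackages', 'filters',
# 'loader_files', 'prescripts', 'includes', 'resources').  A hypothetical
# recipe for a made-up package might look like this:
def _example_recipe_check(cmd, mf):
    if mf.findNode('example_package') is None:
        return None
    return dict(
        packages=['example_package'],
        includes=['example_package._speedups'],
    )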
def _run(self):
try:
if self.alias:
self.run_alias()
else:
self.run_normal()
except:
raise
# XXX - remove when not debugging
# distutils sucks
import pdb, sys, traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
print("Done!")
def filter_dependencies(self, mf, filters):
print("*** filtering dependencies ***")
nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters)
print('%d total' % (nodes_seen,))
print('%d filtered' % (nodes_removed,))
print('%d orphaned' % (nodes_orphaned,))
print('%d remaining' % (nodes_seen - nodes_removed,))
def get_appname(self):
return self.plist['CFBundleName']
def build_xref(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.html')
print("*** creating dependency html: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.create_xref(fp)
def build_graph(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.dot')
print("*** creating dependency graph: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.graphreport(fp, flatpackages=flatpackages)
def finalize_modulefinder(self, mf):
for item in mf.flatten():
if isinstance(item, Package) and item.filename == '-':
if sys.version_info[:2] <= (3,3):
fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py')
if not os.path.exists(fn):
dn = os.path.dirname(fn)
if not os.path.exists(dn):
os.makedirs(dn)
with open(fn, 'w') as fp:
pass
item.filename = fn
py_files, extensions = parse_mf_results(mf)
# Remove all top-level scripts from the list of python files,
# those get treated differently.
py_files = [ item for item in py_files if not isinstance(item, Script) ]
extensions = list(extensions)
return py_files, extensions
def collect_packagedirs(self):
return list(filter(os.path.exists, [
os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '')
for pkg in self.packages
]))
def run_normal(self):
mf = self.get_modulefinder()
filters = self.collect_filters()
flatpackages = {}
loader_files = []
self.process_recipes(mf, filters, flatpackages, loader_files)
if self.debug_modulegraph:
import pdb
pdb.Pdb().set_trace()
self.filter_dependencies(mf, filters)
if self.graph:
self.build_graph(mf, flatpackages)
if self.xref:
self.build_xref(mf, flatpackages)
py_files, extensions = self.finalize_modulefinder(mf)
pkgdirs = self.collect_packagedirs()
self.create_binaries(py_files, pkgdirs, extensions, loader_files)
missing = []
syntax_error = []
invalid_bytecode = []
for module in mf.nodes():
if isinstance(module, modulegraph.MissingModule):
if module.identifier != '__main__':
missing.append(module)
elif isinstance(module, modulegraph.InvalidSourceModule):
syntax_error.append(module)
elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule):
invalid_bytecode.append(module)
if missing:
missing_unconditional = collections.defaultdict(set)
missing_fromimport = collections.defaultdict(set)
missing_fromimport_conditional = collections.defaultdict(set)
missing_conditional = collections.defaultdict(set)
for module in sorted(missing):
for m in mf.getReferers(module):
if m is None: continue # XXX
try:
ed = mf.edgeData(m, module)
except KeyError:
ed = None
if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo):
c = missing_unconditional
if ed.conditional or ed.function:
if ed.fromlist:
c = missing_fromimport_conditional
else:
c = missing_conditional
elif ed.fromlist:
c = missing_fromimport
c[module.identifier].add(m.identifier)
else:
missing_unconditional[module.identifier].add(m.identifier)
if missing_unconditional:
log.warn("Modules not found (unconditional imports):")
for m in sorted(missing_unconditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m]))))
log.warn("")
if missing_conditional and not self.no_report_missing_conditional_import:
log.warn("Modules not found (conditional imports):")
for m in sorted(missing_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m]))))
log.warn("")
if self.report_missing_from_imports and (
missing_fromimport or (
not self.no_report_missing_conditional_import and missing_fromimport_conditional)):
log.warn("Modules not found ('from ... import y'):")
for m in sorted(missing_fromimport):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m]))))
if not self.no_report_missing_conditional_import and missing_fromimport_conditional:
log.warn("")
log.warn("Conditional:")
for m in sorted(missing_fromimport_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m]))))
log.warn("")
if syntax_error:
log.warn("Modules with syntax errors:")
for module in sorted(syntax_error):
log.warn(" * %s"%(module.identifier))
log.warn("")
if invalid_bytecode:
log.warn("Modules with invalid bytecode:")
for module in sorted(invalid_bytecode):
log.warn(" * %s"%(module.identifier))
log.warn("")
def create_directories(self):
bdist_base = self.bdist_base
if self.semi_standalone:
self.bdist_dir = os.path.join(bdist_base,
'python%s-semi_standalone' % (sys.version[:3],), 'app')
else:
self.bdist_dir = os.path.join(bdist_base,
'python%s-standalone' % (sys.version[:3],), 'app')
if os.path.exists(self.bdist_dir):
shutil.rmtree(self.bdist_dir)
self.collect_dir = os.path.abspath(
os.path.join(self.bdist_dir, "collect"))
self.mkpath(self.collect_dir)
self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp"))
self.mkpath(self.temp_dir)
self.dist_dir = os.path.abspath(self.dist_dir)
self.mkpath(self.dist_dir)
self.lib_dir = os.path.join(self.bdist_dir,
os.path.dirname(get_zipfile(self.distribution, self.semi_standalone)))
self.mkpath(self.lib_dir)
self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload')
self.mkpath(self.ext_dir)
self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks')
self.mkpath(self.framework_dir)
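# Illustrative sketch (not part of the original py2app source): with the
# defaults above the scratch build tree looks roughly like
#
#   <bdist_base>/python2.7-standalone/app/
#       collect/                byte-compiled modules gathered before zipping
#       temp/                   generated loader stubs and other temporaries
#       Frameworks/             frameworks copied in later by macholib
#       <lib dir>/lib-dynload/  copied C extension modules
#
# where the <lib dir> segment is derived from get_zipfile() and the Python
# version shown is only an example.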
def create_binaries(self, py_files, pkgdirs, extensions, loader_files):
print("*** create binaries ***")
dist = self.distribution
pkgexts = []
copyexts = []
extmap = {}
def packagefilter(mod, pkgdirs=pkgdirs):
fn = os.path.realpath(getattr(mod, 'filename', None))
if fn is None:
return None
for pkgdir in pkgdirs:
if fn.startswith(pkgdir):
return None
return fn
if pkgdirs:
py_files = list(filter(packagefilter, py_files))
for ext in extensions:
fn = packagefilter(ext)
if fn is None:
fn = os.path.realpath(getattr(ext, 'filename', None))
pkgexts.append(ext)
else:
if '.' in ext.identifier:
py_files.append(self.create_loader(ext))
copyexts.append(ext)
extmap[fn] = ext
# byte compile the python modules into the target directory
print("*** byte compile python files ***")
byte_compile(py_files,
target_dir=self.collect_dir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
for item in py_files:
if not isinstance(item, Package): continue
self.copy_package_data(item, self.collect_dir)
self.lib_files = []
self.app_files = []
# create the shared zipfile containing all Python modules
archive_name = os.path.join(self.lib_dir,
get_zipfile(dist, self.semi_standalone))
for path, files in loader_files:
dest = os.path.join(self.collect_dir, path)
self.mkpath(dest)
for fn in files:
destfn = os.path.join(dest, os.path.basename(fn))
if os.path.isdir(fn):
self.copy_tree(fn, destfn, preserve_symlinks=False)
else:
self.copy_file(fn, destfn)
arcname = self.make_lib_archive(archive_name,
base_dir=self.collect_dir, verbose=self.verbose,
dry_run=self.dry_run)
# XXX: this doesn't work with python3
#self.lib_files.append(arcname)
# build the executables
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_executable(
target, arcname, pkgexts, copyexts, target.script, extra_scripts)
exp = os.path.join(dst, 'Contents', 'MacOS')
execdst = os.path.join(exp, 'python')
if self.semi_standalone:
self.symlink(sys.executable, execdst)
else:
if os.path.exists(os.path.join(sys.prefix, ".Python")):
fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:]
if rest_path.startswith('.'):
rest_path = rest_path[1:]
if PYTHONFRAMEWORK:
# When we're using a python framework, bin/python refers to a stub
# executable that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(os.path.join(prefix, rest_path), execdst)
else:
if PYTHONFRAMEWORK:
# When we're using a python framework, bin/python refers to a stub
# executable that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(sys.executable, execdst)
if not self.debug_skip_macholib:
if self.force_system_tk:
print("force system tk")
resdir = os.path.join(dst, 'Contents', 'Resources')
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
tkinter_path = os.path.join(ext_dir, '_tkinter.so')
if os.path.exists(tkinter_path):
rewrite_tkinter_load_commands(tkinter_path)
else:
print("tkinter not found at", tkinter_path)
mm = PythonStandalone(self, dst, executable_path=exp)
dylib, runtime = self.get_runtime()
if self.semi_standalone:
mm.excludes.append(runtime)
else:
mm.mm.run_file(runtime)
for exclude in self.dylib_excludes:
info = macholib.dyld.framework_info(exclude)
if info is not None:
exclude = os.path.join(
info['location'], info['shortname'] + '.framework')
mm.excludes.append(exclude)
for fmwk in self.frameworks:
mm.mm.run_file(fmwk)
platfiles = mm.run()
if self.strip:
platfiles = self.strip_dsym(platfiles)
self.strip_files(platfiles)
self.app_files.append(dst)
def copy_package_data(self, package, target_dir):
"""
Copy any package data in a python package into the target_dir.
This is a bit of a hack; it would be better to identify Python eggs
and copy those whole.
"""
exts = [ i[0] for i in imp.get_suffixes() ]
exts.append('.py')
exts.append('.pyc')
exts.append('.pyo')
def datafilter(item):
for e in exts:
if item.endswith(e):
return False
return True
target_dir = os.path.join(target_dir, *(package.identifier.split('.')))
for dname in package.packagepath:
filenames = list(filter(datafilter, zipio.listdir(dname)))
for fname in filenames:
if fname in ('.svn', 'CVS', '.hg', '.git'):
# Scrub revision manager junk
continue
if fname in ('__pycache__',):
# Ignore PEP 3147 bytecode cache
continue
if fname.startswith('.') and fname.endswith('.swp'):
# Ignore vim(1) temporary files
continue
if fname.endswith('~') or fname.endswith('.orig'):
# Ignore backup files for common tools (hg, emacs, ...)
continue
pth = os.path.join(dname, fname)
# Check if we have found a package, exclude those
if zipio.isdir(pth):
# XXX: the 'and not' part is wrong, need to fix zipio.isdir
for p in zipio.listdir(pth):
if p.startswith('__init__.') and p[8:] in exts:
break
else:
if os.path.isfile(pth):
# Avoid extracting a resource file that happens
# to be a zipfile.
# XXX: Need API in zipio for nicer code.
copy_file(pth, os.path.join(target_dir, fname))
else:
copy_tree(pth, os.path.join(target_dir, fname))
continue
elif zipio.isdir(pth) and (
zipio.isfile(os.path.join(pth, '__init__.py'))
or zipio.isfile(os.path.join(pth, '__init__.pyc'))
or zipio.isfile(os.path.join(pth, '__init__.pyo'))):
# Subdirectory is a python package, these will get included later on
# when the subpackage itself is included, ignore for now.
pass
else:
copy_file(pth, os.path.join(target_dir, fname))
def strip_dsym(self, platfiles):
""" Remove .dSYM directories in the bundled application """
#
# .dSYM directories contain detached debugging information and
# should be completely removed when the "strip" option is specified.
#
if self.dry_run:
return platfiles
for dirpath, dnames, fnames in os.walk(self.appdir):
for nm in list(dnames):
if nm.endswith('.dSYM'):
print("removing debug info: %s/%s"%(dirpath, nm))
shutil.rmtree(os.path.join(dirpath, nm))
dnames.remove(nm)
return [file for file in platfiles if '.dSYM' not in file]
def strip_files(self, files):
unstripped = 0
stripfiles = []
for fn in files:
unstripped += os.stat(fn).st_size
stripfiles.append(fn)
log.info('stripping %s', os.path.basename(fn))
strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose)
stripped = 0
for fn in stripfiles:
stripped += os.stat(fn).st_size
log.info('stripping saved %d bytes (%d / %d)',
unstripped - stripped, stripped, unstripped)
def copy_dylib(self, src, dst):
# will be copied from the framework?
if src != sys.executable:
force, self.force = self.force, True
self.copy_file(src, dst)
self.force = force
return dst
def copy_versioned_framework(self, info, dst):
# XXX - Boy is this ugly, but it makes sense because the developer
# could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc.
# Saves a good deal of space, and I'm pretty sure this ugly
# hack is correct in the general case.
version = info['version']
if version is None:
return self.raw_copy_framework(info, dst)
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
vsplit = os.path.join(infile, 'Versions').split(os.sep)
def condition(src, vsplit=vsplit, version=version):
srcsplit = src.split(os.sep)
if (
len(srcsplit) > len(vsplit) and
srcsplit[:len(vsplit)] == vsplit and
srcsplit[len(vsplit)] != version and
not os.path.islink(src)
):
return False
# Skip Headers, .svn, and CVS dirs
return framework_copy_condition(src)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=condition)
def copy_framework(self, info, dst):
force, self.force = self.force, True
if info['shortname'] == PYTHONFRAMEWORK:
self.copy_python_framework(info, dst)
else:
self.copy_versioned_framework(info, dst)
self.force = force
return os.path.join(dst, info['name'])
def raw_copy_framework(self, info, dst):
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=framework_copy_condition)
def copy_python_framework(self, info, dst):
# XXX - In this particular case we know exactly what we can
# get away with.. should this be extended to the general
# case? Per-framework recipes?
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
indir = os.path.dirname(os.path.join(info['location'], info['name']))
outdir = os.path.dirname(os.path.join(dst, info['name']))
self.mkpath(os.path.join(outdir, 'Resources'))
pydir = 'python%s.%s'%(sys.version_info[:2])
# Create a "Versions/Current" symlink inside Python.framework. This
# is required for the Mac App Store.
os.symlink(
os.path.basename(outdir),
os.path.join(os.path.dirname(outdir), "Current"))
# Likewise for two links in the root of the framework:
os.symlink(
'Versions/Current/Resources',
os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources'))
os.symlink(
os.path.join('Versions/Current', PYTHONFRAMEWORK),
os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK))
# Experiment for issue 57
if not os.path.exists(os.path.join(indir, 'include')):
alt = os.path.join(indir, 'Versions/Current')
if os.path.exists(os.path.join(alt, 'include')):
indir = alt
# distutils looks for some files relative to sys.executable, which
# means they have to be in the framework...
self.mkpath(os.path.join(outdir, 'include'))
self.mkpath(os.path.join(outdir, 'include', includedir))
self.mkpath(os.path.join(outdir, 'lib'))
self.mkpath(os.path.join(outdir, 'lib', pydir))
self.mkpath(os.path.join(outdir, 'lib', pydir, configdir))
fmwkfiles = [
os.path.basename(info['name']),
'Resources/Info.plist',
'include/%s/pyconfig.h'%(includedir),
]
if '_sysconfigdata' not in sys.modules:
fmwkfiles.append(
'lib/%s/%s/Makefile'%(pydir, configdir)
)
for fn in fmwkfiles:
self.copy_file(
os.path.join(indir, fn),
os.path.join(outdir, fn))
def fixup_distribution(self):
dist = self.distribution
# Trying to obtain app and plugin from dist for backward compatibility
# reasons.
app = dist.app
plugin = dist.plugin
# If we can get suitable values from self.app and self.plugin, we prefer
# them.
if self.app is not None or self.plugin is not None:
app = self.app
plugin = self.plugin
# Convert our args into target objects.
dist.app = FixupTargets(app, "script")
dist.plugin = FixupTargets(plugin, "script")
if dist.app and dist.plugin:
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
elif dist.app:
self.style = 'app'
self.targets = dist.app
elif dist.plugin:
self.style = 'plugin'
self.targets = dist.plugin
else:
raise DistutilsOptionError(
"You must specify either app or plugin")
if len(self.targets) != 1:
# XXX - support multiple targets?
raise DistutilsOptionError(
"Multiple targets not currently supported")
if not self.extension:
self.extension = '.' + self.style
# make sure all targets use the same directory, this is
# also the directory where the pythonXX.dylib must reside
paths = set()
for target in self.targets:
paths.add(os.path.dirname(target.get_dest_base()))
if len(paths) > 1:
raise DistutilsOptionError(
"all targets must use the same directory: %s" %
([p for p in paths],))
if paths:
app_dir = paths.pop() # the only element
if os.path.isabs(app_dir):
raise DistutilsOptionError(
"app directory must be relative: %s" % (app_dir,))
self.app_dir = os.path.join(self.dist_dir, app_dir)
self.mkpath(self.app_dir)
else:
# Do we allow specifying no targets?
# We can at least build a zipfile...
self.app_dir = self.lib_dir
def initialize_prescripts(self):
prescripts = []
prescripts.append('reset_sys_path')
if self.semi_standalone:
prescripts.append('semi_standalone_path')
if 0 and sys.version_info[:2] >= (3, 2) and not self.alias:
# Python 3.2 or later requires a more complicated
# bootstrap
prescripts.append('import_encodings')
if os.path.exists(os.path.join(sys.prefix, ".Python")):
# We're in a virtualenv, which means sys.path
# will be broken in alias builds unless we fix
# it.
if self.alias or self.semi_standalone:
prescripts.append("virtualenv")
prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,)))
if self.site_packages or self.alias:
import site
global_site_packages = not os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt'))
prescripts.append('virtualenv_site_packages')
prescripts.append(StringIO('_site_packages(%r, %r, %d)' % (
sys.prefix, sys.real_prefix, global_site_packages)))
elif self.site_packages or self.alias:
prescripts.append('site_packages')
if is_system():
prescripts.append('system_path_extras')
#if self.style == 'app':
# prescripts.append('setup_pkgresource')
included_subpkg = [pkg for pkg in self.packages if '.' in pkg]
if included_subpkg:
prescripts.append('setup_included_subpackages')
prescripts.append(StringIO('_path_hooks = %r'%(
included_subpkg)))
if self.emulate_shell_environment:
prescripts.append('emulate_shell_environment')
if self.argv_emulation and self.style == 'app':
prescripts.append('argv_emulation')
if 'CFBundleDocumentTypes' not in self.plist:
self.plist['CFBundleDocumentTypes'] = [
{
'CFBundleTypeOSTypes' : [
'****',
'fold',
'disk',
],
'CFBundleTypeRole': 'Viewer'
},
]
if self.argv_inject is not None:
prescripts.append('argv_inject')
prescripts.append(
StringIO('_argv_inject(%r)\n' % (self.argv_inject,)))
if self.style == 'app' and not self.no_chdir:
prescripts.append('chdir_resource')
if not self.alias:
prescripts.append('disable_linecache')
prescripts.append('boot_' + self.style)
else:
# Add ctypes prescript because it is needed to
# find libraries in the bundle, but we don't run
# recipes and hence the ctypes recipe is not used
# for alias builds.
prescripts.append('ctypes_setup')
if self.additional_paths:
prescripts.append('path_inject')
prescripts.append(
StringIO('_path_inject(%r)\n' % (self.additional_paths,)))
prescripts.append('boot_alias' + self.style)
newprescripts = []
for s in prescripts:
if isinstance(s, basestring):
newprescripts.append(
self.get_bootstrap('py2app.bootstrap.' + s))
else:
newprescripts.append(s)
for target in self.targets:
prescripts = getattr(target, 'prescripts', [])
target.prescripts = newprescripts + prescripts
def get_bootstrap(self, bootstrap):
if isinstance(bootstrap, basestring):
if not os.path.exists(bootstrap):
bootstrap = imp_find_module(bootstrap)[1]
return bootstrap
def get_bootstrap_data(self, bootstrap):
bootstrap = self.get_bootstrap(bootstrap)
if not isinstance(bootstrap, basestring):
return bootstrap.getvalue()
else:
with open(bootstrap, 'rU') as fp:
return fp.read()
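# Illustrative sketch (not part of the original py2app source): a prescript
# can be a dotted bootstrap name, an existing file path, or a file-like
# object, mirroring what initialize_prescripts() builds above.  The path
# below is a placeholder, not a real file:
def _example_bootstrap_inputs():
    return [
        'py2app.bootstrap.reset_sys_path',        # resolved via imp_find_module
        '/path/to/local_prescript.py',            # used as-is when it exists
        StringIO('_argv_inject(["--demo"])\n'),   # read back with .getvalue()
    ]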
def create_pluginbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating plugin bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
appdir, plist = create_pluginbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_appbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating application bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
pythonInfo = self.plist.setdefault('PythonInfoDict', {})
py2appInfo = pythonInfo.setdefault('py2app', {}).update(dict(
alias=bool(self.alias),
))
appdir, plist = create_appbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_bundle(self, target, script, use_runtime_preference=True):
fn = getattr(self, 'create_%sbundle' % (self.style,))
return fn(
target,
script,
use_runtime_preference=use_runtime_preference
)
def iter_frameworks(self):
for fn in self.frameworks:
fmwk = macholib.dyld.framework_info(fn)
if fmwk is None:
yield fn
else:
basename = fmwk['shortname'] + '.framework'
yield os.path.join(fmwk['location'], basename)
def build_alias_executable(self, target, script, extra_scripts):
# Build an alias executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
# symlink python executable
execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python')
prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python')
if os.path.exists(prefixPathExecutable):
pyExecutable = prefixPathExecutable
else:
pyExecutable = sys.executable
self.symlink(pyExecutable, execdst)
# make PYTHONHOME
pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
makedirs(pyhome)
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc'))
self.symlink(
os.path.join(realhome, 'config'),
os.path.join(pyhome, 'config'))
# symlink data files
# XXX: fixme: need to integrate automatic data conversion
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run, symlink=1)
except:
import traceback
traceback.print_exc()
raise
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run)
except:
import traceback
traceback.print_exc()
raise
# symlink frameworks
for src in self.iter_frameworks():
dest = os.path.join(
appdir, 'Contents', 'Frameworks', os.path.basename(src))
if src == dest:
continue
makedirs(os.path.dirname(dest))
self.symlink(os.path.abspath(src), dest)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),))
script_map = {}
for fn in extra_scripts:
tgt = os.path.realpath(fn)
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = tgt
elif fn.endswith('.pyw'):
script_map[fn[:-4]] = tgt
else:
script_map[fn] = tgt
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('try:\n')
bootfile.write(' _run()\n')
bootfile.write('except KeyboardInterrupt:\n')
bootfile.write(' pass\n')
bootfile.close()
target.appdir = appdir
return appdir
def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts):
# Build an executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
self.appdir = appdir
self.resdir = resdir
self.plist = plist
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
site_path = os.path.join(resdir, 'site.py')
byte_compile([
SourceModule('site', site_path),
],
target_dir=resdir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
if not self.dry_run:
os.unlink(site_path)
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),))
script_map = {}
for fn in extra_scripts:
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = fn
elif fn.endswith('.pyw'):
script_map[fn[:-4]] = fn
else:
script_map[fn] = fn
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('_run()\n')
bootfile.close()
self.copy_file(script, resdir)
for fn in extra_scripts:
self.copy_file(fn, resdir)
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
if sys.version_info[0] == 2 or self.semi_standalone:
arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
else:
arcdir = os.path.join(resdir, 'lib')
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
self.mkpath(pydir)
# The site.py file needs to be in two locations:
# 1) in lib/pythonX.Y, to be found during normal startup and
# by the 'python' executable
# 2) in the resources directory next to the script for
# semistandalone builds (the lib/pythonX.Y directory is too
# late on sys.path to be found in that case).
#
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc'))
cfgdir = os.path.join(pydir, configdir)
realcfg = os.path.join(realhome, configdir)
real_include = os.path.join(sys.prefix, 'include')
if self.semi_standalone:
self.symlink(realcfg, cfgdir)
self.symlink(real_include, os.path.join(resdir, 'include'))
else:
self.mkpath(cfgdir)
if '_sysconfigdata' not in sys.modules:
# Recent enough versions of Python 2.7 and 3.x have
# an _sysconfigdata module and don't need the Makefile
# to provide the sysconfig data interface. Don't copy
# them.
for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config':
rfn = os.path.join(realcfg, fn)
if os.path.exists(rfn):
self.copy_file(rfn, os.path.join(cfgdir, fn))
inc_dir = os.path.join(resdir, 'include', includedir)
self.mkpath(inc_dir)
self.copy_file(get_config_h_filename(),
os.path.join(inc_dir, 'pyconfig.h'))
self.copy_file(arcname, arcdir)
if sys.version_info[0] != 2:
import zlib
self.copy_file(zlib.__file__, os.path.dirname(arcdir))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True)
self.copy_tree(self.framework_dir,
os.path.join(appdir, 'Contents', 'Frameworks'),
preserve_symlinks=True)
for pkg_name in self.packages:
pkg = self.get_bootstrap(pkg_name)
print('XXXX', pkg_name, pkg)
if self.semi_standalone:
# For semi-standalone builds don't copy packages
# from the stdlib into the app bundle, even when
# they are mentioned in self.packages.
p = Package(pkg_name, pkg)
if not not_stdlib_filter(p):
continue
dst = os.path.join(pydir, pkg_name)
self.mkpath(dst)
self.copy_tree(pkg, dst)
# FIXME: The python files should be bytecompiled
# here (see issue 101)
for copyext in copyexts:
fn = os.path.join(ext_dir,
(copyext.identifier.replace('.', os.sep) +
os.path.splitext(copyext.filename)[1])
)
self.mkpath(os.path.dirname(fn))
copy_file(copyext.filename, fn, dry_run=self.dry_run)
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
target.appdir = appdir
return appdir
def create_loader(self, item):
# Hm, how to avoid needless recreation of this file?
slashname = item.identifier.replace('.', os.sep)
pathname = os.path.join(self.temp_dir, "%s.py" % slashname)
if os.path.exists(pathname):
if self.verbose:
print("skipping python loader for extension %r"
% (item.identifier,))
else:
self.mkpath(os.path.dirname(pathname))
# and what about dry_run?
if self.verbose:
print("creating python loader for extension %r"
% (item.identifier,))
fname = slashname + os.path.splitext(item.filename)[1]
source = make_loader(fname)
if not self.dry_run:
with open(pathname, "w") as fp:
fp.write(source)
else:
return
return SourceModule(item.identifier, pathname)
def make_lib_archive(self, zip_filename, base_dir, verbose=0,
dry_run=0):
# Like distutils "make_archive", except we can specify the
# compression to use - default is ZIP_STORED to keep the
# runtime performance up.
# Also, we don't append '.zip' to the filename.
from distutils.dir_util import mkpath
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
if self.compressed:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=compression)
save_cwd = os.getcwd()
os.chdir(base_dir)
for dirpath, dirnames, filenames in os.walk('.'):
if filenames:
# Ensure that there are directory entries for
# all directories in the zipfile. This is a
# workaround for <http://bugs.python.org/issue14905>:
# zipimport won't consider 'pkg/foo.py' to be in
# namespace package 'pkg' unless there is an
# entry for the directory (or there is a
# pkg/__init__.py file as well)
z.write(dirpath, dirpath)
for fn in filenames:
path = os.path.normpath(os.path.join(dirpath, fn))
if os.path.isfile(path):
z.write(path, path)
os.chdir(save_cwd)
z.close()
return zip_filename
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1, condition=None):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
This version doesn't bork on existing symlinks
"""
return copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run,
condition=condition)
| apache-2.0 |
ashhher3/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large dataset in
chunks. We load one image at a time and extract 50 patches at random
from it. Once we have accumulated 500 of these patches (using 10
images), we run the `partial_fit` method of the online KMeans object,
MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to partial_fit,
because the number of patches they represent has become too low and it
is better to choose a new random cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 6 * 50 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains the 2D line class, which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
warnings.warn("Import this directly from matplotlib.cbook",
DeprecationWarning)
# Warning added 2008/07/22
from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
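# Illustrative usage sketch (not part of the original matplotlib source):
# for a click at pixel (5, 1) near a horizontal three-point polyline, only
# the first segment lies within a 3 pixel radius.  The numbers are made-up
# test data chosen to show the call shape:
#
#     x = np.array([0.0, 10.0, 20.0])
#     y = np.array([0.0, 0.0, 0.0])
#     segment_hits(5.0, 1.0, x, y, 3.0)   # -> array([0])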
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, eg one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[1],self._y[1],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self.set_marker(marker)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self._point_size_reduction = 0.5
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalid = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
if self.figure is None:
warnings.warn('no figure set when checking if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
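# Illustrative note (not part of the original matplotlib source): the pick
# radius is specified in points and converted to pixels in contains() above,
# so with a figure at dpi=100 and the default pickradius of 5 the containment
# test uses a radius of 100/72 * 5, roughly 6.9 pixels.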
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def setpickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker is not None:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
not_masked = 0
if not ma.isMaskedArray(x):
x = np.asarray(x)
not_masked += 1
if not ma.isMaskedArray(y):
y = np.asarray(y)
not_masked += 1
if (not_masked < 2 or
(x is not self._xorig and
(x.shape != self._xorig.shape or np.any(x != self._xorig))) or
(y is not self._yorig and
(y.shape != self._yorig.shape or np.any(y != self._yorig)))):
self._xorig = x
self._yorig = y
self._invalid = True
def recache(self):
#if self.axes is None: print 'recache no axes'
#else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
x = ma.asarray(self.convert_xunits(self._xorig), float)
y = ma.asarray(self.convert_yunits(self._yorig), float)
x = ma.ravel(x)
y = ma.ravel(y)
else:
x = np.asarray(self.convert_xunits(self._xorig), float)
y = np.asarray(self.convert_yunits(self._yorig), float)
x = np.ravel(x)
y = np.ravel(y)
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, float)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, float)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
# Masked arrays are now handled by the Path class itself
self._path = Path(self._xy)
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def get_markerfacecolor(self):
if (self._markerfacecolor is None or
(is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower()=='none') ):
return self._markerfacecolor
elif (is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower() == 'auto'):
return self._color
else:
return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalid:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalid:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalid:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalid:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
True if line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
any drawstyle in combination with a linestyle, e.g. 'steps--'.
"""
# handle long drawstyle names before short ones !
for ds in flatten([k.keys() for k in (self._drawStyles_l,
self._drawStyles_s)], is_string_like):
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
def set_marker(self, marker):
"""
Set the line marker
========== ==========================
marker description
========== ==========================
'.' point
',' pixel
'o' circle
'v' triangle_down
'^' triangle_up
'<' triangle_left
'>' triangle_right
'1' tri_down
'2' tri_up
'3' tri_left
'4' tri_right
's' square
'p' pentagon
'*' star
'h' hexagon1
'H' hexagon2
'+' plus
'x' x
'D' diamond
'd' thin_diamond
'|' vline
'_' hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
'None' nothing
' ' nothing
'' nothing
========== ==========================
ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
| '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
| 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
| TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
| 'None' | ' ' | '' ]
"""
if marker not in self._markers:
verbose.report('Unrecognized marker style %s, %s' %
(marker, type(marker)))
if marker in [' ','']:
marker = 'None'
self._marker = marker
self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
        Set the dash sequence, a sequence of alternating on/off ink
        lengths in points.  If seq is empty or if seq = (None, None),
        the linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
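        Illustrative usage (editor's sketch; ``line`` is an existing
        :class:`Line2D` instance)::
          line.set_dashes([6, 2])   # 6 points on, 2 points off
          line.set_dashes([])       # revert to a solid line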
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
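        # Build the 'steps-pre' path: between consecutive data points an
        # intermediate vertex (x[i-1], y[i]) is inserted, so the y value
        # changes at the start of each interval and the resulting path has
        # 2*N-1 vertices in total.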
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
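        # Build the 'steps-mid' path: each y value is held constant and the
        # jump happens halfway between neighbouring x values, giving 2*N
        # vertices (the first and last keep their original x coordinates).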
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
        'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
imaculate/scikit-learn | sklearn/linear_model/randomized_l1.py | 11 | 24849 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
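    # Fit the estimator on many random subsamples, each time randomly
    # rescaling the penalty of a subset of features, and average the binary
    # selection masks: the returned scores are selection frequencies in [0, 1].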
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling`` here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
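    A minimal end-to-end sketch (editor's addition; the toy data below is
    purely illustrative):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(50, 5)
    >>> y = X[:, 0] + 0.1 * np.random.RandomState(1).randn(50)
    >>> randomized_lasso = randomized_lasso.fit(X, y)  # doctest: +SKIP
    >>> selected_mask = randomized_lasso.get_support()  # doctest: +SKIP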
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
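    A minimal end-to-end sketch (editor's addition; the toy data below is
    purely illustrative):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(60, 4)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> randomized_logistic = randomized_logistic.fit(X, y)  # doctest: +SKIP
    >>> selected_mask = randomized_logistic.get_support()  # doctest: +SKIP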
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
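    Examples
    --------
    A minimal call sketch (editor's addition; the toy data is illustrative):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(40, 10)
    >>> y = X[:, 2] + 0.01 * np.random.RandomState(1).randn(40)
    >>> alphas_grid, scores_path = lasso_stability_path(X, y)  # doctest: +SKIP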
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
SophieIPP/ipp-macro-series-parser | ipp_macro_series_parser/demographie/parser.py | 1 | 3235 | # -*- coding: utf-8 -*-
# TAXIPP -- A French microsimulation model
# By: IPP <taxipp@ipp.eu>
#
# Copyright (C) 2012, 2013, 2014, 2015 IPP
# https://github.com/taxipp
#
# This file is part of TAXIPP.
#
# TAXIPP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TAXIPP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import pandas
import pkg_resources
from ipp_macro_series_parser.config import Config
config_parser = Config(
config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location)
)
xls_directory = os.path.join(config_parser.get('data', 'demographie_directory'), 'xls')
log = logging.getLogger(__name__)
def create_demographie_data_frame():
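    # Assemble the yearly age-pyramid Excel files (pyramide-des-ages-*.xls,
    # 1999-2015) into a single long-format DataFrame: one row per
    # (year, champ, âge révolu, variable), the variables being the population
    # counts (e.g. "Nombre de femmes", "Nombre d'hommes").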
data_frame = pandas.DataFrame()
for year in range(1999, 2015 + 1):
file_path = os.path.join(xls_directory, u'pyramide-des-ages-{}.xls'.format(year))
skiprows = 5 - (year == 1999)
parse_cols = "A:E"
slice_start = 0
slice_end = 101
sheetname = 'France'
if year <= 2010:
sheetnames = ['France', u'France métropolitaine']
elif year == 2011:
sheetnames = ['{} France'.format(year), u"{} métropole".format(year)]
else:
sheetnames = ['Pyramide {} France'.format(year), u'Pyramide {} métropole'.format(year)]
for sheetname in sheetnames:
try:
df = pandas.read_excel(
file_path,
# na_values = '-',
sheetname = sheetname,
skiprows = skiprows,
parse_cols = parse_cols).iloc[slice_start:slice_end]
df['year'] = year
if sheetname in ['France', u'France métropolitaine']:
df['champ'] = sheetname
else:
df['champ'] = u'France métropolitaine' if u'métropole' in sheetname else 'France'
                # All column names on one line
remove_cr = dict(
(column, column.replace(u"\n", " ").replace(" ", " ")) for column in df.columns)
df.rename(columns = remove_cr, inplace = True)
                # Femmes -> Nombre de femmes, etc.
df.rename(columns = dict(
Femmes = "Nombre de femmes",
Hommes = "Nombre d'hommes"), inplace = True)
data_frame = pandas.concat((data_frame, df))
del df
except Exception, e:
print year
print sheetname
raise(e)
return pandas.melt(data_frame, id_vars = ['year', 'champ', u'Âge révolu', u'Année de naissance'])
| gpl-3.0 |
gef756/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
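    Examples
    --------
    A short usage sketch (editor's addition):
    >>> import numpy as np
    >>> from scipy.interpolate import lagrange
    >>> x = np.array([0, 1, 2])
    >>> w = np.array([1, 3, 2])
    >>> poly = lagrange(x, w)  # doctest: +SKIP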
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0,'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
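# A brief usage sketch of the 1-D interpolator whose evaluation methods are
# defined above (illustrative only; it assumes these methods belong to the
# public `interp1d` class defined earlier in this module):
#
#     >>> from scipy.interpolate import interp1d
#     >>> f = interp1d([0, 1, 2], [0, 2, 4], kind='linear')
#     >>> float(f(1.5))
#     3.0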
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
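        Examples
        --------
        A minimal sketch, extending a constant polynomial on ``[0, 1]`` with
        a second interval ``[1, 2]`` (the values are chosen arbitrarily):
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly([[1.]], [0, 1])
        >>> pp.extend(np.array([[2.]]), np.array([2.]))
        >>> pp([0.5, 1.5])
        array([ 1.,  2.])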
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
    The polynomial in the ``i``-th interval, ``x[i] <= xp < x[i+1]``, is::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
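    Examples
    --------
    A small sketch: a piecewise linear polynomial equal to ``xp`` on
    ``[0, 1)`` and to ``1 + 2*(xp - 1)`` on ``[1, 2]``:
    >>> from scipy.interpolate import PPoly
    >>> pp = PPoly(np.array([[1., 2.], [0., 1.]]), [0, 1, 2])
    >>> pp([0.5, 1.5])
    array([ 0.5,  2. ])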
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
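        Examples
        --------
        A minimal sketch, differentiating ``p(x) = x**2`` on ``[0, 1]``:
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly([[1.], [0.], [0.]], [0, 1])
        >>> pp.derivative()([0.25, 0.5])
        array([ 0.5,  1. ])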
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
        The antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
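        Examples
        --------
        A small sketch, integrating ``p(x) = 2*x`` over ``[0, 1]``:
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly([[2.], [0.]], [0, 1])
        >>> float(pp.integrate(0, 1))
        1.0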
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discont` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
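        Examples
        --------
        A rough sketch, assuming the default interpolating behaviour of
        `splrep` (no smoothing):
        >>> from scipy.interpolate import splrep, PPoly
        >>> x = np.linspace(0, 10, 11)
        >>> tck = splrep(x, np.sin(x))
        >>> pp = PPoly.from_spline(tck)
        >>> np.allclose(pp(x), np.sin(x))
        True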
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
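        Examples
        --------
        A minimal sketch, converting a quadratic Bernstein-basis polynomial
        and checking that the two representations agree:
        >>> from scipy.interpolate import BPoly, PPoly
        >>> bp = BPoly([[1.], [2.], [3.]], [0, 1])
        >>> pp = PPoly.from_bernstein_basis(bp)
        >>> np.allclose(pp([0.25, 0.75]), bp([0.25, 0.75]))
        True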
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k + nu representing the
antiderivative of this polynomial.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
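        Examples
        --------
        A small sketch: for Bernstein coefficients ``[1, 2, 3]`` on ``[0, 1]``
        the integral is their mean, since each basis function integrates to
        ``1/(k+1)``:
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly([[1.], [2.], [3.]], [0, 1])
        >>> float(bp.integrate(0, 1))
        2.0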
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
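        Examples
        --------
        A minimal sketch, round-tripping a piecewise linear `PPoly` through
        the Bernstein basis:
        >>> from scipy.interpolate import PPoly, BPoly
        >>> pp = PPoly([[1., 2.], [0., 1.]], [0, 1, 2])
        >>> bp = BPoly.from_power_basis(pp)
        >>> np.allclose(bp([0.5, 1.5]), pp([0.5, 1.5]))
        True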
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
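    Examples
    --------
    A minimal sketch on a 2-d grid of ``f(x, y) = x + y`` (linear data, so
    linear interpolation is exact):
    >>> from scipy.interpolate import interpn
    >>> x = y = np.array([0., 1., 2.])
    >>> values = x[:, None] + y[None, :]
    >>> interpn((x, y), values, [[0.5, 0.5], [1.25, 0.75]])
    array([ 1.,  2.])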
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the dis-continuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x2-x3))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN2-xN3))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Boundary conditions passed to the spline-construction routine
        (ignored by the default 'smoothest' kind).
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
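    Examples
    --------
    A rough sketch with the default smoothest cubic spline:
    >>> xk = np.linspace(0, 4, 5)
    >>> knots, coefs, k = splmake(xk, xk**2)
    >>> k
    3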
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
        Spline coefficients (the B-spline coefficient array)
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
        Order of the derivative to evaluate (default 0)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Boundary conditions passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
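    Examples
    --------
    A rough sketch; the data lie on ``y = x**2``, so the smoothest cubic
    spline should reproduce it closely:
    >>> xk = np.linspace(0, 4, 5)
    >>> yk = xk**2
    >>> np.allclose(spline(xk, yk, [2.5]), 6.25)
    True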
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
tsherwen/AC_tools | Scripts/2D_GEOSChem_slice_subregion_plotter_example.py | 1 | 2934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDFs files.
NOTES
---
 - This is set up for Cly, but many other options (plot/species) are available
 by just updating the passed variables/plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
    # (1st argument is the file directory/folder, 2nd is the filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
    # are the spacings right? - if not, just update the values below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
    # save the plot (as PNG here) and show it
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
| mit |
LaboratoireMecaniqueLille/crappy | crappy/blocks/grapher.py | 1 | 5641 | # coding: utf-8
import numpy as np
from .block import Block
from .._global import OptionalModule
try:
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
except (ModuleNotFoundError, ImportError):
plt = OptionalModule("matplotlib")
Button = OptionalModule("matplotlib")
class Grapher(Block):
"""The grapher receive data from a block (via a :ref:`Link`) and plots it."""
def __init__(self,
*labels,
length=0,
freq=2,
maxpt=20000,
window_size=(8, 8),
window_pos=None,
interp=True,
backend="TkAgg"):
"""Sets the args and initializes the parent class.
Args:
      *labels (:obj:`tuple`): Tuples of the column labels of input data for
        plotting. You can add as many as you want, depending on your
        performance. The first value is the `x` label, the second is the `y`
label.
length (:obj:`int`, optional): If `0` the graph is static and displays
all data from the start of the assay. Else only displays the last
``length`` received chunks, and drops the previous ones.
freq (:obj:`float`, optional): The refresh rate of the graph. May cause
high memory consumption if set too high.
maxpt (:obj:`int`, optional): The maximum number of points displayed on
the graph. When reaching this limit, the block deletes one point out of
two but this is almost invisible to the user.
window_size (:obj:`tuple`, optional): The size of the graph, in inches.
window_pos (:obj:`tuple`, optional): The position of the graph in pixels.
The first value is for the `x` direction, the second for the `y`
direction. The origin is the top left corner. Works with multiple
screens.
interp (:obj:`bool`, optional): If :obj:`True`, the points of data will
be linked to the following by straight lines. Else, each value wil be
displayed as constant until the next update.
      backend (:obj:`str`, optional): The :mod:`matplotlib` backend to use.
Example:
::
graph = Grapher(('t(s)', 'F(N)'), ('t(s)', 'def(%)'))
will plot a dynamic graph with two lines plot (`F=f(t)` and `def=f(t)`).
::
graph = Grapher(('def(%)', 'F(N)'), length=0)
will plot a static graph.
::
graph = Grapher(('t(s)', 'F(N)'), length=30)
will plot a dynamic graph displaying the last 30 chunks of data.
"""
Block.__init__(self)
self.niceness = 10
self.length = length
self.freq = freq
self.maxpt = maxpt
self.window_size = window_size
self.window_pos = window_pos
self.interp = interp
self.backend = backend
self.labels = labels
def prepare(self):
if self.backend:
plt.switch_backend(self.backend)
self.f = plt.figure(figsize=self.window_size)
self.ax = self.f.add_subplot(111)
self.lines = []
for _ in self.labels:
if self.interp:
self.lines.append(self.ax.plot([], [])[0])
else:
self.lines.append(self.ax.step([], [])[0])
# Keep only 1/factor points on each line
self.factor = [1 for _ in self.labels]
# Count to drop exactly 1/factor points, no more and no less
self.counter = [0 for _ in self.labels]
legend = [y for x, y in self.labels]
plt.legend(legend, bbox_to_anchor=(-0.03, 1.02, 1.06, .102), loc=3,
ncol=len(legend), mode="expand", borderaxespad=1)
plt.xlabel(self.labels[0][0])
plt.ylabel(self.labels[0][1])
plt.grid()
self.axclear = plt.axes([.8, .02, .15, .05])
self.bclear = Button(self.axclear, 'Clear')
self.bclear.on_clicked(self.clear)
if self.window_pos:
mng = plt.get_current_fig_manager()
mng.window.wm_geometry("+%s+%s" % self.window_pos)
plt.draw()
plt.pause(.001)
def clear(self, event=None):
for line in self.lines:
line.set_xdata([])
line.set_ydata([])
self.factor = [1 for _ in self.labels]
self.counter = [0 for _ in self.labels]
def loop(self):
# We need to recv data from all the links, but keep
# ALL of the data, even with the same label (so not get_all_last)
data = self.recv_all_delay()
for i, (lx, ly) in enumerate(self.labels):
x, y = 0, 0 # So that if we don't find it, we do nothing
for d in data:
if lx in d and ly in d: # Find the first input with both labels
dx = d[lx][self.factor[i]-self.counter[i]-1::self.factor[i]]
dy = d[ly][self.factor[i]-self.counter[i]-1::self.factor[i]]
self.counter[i] = (self.counter[i]+len(d[lx])) % self.factor[i]
x = np.hstack((self.lines[i].get_xdata(), dx))
y = np.hstack((self.lines[i].get_ydata(), dy))
break
if isinstance(x, int):
break
if self.length and len(x) >= self.length:
# Remove the beginning if the graph is dynamic
x = x[-self.length:]
y = y[-self.length:]
elif len(x) > self.maxpt:
        # Reduce the number of points if we have too many to display
print("[Grapher] Too many points on the graph {} ({}>{})".format(
i, len(x), self.maxpt))
x, y = x[::2], y[::2]
self.factor[i] *= 2
print("[Grapher] Resampling factor is now {}".format(self.factor[i]))
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
self.ax.relim() # Update the window
self.ax.autoscale_view(True, True, True)
self.f.canvas.draw() # Update the graph
self.f.canvas.flush_events()
def finish(self):
plt.close("all")
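# --- Illustrative sketch (added; not part of the original Grapher block) ---
# A minimal, standalone demonstration of the decimation strategy described in
# the docstring above: once a line holds more than `maxpt` points, every second
# point is dropped and the resampling factor doubles. All names below are local
# to this sketch and are assumptions, not the real Grapher API.
if __name__ == '__main__':
    import numpy as np
    maxpt, factor = 100, 1
    x = np.array([])
    for _ in range(50):                    # data arrives in chunks of 10 points
        x = np.hstack((x, np.arange(10)))
        if len(x) > maxpt:
            x = x[::2]                     # keep one point out of two
            factor *= 2                    # remember the overall resampling rate
    print("points kept: %d, resampling factor: %d" % (len(x), factor))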
| gpl-2.0 |
zorojean/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
debsankha/bedtime-programming | ls222/visual-lotka.py | 1 | 5120 | #!/usr/bin/env python
from math import *
import thread
import random
import time
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import commands
import matplotlib.pyplot
import visual  # VPython; required by the 3-D animation below (visual.sphere, visual.color)
class rodent:
def __init__(self):
self.time_from_last_childbirth=0
class felix:
def __init__(self):
self.size=0
self.is_virgin=1
self.reproduction_gap=0
self.time_from_last_childbirth=0
self.age=0
# print 'painted'
class gui_display:
def __init__(self):
self.gladefile='./lvshort.glade'
self.wTree = gtk.glade.XML(self.gladefile)
dic={"on_start_clicked":self.dynamics,"on_mainwin_destroy":gtk.main_quit}
self.wTree.signal_autoconnect(dic)
self.wTree.get_widget("mainwin").show()
self.wTree.get_widget("image").set_from_file("./start.png")
def visualize(self,catn,mousen):
# while True:
num=40
size=10
catno=catn*num**2/(catn+mousen)
cats=random.sample(range(num**2),catno)
for i in range(num**2):
if i in cats:
self.dic[i].color=visual.color.red
else :
self.dic[i].color=visual.color.green
def dynamics(self,*args,**kwargs):
self.wTree.get_widget("image").set_from_file("./wait.png")
print 'dynamics started'
mouse_size=20 #ind parameter
cat_mature_size=60 #ind parameter
# catch_rate=5*10**-4 #parameter
# cat_efficiency=0.8 #parameter
# a=0.2 #will get from slider
# c=0.2 #will get from slider
cat_catch_rate=self.wTree.get_widget("catchrate").get_value()*10**-4 #parameter
cat_efficiency=self.wTree.get_widget("efficiency").get_value() #parameter
a=self.wTree.get_widget("a").get_value() #parameter
c=self.wTree.get_widget("c").get_value() #parameter
mouse_no=1000
cat_no=1000
t=0
tmax=200
dt=1
timeli=[]
miceli=[]
catli=[]
mice=[rodent() for i in range(mouse_no)]
cats=[felix() for i in range(cat_no)]
catn=len(cats)
mousen=len(mice)
self.dic={}
num=40
size=10
catno=catn*num**2/(catn+mousen)
disp_cats=random.sample(range(num**2),catno)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
for i in range(num**2):
coords=((i%num)*size*2-num*size,(i/num)*size*2-num*size)
if i in disp_cats:
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red)
else :
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green)
print self.dic
catn=len(cats)
mousen=len(mice)
data=open('tempdata.dat','w')
timestart=time.time()
while (len(mice)>0 or len(cats)>0) and t<tmax and (time.time()-timestart)<60:
# print time.time()-timestart
catn=len(cats)
mousen=len(mice)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
# self.visualize(catn,mousen)
thread.start_new_thread(self.visualize,(catn,mousen))
for mouse in mice:
if mouse.time_from_last_childbirth>=1/a:
mouse.time_from_last_childbirth=0
mice.append(rodent())
mouse.time_from_last_childbirth+=dt
ind=0
while ind<len(cats):
cat=cats[ind]
cat.age+=dt
num=cat_catch_rate*dt*len(mice)
for i in range(int(num)):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if (num-int(num))>random.uniform(0,1):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if cat.size>cat_mature_size:
if cat.is_virgin:
cat.is_virgin=0
cat.reproduction_gap=cat.age
cats.append(felix())
else :
if cat.time_from_last_childbirth>cat.reproduction_gap:
cats.append(felix())
cat.time_from_last_childbirth=0
if cat.is_virgin==0:
cat.time_from_last_childbirth+=dt
if len(cats)>0:
if c*dt*2*atan(0.05*len(cats))/pi>random.uniform(0,1):
cats.pop(ind)
else :
ind+=1
else :
ind+=1
timeli.append(t)
miceli.append(len(mice))
catli.append(len(cats))
print t,'\t',len(mice),'\t',len(cats)
print >> data, t,'\t',len(mice),'\t',len(cats)
t+=dt
data.close()
upper_limit=1.2*len(mice)
pltfile=open('lv.plt','w')
print >> pltfile,"""se te png
se o "/tmp/lv.png"
unse ke
#se yrange [0:%f]
se xl "Time"
se yl "Number of Prey/Predator"
p 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l
"""%upper_limit
pltfile.close()
commands.getoutput('gnuplot lv.plt')
self.wTree.get_widget("image").set_from_file("/tmp/lv.png")
print 'dynamics ended'
reload(matplotlib.pyplot)
matplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-')
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel("Number of mice and cats")
matplotlib.pyplot.show()
gui=gui_display()
gtk.main()
#dynamics()
#import matplotlib.pyplot as plt
#plt.plot(timeli,miceli,'go',timeli,catli,'ro')
#plt.show()
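# --- Illustrative sketch (added; not part of the original script) ---
# The agent-based simulation above approximates the classic Lotka-Volterra
# predator-prey dynamics. A minimal forward-Euler integration of the continuous
# model is sketched below for comparison; the parameter values are arbitrary
# assumptions, not the ones read from the GUI sliders.
def _lotka_volterra_sketch(prey=10.0, pred=5.0, a=1.1, b=0.4, c=0.1, d=0.4,
                           dt=0.01, tmax=50.0):
    """Integrate dN/dt = a*N - b*N*P and dP/dt = c*N*P - d*P with Euler steps."""
    times, preys, preds = [], [], []
    t = 0.0
    while t < tmax:
        times.append(t)
        preys.append(prey)
        preds.append(pred)
        # simultaneous update: both right-hand sides use the old values
        prey, pred = (prey + (a * prey - b * prey * pred) * dt,
                      pred + (c * prey * pred - d * pred) * dt)
        t += dt
    return times, preys, preds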
| gpl-3.0 |
karvenka/sp17-i524 | project/S17-IR-P014/code/delay.py | 15 | 5276 | import sys
import csv
import sip
#import org.apache.log4j.{Level, Logger}
import matplotlib
#matplotlib.user('agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from pyspark import SparkContext, SparkConf
from datetime import datetime
from operator import add, itemgetter
from collections import namedtuple
from datetime import datetime
import os
import time
from StringIO import StringIO
#Defining the fields, Creating a Flights class with the following fields as a tuple
#Each row is converted into a list
timestarted = time.time()
fields = ('date', 'airline', 'flightnum', 'origin', 'dest', 'dep',
'dep_delay', 'arv', 'arv_delay', 'airtime', 'distance')
Flight = namedtuple('Flight', fields, verbose=True)
DATE_FMT = "%Y-%m-%d"
TIME_FMT = "%H%M"
# User Defined Functions
def toCSVLine(data):
return ','.join(str(d) for d in data)
def split(line):
reader = csv.reader(StringIO(line))
return reader.next()
def parse(row):
row[0] = datetime.strptime(row[0], DATE_FMT).time()
row[5] = datetime.strptime(row[5], TIME_FMT).time()
row[6] = float(row[6])
row[7] = datetime.strptime(row[7], TIME_FMT).time()
row[8] = float(row[8])
row[9] = float(row[9])
row[10] = float(row[10])
return Flight(*row[:11])
def notHeader(row):
return "Description" not in row
def plot(airlinesdelays):
airlines = [d[0] for d in airlinesdelays]
minutes = [d[1] for d in airlinesdelays]
index = list(xrange(len(airlines)))
#Above we retrieved the respective columns from the list
#Here we mention the plot as a horizontal bar plot
fig, axe = plt.subplots()
bars = axe.barh(index, minutes)
# Add the total minutes to the right
for idx, air, min in zip(index, airlines, minutes):
if min > 0:
bars[idx].set_color('#d9230f')
axe.annotate(" %0.0f min" % min, xy=(min+1, idx+0.5), va='center')
else:
bars[idx].set_color('#469408')
axe.annotate(" %0.0f min" % min, xy=(10, idx+0.5), va='center')
# Set the ticks
ticks = plt.yticks([idx+ 0.5 for idx in index], airlines)
xt = plt.xticks()[0]
plt.xticks(xt, [' '] * len(xt))
# minimize chart junk
plt.grid(axis = 'x', color ='white', linestyle='-')
plt.title('Total Minutes Delayed per Airline')
plt.savefig('airlines.png')
#airlines.filter(notHeader).take(10)
#main method is the entry point for the following program
if __name__ == "__main__":
conf = SparkConf().setAppName("average")
sc = SparkContext(conf=conf)
#setting log level to error
# val rootLogger = Logger.getRootLogger()
# rootLogger.setLevel(Level.ERROR)
#importing data from HDFS for performing analysis
airlines = sc.textFile(sys.argv[1])
# airlines = sc.textFile("hdfs://192.168.1.8:8020/fltdata/airlines.csv")
flights = sc.textFile(sys.argv[2])
airports =sc.textFile(sys.argv[3])
airlinesParsed = dict(airlines.map(split).collect())
airportsParsed= airports.filter(notHeader).map(split)
# print "without header and spliting up", airlines.take(10)
# print "without header and spliting up", airlines.take(10)
flightsParsed= flights.map(lambda x: x.split(',')).map(parse)
#print "The average delay is "+str(sumCount[0]/float(sumCount[1]))
airportDelays = flightsParsed.map(lambda x: (x.origin,x.dep_delay))
# First find the total delay per airport
airportTotalDelay=airportDelays.reduceByKey(lambda x,y:x+y)
# Find the count per airport
airportCount=airportDelays.mapValues(lambda x:1).reduceByKey(lambda x,y:x+y)
# Join to have the sum, count in 1 RDD
airportSumCount=airportTotalDelay.join(airportCount)
# Compute avg delay per airport
airportAvgDelay=airportSumCount.mapValues(lambda x : x[0]/float(x[1]))
airportDelay = airportAvgDelay.sortBy(lambda x:-x[1])
print "", airportDelay.take(10)
airportLookup=airportsParsed.collectAsMap()
#airlineLookup=airlinesParsed.collectAsMap()
airline_lookup = sc.broadcast(airlinesParsed)
airlinesdelays = flightsParsed.map(lambda f: (airline_lookup.value[f.airline],add(f.dep_delay, f.arv_delay)))
    airlinesdelays = airlinesdelays.reduceByKey(add).collect()
    airlinesdelays = sorted(airlinesdelays, key=itemgetter(1))
#tenairlines = delays.map(toCSVLine)
ten = airportAvgDelay.map(lambda x: (airportLookup[x[0]],x[1]))
#print "", ten.take(10)
for d in airlinesdelays:
print "%0.0f minutes delayed\t%s" % (d[1], d[0])
airportBC=sc.broadcast(airportLookup)
topTenAirportsWithDelays = airportAvgDelay.map(lambda x: (airportBC.value[x[0]],x[1])).sortBy(lambda x:-x[1])
lines = topTenAirportsWithDelays.take(10)
topten = "/home/hadoop/"
tenairlines = "/home/hadoop/"
#For collecting the outputs into csv files
with open('topten', "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in lines:
writer.writerows([val])
with open('tenairlines',"w") as output:
writer = csv.writer(output, lineterminator='\n')
        for val in airlinesdelays:
writer.writerows([val])
plot(airlinesdelays)
#Final time taken will be calculated here
timetaken = time.time()-timestarted
print "", timetaken
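# --- Illustrative sketch (added; not part of the original Spark job) ---
# The average-delay-per-airport computation above follows the usual
# sum/count/average pattern: reduceByKey for the totals, a second reduceByKey
# for the counts, join, then mapValues. The helper below reproduces the same
# pattern with plain Python dictionaries so the logic can be checked without a
# Spark cluster; it is only an illustration, not the code that runs on the cluster.
def _average_per_key(pairs):
    """pairs: iterable of (key, value) tuples; returns {key: mean value}."""
    totals, counts = {}, {}
    for key, value in pairs:
        totals[key] = totals.get(key, 0.0) + value   # like reduceByKey(add)
        counts[key] = counts.get(key, 0) + 1         # like mapValues(1) + reduceByKey(add)
    # like join(...) followed by mapValues(lambda (s, n): s / float(n))
    return dict((key, totals[key] / counts[key]) for key in totals)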
| apache-2.0 |
garrettkatz/directional-fibers | dfibers/experiments/levy_opt/levy_opt.py | 1 | 6952 | """
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
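# --- Note (added; not part of the original experiment) ---
# "Levy function" above refers to the standard d-dimensional Levy benchmark,
# usually written with w_i = 1 + (x_i - 1)/4 as
#     f(x) = sin^2(pi*w_1)
#            + sum_{i=1}^{d-1} (w_i - 1)^2 * (1 + 10*sin^2(pi*w_i + 1))
#            + (w_d - 1)^2 * (1 + sin^2(2*pi*w_d)),
# whose global minimum f = 0 is attained at x = (1, ..., 1). The exact form
# implemented in dfibers.examples.levy may differ slightly; this is only a
# reference for the reader.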
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
with open("%s_s%d.npz"%(basename,sample), 'w') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
| mit |
jreback/pandas | pandas/io/formats/latex.py | 2 | 25201 | """
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: Optional[Union[str, Tuple[str, str]]]
) -> Tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
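# Example behaviour of the helper above (illustrative only, not a doctest run
# by the pandas test suite):
#
#   _split_into_full_short_caption("a caption")        -> ("a caption", "")
#   _split_into_full_short_caption(("full", "short"))  -> ("full", "short")
#   _split_into_full_short_caption(None)               -> ("", "")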
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows = list(zip(*self.strcols))
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
short_caption: str, optional
Table short caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
short_caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.short_caption = short_caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption.
With short caption:
\caption[short_caption]{caption_string}.
Without short caption:
\caption{caption_string}.
"""
if self.caption:
return "".join(
[
r"\caption",
f"[{self.short_caption}]" if self.short_caption else "",
f"{{{self.caption}}}",
]
)
return ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> Type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
longtable : bool, default False
Use longtable environment.
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
multicolumn : bool, default False
Use \multicolumn to enhance MultiIndex columns.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
multirow : bool, default False
Use \multirow to enhance MultiIndex rows.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in \caption[short_caption]{full_caption};
if a single string is passed, no short caption will be set.
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{}`` in the output.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[Union[str, Tuple[str, str]]] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption, self.short_caption = _split_into_full_short_caption(caption)
self.label = label
self.position = position
def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
short_caption=self.short_caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> Type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> Optional[str]:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: Optional[str]) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
Right alignment for numbers and left - for strings.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
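# Illustrative usage of LatexFormatter (mirrors the builder doctests above; the
# expected LaTeX output is omitted here because it depends on the exact pandas
# version, so this is a sketch rather than a verified doctest):
#
#   >>> from pandas import DataFrame
#   >>> from pandas.io.formats import format as fmt
#   >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
#   >>> formatter = fmt.DataFrameFormatter(df)
#   >>> latex = LatexFormatter(formatter, caption="caption", label="lab").to_string()
#   >>> isinstance(latex, str)
#   True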
def _escape_symbols(row: Sequence[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of string, that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
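# Example of the escaping performed above (illustrative only):
#
#   _escape_symbols(["50%", "x_1", ""])  ->  ["50\\%", "x\\_1", "{}"]
#
# i.e. LaTeX special characters are backslash-escaped and empty cells become "{}".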
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
junbochen/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
print("Evaluating svm")
y_pred = model.predict(X)
#try:
if True:
acc = (y == y_pred).mean()
print("Accuracy ",acc)
"""except:
print("something went wrong")
print('y:')
print(y)
print('y_pred:')
print(y_pred)
print('extra info')
print(type(y))
print(type(y_pred))
print(y.dtype)
print(y_pred.dtype)
print(y.shape)
print(y_pred.shape)
raise
"""
#
def get_test_labels(cifar10, cifar100, stl10):
assert cifar10 + cifar100 + stl10 == 1
if stl10:
print('loading entire stl-10 test set just to get the labels')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
return stl10.y
if cifar10:
print('loading entire cifar10 test set just to get the labels')
cifar10 = CIFAR10(which_set = 'test')
return np.asarray(cifar10.y)
if cifar100:
print('loading entire cifar100 test set just to get the fine labels')
cifar100 = CIFAR100(which_set = 'test')
return np.asarray(cifar100.y_fine)
assert False
def main(model_path,
test_path,
dataset,
**kwargs):
model = serial.load(model_path)
cifar100 = dataset == 'cifar100'
cifar10 = dataset == 'cifar10'
stl10 = dataset == 'stl10'
assert cifar10 + cifar100 + stl10 == 1
y = get_test_labels(cifar10, cifar100, stl10)
X = get_features(test_path, False, False)
if stl10:
num_examples = 8000
if cifar10 or cifar100:
num_examples = 10000
if not X.shape[0] == num_examples:
raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
assert y.shape[0] == num_examples
test(model,X,y)
if __name__ == '__main__':
"""
Useful for quick tests.
Usage: python train_bilinear.py
"""
parser = OptionParser()
parser.add_option("-m", "--model",
action="store", type="string", dest="model_path")
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
#(options, args) = parser.parse_args()
#assert options.output
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
versae/DH2304 | data/arts1.py | 1 | 1038 | import numpy as np
import pandas as pd
arts = pd.DataFrame()
# Clean the dates so you only see numbers.
def clean_years(value):
result = value
chars_to_replace = ["c.", "©", ", CARCC", "no date", "n.d.", " SODRAC", ", CA", " CARCC", ""]
chars_to_split = ["-", "/"]
if isinstance(result, str):
for char in chars_to_split:
if char in result:
result = result.split(char)[1].strip()
for char in chars_to_replace:
result = result.replace(char, "")
if result == "":
return np.nan
else:
return int(result)
else:
return result
arts['execution_date'] = arts['execution_date'].apply(clean_years)
arts.head()
# If a year is lower than 100, it refers to the 1900s. For example, 78 is actually 1978, and that needs to be fixed too.
def clean_year_99(value):
if value < 100:
return value + 1900
else:
return value
arts["execution_date"] = arts["execution_date"].apply(clean_year_99)
arts.head()
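# Illustrative behaviour of the two cleaning steps above (the input values are
# made-up examples, not rows of the actual dataset):
#
#   clean_years("c. 1978-1980")  ->  1980   (keeps the part after the split character)
#   clean_years("1978/79")       ->  79, which clean_year_99 then maps to 1979
#   clean_years("no date")       ->  NaN
#   clean_year_99(78)            ->  1978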
| mit |
nakul02/systemml | src/main/python/systemml/classloader.py | 4 | 7952 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['createJavaObject', 'jvm_stdout', 'default_jvm_stdout', 'default_jvm_stdout_parallel_flush', 'set_default_jvm_stdout', 'get_spark_context' ]
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
_loadedSystemML = False
def get_spark_context():
"""
Internal method to get already initialized SparkContext. Developers should always use
get_spark_context() instead of SparkContext._active_spark_context to ensure SystemML loaded.
Returns
-------
sc: SparkContext
SparkContext
"""
if SparkContext._active_spark_context is not None:
sc = SparkContext._active_spark_context
global _loadedSystemML
if not _loadedSystemML:
createJavaObject(sc, 'dummy')
_loadedSystemML = True
return sc
else:
raise Exception('Expected spark context to be created.')
_in_jvm_stdout = False
default_jvm_stdout = True
default_jvm_stdout_parallel_flush = True
def set_default_jvm_stdout(enable, parallel_flush=True):
"""
This is useful utility method to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
enable: boolean
Should flush the stdout by default when mlcontext.execute is invoked
parallel_flush: boolean
Should flush the stdout in parallel
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
default_jvm_stdout = enable
default_jvm_stdout_parallel_flush = parallel_flush
# This is useful utility class to get the output of the driver JVM from within a Jupyter notebook
# Example usage:
# with jvm_stdout():
# ml.execute(script)
class jvm_stdout(object):
"""
This is useful utility class to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
parallel_flush: boolean
Should flush the stdout in parallel
"""
def __init__(self, parallel_flush=False):
self.util = get_spark_context()._jvm.org.apache.sysml.api.ml.Utils()
self.parallel_flush = parallel_flush
self.t = threading.Thread(target=self.flush_stdout)
self.stop = False
def flush_stdout(self):
while not self.stop:
time.sleep(1) # flush stdout every 1 second
str = self.util.flushStdOut()
if str != '':
str = str[:-1] if str.endswith('\n') else str
print(str)
def __enter__(self):
global _in_jvm_stdout
if _in_jvm_stdout:
# Allow for nested jvm_stdout
self.donotRedirect = True
else:
self.donotRedirect = False
self.util.startRedirectStdOut()
if self.parallel_flush:
self.t.start()
_in_jvm_stdout = True
def __exit__(self, *args):
global _in_jvm_stdout
if not self.donotRedirect:
if self.parallel_flush:
self.stop = True
self.t.join()
print(self.util.stopRedirectStdOut())
_in_jvm_stdout = False
_initializedSparkSession = False
def _createJavaObject(sc, obj_type):
# -----------------------------------------------------------------------------------
# Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession.
# This is done at toDF() rather than import level to avoid creation of SparkSession in worker processes.
global _initializedSparkSession
if not _initializedSparkSession:
_initializedSparkSession = True
SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))
# -----------------------------------------------------------------------------------
if obj_type == 'mlcontext':
return sc._jvm.org.apache.sysml.api.mlcontext.MLContext(sc._jsc)
elif obj_type == 'dummy':
return sc._jvm.org.apache.sysml.utils.SystemMLLoaderUtils()
else:
raise ValueError('Incorrect usage: supported values: mlcontext or dummy')
def _getJarFileNames(sc):
import imp, fnmatch
jar_file_name = '_ignore.jar'
java_dir = os.path.join(imp.find_module("systemml")[1], "systemml-java")
jar_file_names = []
for file in os.listdir(java_dir):
if fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar'):
jar_file_names = jar_file_names + [ os.path.join(java_dir, file) ]
return jar_file_names
def _getLoaderInstance(sc, jar_file_name, className, hint):
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
if os.path.isfile(jar_file_name):
sc._jsc.addJar(jar_file_name)
jar_file_url = sc._jvm.java.io.File(jar_file_name).toURI().toURL()
url_class = sc._jvm.java.net.URL
jar_file_url_arr = sc._gateway.new_array(url_class, 1)
jar_file_url_arr[0] = jar_file_url
url_class_loader = sc._jvm.java.net.URLClassLoader(jar_file_url_arr, sc._jsc.getClass().getClassLoader())
c1 = sc._jvm.java.lang.Class.forName(className, True, url_class_loader)
return c1.newInstance()
else:
raise ImportError(err_msg + ' Hint: Download the jar from http://systemml.apache.org/download and ' + hint )
def createJavaObject(sc, obj_type):
"""
Performs appropriate check if SystemML.jar is available and returns the handle to MLContext object on JVM
Parameters
----------
sc: SparkContext
SparkContext
obj_type: Type of object to create ('mlcontext' or 'dummy')
"""
try:
return _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
ret = None
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
hint = 'Provide the following argument to pyspark: --driver-class-path '
jar_file_names = _getJarFileNames(sc)
if len(jar_file_names) != 2:
raise ImportError('Expected only systemml and systemml-extra jars, but found ' + str(jar_file_names))
for jar_file_name in jar_file_names:
if 'extra' in jar_file_name:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.api.dl.Caffe2DMLLoader', hint + 'systemml-*-extra.jar')
x.loadCaffe2DML(jar_file_name)
else:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.utils.SystemMLLoaderUtils', hint + 'systemml-*.jar')
x.loadSystemML(jar_file_name)
try:
ret = _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
raise ImportError(err_msg + ' Hint: ' + hint + jar_file_name)
return ret
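# Illustrative usage (added; assumes this module is imported inside an existing
# PySpark session with the SystemML jars available on the driver classpath):
#
#   from pyspark import SparkContext
#   sc = SparkContext.getOrCreate()
#   ml = createJavaObject(sc, 'mlcontext')   # handle to the JVM-side MLContext
#   util = createJavaObject(sc, 'dummy')     # loader utility, used internally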
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/mpl_toolkits/axisartist/axisline_style.py | 8 | 5277 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle:
class SimpleArrow(FancyArrowPatch):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
arrow_transmuter=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
Extend the path to make a room for drawing arrow.
"""
from matplotlib.bezier import get_cos_sin
x0, y0 = path.vertices[-2]
x1, y1 = path.vertices[-1]
cost, sint = get_cos_sin(x0, y0, x1, y1)
d = mutation_size * 1.
x2, y2 = x1 + cost*d, y1+sint*d
if path.codes is None:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
return _path
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
1) transform the path to the display coordinate.
2) extend the path to make a room for arrow
3) update the path of the FancyArrowPatch.
4) draw
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() #line_mutation_scale()
extented_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extented_path
FancyArrowPatch.draw(self, renderer)
class FilledArrow(SimpleArrow):
"""
        The artist class that will be returned for FilledArrow style.
"""
_ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
"""
:class:`AxislineStyle` is a container class which defines style classes
for AxisArtists.
An instance of any axisline style class is an callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
When called, this should return a mpl artist with following
methods implemented. ::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base(object):
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initialization.
"""
super(AxislineStyle._Base, self).__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path
(set_path method), return the mpl artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
*size*
size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super(AxislineStyle.SimpleArrow, self).__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0,0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
_style_list["-|>"] = FilledArrow
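# Illustrative usage (based on the matplotlib axisartist demos; the exact API
# may vary between matplotlib versions, so treat this as a sketch):
#
#   from mpl_toolkits.axisartist.axislines import SubplotZero
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = SubplotZero(fig, 111)
#   fig.add_subplot(ax)
#   ax.axis["xzero"].set_axisline_style("-|>")   # FilledArrow style registered above
#   ax.axis["xzero"].set_visible(True)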
| mit |
yunfeilu/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
        will be changed to 'uniform_average' in a future version.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
timqian/sms-tools | lectures/5-Sinusoidal-model/plots-code/sineModelAnal-flute.py | 24 | 1179 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/flute-A4.wav'))
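# analysis parameters (following sms-tools conventions): Blackman analysis
# window, FFT size N, hop size H, magnitude threshold t in dB, minimum
# sine-track duration in seconds, maximum number of simultaneous sinusoids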
w = np.blackman(601)
N = 1024
H = 150
t = -80
minSineDur = .1
maxnSines = 150
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur)
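# tfreq, tmag, tphase: per-frame frequencies, magnitudes and phases of the
# detected sinusoidal tracks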
plt.figure(1, figsize=(9.5, 5))
maxplotfreq = 5000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (flute-A4.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-flute.png')
plt.show() | agpl-3.0 |
jcrist/blaze | blaze/compute/tests/test_bcolz_compute.py | 9 | 5874 | from __future__ import absolute_import, division, print_function
import pytest
bcolz = pytest.importorskip('bcolz')
from datashape import discover, dshape
import numpy as np
import pandas.util.testing as tm
from odo import into
from blaze import by
from blaze.expr import symbol
from blaze.compute.core import compute, pre_compute
from blaze.compute.bcolz import get_chunksize
b = bcolz.ctable(np.array([(1, 1., np.datetime64('2010-01-01')),
(2, 2., np.datetime64('NaT')),
(3, 3., np.datetime64('2010-01-03'))],
dtype=[('a', 'i8'),
('b', 'f8'),
('date', 'datetime64[D]')]))
t = symbol('t', 'var * {a: int64, b: float64, date: ?date}')
to = symbol('to', 'var * {a: int64, b: float64}')
bo = bcolz.ctable(np.array([(1, 1.), (2, 2.), (3, np.nan)],
dtype=[('a', 'i8'), ('b', 'f8')]))
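# module-level fixtures: b/t provide a ctable with a nullable date column
# (including one NaT), while bo/to provide a ctable containing a NaN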
def test_discover():
assert discover(b) == dshape('3 * {a: int64, b: float64, date: date}')
assert discover(b['a']) == dshape('3 * int64')
def test_reductions():
assert compute(t.a.sum(), b) == 6
assert compute(t.a.min(), b) == 1
assert compute(t.a.max(), b) == 3
assert compute(t.a.mean(), b) == 2.0
assert abs(compute(t.a.std(), b) - np.std([1, 2, 3])) < 1e-5
assert abs(compute(t.a.var(), b) - np.var([1, 2, 3])) < 1e-5
assert abs(compute(t.a.std(unbiased=True), b) - np.std([1, 2, 3],
ddof=1)) < 1e-5
assert abs(compute(t.a.var(unbiased=True), b) - np.var([1, 2, 3],
ddof=1)) < 1e-5
assert len(list(compute(t.distinct(), b))) == 3
assert len(list(compute(t.a.distinct(), b))) == 3
assert compute(t.a.nunique(), b) == 3
assert isinstance(compute(t.a.nunique(), b), np.integer)
assert compute(t.a.count(), b) == 3
assert isinstance(compute(t.date.count(), b), np.integer)
assert compute(t.date.nunique(), b) == 2
assert isinstance(compute(t.date.nunique(), b), np.integer)
assert compute(t.date.count(), b) == 2
assert isinstance(compute(t.a.count(), b), np.integer)
assert compute(t.a[0], b) == 1
assert compute(t.a[-1], b) == 3
assert compute(t[0], b) == compute(t[0], b)
assert compute(t[-1], b) == compute(t[-1], b)
def test_nunique():
assert compute(t.a.nunique(), b) == 3
assert compute(t.nunique(), b) == 3
def test_selection_head():
ds = dshape('var * {a: int32, b: int32, c: float64}')
b = into(bcolz.ctable,
[(i, i + 1, float(i) ** 2) for i in range(10)],
dshape=ds)
t = symbol('t', ds)
# numpy reductions return numpy scalars
assert compute((t.a < t.b).all(), b).item() is True
assert list(compute(t[t.a < t.b].a.head(10), b)) == list(range(10))
assert list(compute(t[t.a > t.b].a.head(10), b)) == []
assert into([], compute(t[t.a + t.b > t.c], b)) == [(0, 1, 0),
(1, 2, 1),
(2, 3, 4)]
assert len(compute(t[t.a + t.b > t.c].head(10), b)) # non-empty
assert len(compute(t[t.a + t.b < t.c].head(10), b)) # non-empty
def test_selection_isnan():
b = bcolz.ctable([[1, np.nan, 3], [1., 2., np.nan]], names=['a', 'b'])
t = symbol('t', discover(b))
lhs = compute(t[t.a.isnan()], b)
rhs = np.array([(np.nan, 2.0)], dtype=b.dtype)
for n in b.dtype.names:
assert np.isclose(lhs[n], rhs[n], equal_nan=True).all()
assert np.isclose(compute(t[~t.b.isnan()], b)[n],
np.array(
[(1, 1.0), (np.nan, 2.0)], dtype=b.dtype)[n],
equal_nan=True).all()
def test_count_isnan():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_object():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_struct():
assert compute(t[~t.b.isnan()].count(), b) == 3
def test_nrows():
assert compute(t.nrows, b) == len(b)
def test_nelements():
assert compute(t.nelements(axis=0), b) == len(b)
assert compute(t.nelements(), b) == len(b)
# This is no longer desired. Handled by compute_up
def dont_test_pre_compute():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
result = pre_compute(s[['a', 'b']], b)
assert result.names == ['a', 'b']
def eq(a, b):
return np.array_equal(a, b)
def test_unicode_field_names():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
assert eq(compute(s[u'a'], b)[:], compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:], compute(s[['a', 'c']], b)[:])
assert eq(compute(s[u'a'], b)[:],
compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:],
compute(s[['a', 'c']], b)[:])
def test_chunksize_inference():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]),
chunklen=2)
assert get_chunksize(b) == 2
def test_notnull():
with pytest.raises(AttributeError):
t.b.notnull
def test_by_with_single_row():
ct = bcolz.ctable([[1, 1, 3, 3], [1, 2, 3, 4]], names=list('ab'))
t = symbol('t', discover(ct))
subset = t[t.a == 3]
expr = by(subset.a, b_sum=subset.b.sum())
result = compute(expr, ct)
expected = compute(expr, ct, optimize=False)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
themrmax/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 11 | 25443 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    # init="pca" is not allowed together with metric="precomputed".
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
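# Shared helper for the gradient tests above: compute the exact joint
# probabilities for pos_input and compare the Barnes-Hut gradient evaluated
# at pos_output against the reference grad_output.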
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
    _barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
    # The test hangs if the quadtree insertion fails to terminate.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=-1, verbose=2,
random_state=1, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # When the computation is finished, the last gradient norm value is
        # just repeated, so we do not need to store it
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
| bsd-3-clause |
xhochy/arrow | python/pyarrow/tests/test_hdfs.py | 1 | 13325 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.util import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
with pytest.warns(DeprecationWarning):
return pa.hdfs.connect(host, port, user)
@pytest.mark.hdfs
class HdfsTestCases:
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client()
cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
        # TODO: passing this argument should not be needed when using a URI
result = pq.read_table(path, use_legacy_dataset=True)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(
path, filesystem=self.hdfs, use_legacy_dataset=True
).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
message = 'No libhdfs available on system'
if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):
pytest.fail(message)
else:
pytest.skip(message)
def test_orphaned_file(self):
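        # Regression check: releasing the client that opened the file must not
        # crash or invalidate the still-referenced file handle.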
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
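# Illustrative sketch (not part of the original test module): _get_hdfs_uri
# simply concatenates scheme, host, port and path read from the environment,
# so the result always has the form "hdfs://<host>:<port><path>".
def _hdfs_uri_format_sketch():
    uri = _get_hdfs_uri('/tmp/some-file')
    assert uri.startswith('hdfs://') and uri.endswith('/tmp/some-file')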
@pytest.mark.hdfs
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
def test_fastparquet_read_with_hdfs():
from pandas.testing import assert_frame_equal
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client()
df = util.make_dataframe()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
| apache-2.0 |
sumitsourabh/opencog | opencog/python/utility/functions.py | 34 | 11056 | from math import fabs, isnan
from datetime import datetime
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.numeric.globals import EPSILON
from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
from scipy.integrate import quad
__author__ = 'keyvan'
def integral(function, start, end):
if hasattr(function, 'integral'):
return function.integral(start, end)
area, error = quad(function, start, end)
return area
def almost_equals(a, b, epsilon=EPSILON):
if fabs(a - b) < epsilon:
return True
return False
def invoke_method_on(method, sequence_or_point):
if method is None:
return None
if not callable(method):
raise TypeError("'method' is not callable")
result = []
try:
for point in sequence_or_point:
if type(point) is datetime:
point = UnixTime(point)
result.append(method(point))
except TypeError:
if type(sequence_or_point) is datetime:
sequence_or_point = UnixTime(sequence_or_point)
return method(sequence_or_point)
return result
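# Illustrative sketch (not part of the original module): invoke_method_on
# applies a callable to a single point or maps it over a sequence, converting
# datetime inputs to UnixTime first.
def _invoke_method_on_sketch():
    square = lambda p: p * p
    assert invoke_method_on(square, 3) == 9
    assert invoke_method_on(square, [1, 2, 3]) == [1, 4, 9]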
def index_of_first_local_maximum(sequence):
first_time = True
index = 0
for element in sequence:
if first_time:
previous = element
first_time = False
continue
if element <= previous:
return index
previous = element
index += 1
return None
class Function(object):
_domain = None
_range = None
_function_undefined = None
def __init__(self, function_undefined=None, domain=None):
if function_undefined is not None:
self.function_undefined = function_undefined
if domain is not None:
if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'):
raise TypeError("'domain' should be iterable and support indexing")
self._domain = domain
def call_on_single_point(self, x):
"""
to override, __call__ invokes this to handle both points and sequences
"""
return 0
def derivative(self, point):
return None
def _check_domain_for(self, feature_name):
if self.domain is None:
raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format(
self.__class__.__name__, feature_name))
def plot(self, plt=None):
self._check_domain_for('plotting')
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.domain, self.range)
return plt
@property
def function_undefined(self):
return self._function_undefined
@function_undefined.setter
def function_undefined(self, value):
if value is not None and not isinstance(value, Function):
raise TypeError("'function_undefined' should be of type 'Function'")
self._function_undefined = value
@property
def domain(self):
return self._domain
@property
def range(self):
return self()
def __call__(self, x=None):
if x is None:
self._check_domain_for("call with 'None'")
x = self.domain
return invoke_method_on(self.call_on_single_point, x)
def __getitem__(self, index):
self._check_domain_for('indexing')
return self.range[index]
def __len__(self):
self._check_domain_for('len()')
return len(self.range)
def __iter__(self):
self._check_domain_for('iter()')
return iter(self.range)
def __reversed__(self):
self._check_domain_for('reversed()')
return reversed(self.range)
class FunctionLinear(Function):
def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None):
#(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)])
if (a, b) == (None, None):
a = (float(y_1) - y_0) / (x_1 - x_0)
b = y_0 - a * x_0
if isnan(a) or isnan(b):
pass
self.a = a
self.b = b
def call_on_single_point(self, x):
return float(self.a * x + self.b)
def intersect(self, other):
if almost_equals(self.a, other.a):
return None
x = (float(other.b) - self.b) / (self.a - other.a)
return x, self(x)
def integral(self, start, end):
if start >= end:
return 0
if self.a == 0:
return self.b * (end - start)
x_intercept = self.x_intercept
if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept):
return (self(start) + self(end)) * (end - start) / 2.0
minus_triangle = (x_intercept - start) * self(start)
plus_triangle = (end - x_intercept) * self(end)
return minus_triangle + plus_triangle
def derivative(self, point):
return self.a
@property
def x_intercept(self):
return - float(self.b) / self.a
@property
def y_intercept(self):
return self(0)
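# Illustrative sketch (not part of the original module): a line through
# (0, 0) and (2, 2) has slope 1 and intercept 0, and its integral over
# [0, 2] is the area of the triangle under it, i.e. 2.0.
def _function_linear_sketch():
    f = FunctionLinear(x_0=0, y_0=0, x_1=2, y_1=2)
    assert f(1.5) == 1.5
    assert f.integral(0, 2) == 2.0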
class FunctionHorizontalLinear(FunctionLinear):
def __init__(self, y_intercept):
FunctionLinear.__init__(self, a=0, b=y_intercept)
def call_on_single_point(self, x):
return self.b
def integral(self, start, end):
if start >= end:
return 0
if almost_equals(self.b, 0):
return 0
return float(self.b) * (end - start)
def derivative(self, point):
return 0
FUNCTION_ZERO = FunctionHorizontalLinear(0)
FUNCTION_ONE = FunctionHorizontalLinear(1)
class FunctionComposite(Function):
is_normalised = False
def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False):
if is_normalised is not False:
self.is_normalised = True
Function.__init__(self, function_undefined=function_undefined, domain=domain)
if not isinstance(dictionary_bounds_function, dict):
raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) "
"tuple keys and values of type 'Function'")
self._dictionary_bounds_function = dictionary_bounds_function
def call_on_single_point(self, x):
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= x:
if b >= x:
if self.dictionary_bounds_function[function_bounds] is None:
return None
return self.dictionary_bounds_function[function_bounds](x)
return self.function_undefined(x)
def integral(self, start, end):
if self.is_normalised and self.domain is not None:
if (start < self.domain[0] or almost_equals(start, self.domain[0])) and (
end > self.domain[-1] or almost_equals(end, self.domain[-1])):
return 1.0
if start >= end:
return 0
result = 0
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= start:
if b >= end:
return self.dictionary_bounds_function[function_bounds].integral(start, end)
not_ordered = {
(start, 0): 's', (end, 0): 'e',
(a, 1): 'a', (b, 1): 'b'
}
order = ''.join([not_ordered[i] for i in sorted(not_ordered)])
if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe':
continue
if order in 'seab abse':
continue
if order == 'saeb':
b = end
elif order == 'asbe':
a = start
result += self.dictionary_bounds_function[function_bounds].integral(a, b)
return result
def find_bounds_for(self, point):
for bounds in self.dictionary_bounds_function:
(a, b) = bounds
if a <= point and b >= point:
return bounds
def derivative(self, point):
return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point)
def function_in_point(self, point):
for bounds in self.dictionary_bounds_function:
a, b = bounds
if a <= point <= b:
return self.dictionary_bounds_function[bounds]
return None
# def functions_in_interval(self, interval_start, interval_end):
# dictionary_bounds_function = {}
# for bounds in self.dictionary_bounds_function:
# a, b = bounds
# if (interval_start < a or almost_equals(interval_start, a)) and (
#
# ):
@property
def dictionary_bounds_function(self):
return self._dictionary_bounds_function
class FunctionPiecewiseLinear(FunctionComposite):
def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False):
self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output)
dictionary_bounds_function = {}
for i in xrange(1, len(self.input_list)):
x_0, x_1 = self.input_list[i - 1], self.input_list[i]
y_0, y_1 = self.output_list[i - 1], self.output_list[i]
dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1)
if NEGATIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined
if POSITIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined
FunctionComposite.__init__(self, dictionary_bounds_function,
function_undefined=function_undefined,
domain=self.input_list,
is_normalised=is_normalised)
def normalised(self):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
if almost_equals(area, 0):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
dictionary_input_output = {}
output_list = [y / area for y in self.output_list]
for i in xrange(len(self.input_list)):
dictionary_input_output[self.input_list[i]] = output_list[i]
result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined)
result.is_normalised = True
return result
def __and__(self, other):
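        # Unfinished: the loop below only walks this function's segments and
        # does not yet combine them with `other`; the method returns None.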
for bounds in self.dictionary_bounds_function:
a, b = bounds
linear_function = self.dictionary_bounds_function[bounds]
if __name__ == '__main__':
a = FunctionLinear(1, 0)
b = FunctionLinear(-1, 1)
print a.intersect(b)
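    # Illustrative sketch (not part of the original module): a triangular
    # piecewise-linear function built from {x: y} points; its area is 1.0,
    # so normalised() leaves it integrating to exactly 1.
    tri = FunctionPiecewiseLinear({0: 0.0, 1: 1.0, 2: 0.0}, FUNCTION_ZERO)
    print tri.integral(0, 2)
    print tri.normalised().integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)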
| agpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
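# Illustrative sketch (not part of scipy): on exactly linear data the
# estimated slope and intercept recover the generating line and r is 1.
def _linregress_sketch():
    x = np.arange(10, dtype=float)
    y = 2.0 * x + 1.0
    res = linregress(x, y)
    assert np.allclose([res.slope, res.intercept, res.rvalue], [2.0, 1.0, 1.0])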
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
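# Illustrative sketch (not part of scipy): the Theil-Sen slope is the median
# of all pairwise slopes, so a single gross outlier barely moves it.
def _theilslopes_sketch():
    x = np.arange(10, dtype=float)
    y = 2.0 * x + 1.0
    y[-1] += 100.0  # one gross outlier
    medslope, medinter, lo_slope, up_slope = theilslopes(y, x)
    assert abs(medslope - 2.0) < 0.5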
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
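# Illustrative sketch (not part of scipy): only values occurring more than
# once are returned, together with how often each one occurs.
def _find_repeats_sketch():
    values, counts = _find_repeats([1., 2., 2., 3., 3., 3.])
    assert values.tolist() == [2., 3.]
    assert counts.tolist() == [2, 3]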
| mit |
nikitasingh981/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 50 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
Mako-kun/mangaki | mangaki/mangaki/utils/svd.py | 2 | 5410 | from django.contrib.auth.models import User
from mangaki.models import Rating, Work, Recommendation
from mangaki.utils.chrono import Chrono
from mangaki.utils.values import rating_values
from scipy.sparse import lil_matrix
from sklearn.utils.extmath import randomized_svd
import numpy as np
from django.db import connection
import pickle
import json
import math
NB_COMPONENTS = 10
TOP = 10
class MangakiSVD(object):
M = None
U = None
sigma = None
VT = None
chrono = None
inv_work = None
inv_user = None
work_titles = None
def __init__(self):
self.chrono = Chrono(True)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
def load(self, filename):
with open(filename, 'rb') as f:
backup = pickle.load(f)
self.M = backup.M
self.U = backup.U
self.sigma = backup.sigma
self.VT = backup.VT
self.inv_work = backup.inv_work
self.inv_user = backup.inv_user
self.work_titles = backup.work_titles
def fit(self, X, y):
self.work_titles = {}
for work in Work.objects.values('id', 'title'):
self.work_titles[work['id']] = work['title']
work_ids = list(Rating.objects.values_list('work_id', flat=True).distinct())
nb_works = len(work_ids)
self.inv_work = {work_ids[i]: i for i in range(nb_works)}
user_ids = list(User.objects.values_list('id', flat=True))
nb_users = len(user_ids)
self.inv_user = {user_ids[i]: i for i in range(nb_users)}
self.chrono.save('get_work_ids')
# print("Computing M: (%i × %i)" % (nb_users, nb_works))
self.M = lil_matrix((nb_users, nb_works))
"""ratings_of = {}
for (user_id, work_id), rating in zip(X, y):
ratings_of.setdefault(user_id, []).append(rating)"""
for (user_id, work_id), rating in zip(X, y):
self.M[self.inv_user[user_id], self.inv_work[work_id]] = rating #- np.mean(ratings_of[user_id])
# np.save('backupM', self.M)
self.chrono.save('fill matrix')
# Ranking computation
self.U, self.sigma, self.VT = randomized_svd(self.M, NB_COMPONENTS, n_iter=3, random_state=42)
# print('Formes', self.U.shape, self.sigma.shape, self.VT.shape)
self.save('backup.pickle')
self.chrono.save('factor matrix')
def predict(self, X):
y = []
for user_id, work_id in X:
i = self.inv_user[user_id]
j = self.inv_work[work_id]
y.append(self.U[i].dot(np.diag(self.sigma)).dot(self.VT.transpose()[j]))
return np.array(y)
def get_reco(self, username, sending=False):
target_user = User.objects.get(username=username)
the_user_id = target_user.id
svd_user = User.objects.get(username='svd')
work_ids = {self.inv_work[work_id]: work_id for work_id in self.inv_work}
nb_works = len(work_ids)
seen_works = set(Rating.objects.filter(user__id=the_user_id).exclude(choice='willsee').values_list('work_id', flat=True))
the_i = self.inv_user[the_user_id]
self.chrono.save('get_seen_works')
        print('my vector (size %d)' % len(self.U[the_i]), self.U[the_i])
print(self.sigma)
for i, line in enumerate(self.VT):
print('=> Ligne %d' % (i + 1), '(ma note : %f)' % self.U[the_i][i])
sorted_line = sorted((line[j], self.work_titles[work_ids[j]]) for j in range(nb_works))[::-1]
top5 = sorted_line[:10]
bottom5 = sorted_line[-10:]
for anime in top5:
print(anime)
for anime in bottom5:
print(anime)
"""if i == 0 or i == 1: # First two vectors explaining variance
with open('vector%d.json' % (i + 1), 'w') as f:
vi = X.dot(line).tolist()
x_norm = [np.dot(X.data[k], X.data[k]) / (nb_works + 1) for k in range(nb_users + 1)]
f.write(json.dumps({'v': [v / math.sqrt(x_norm[k]) if x_norm[k] != 0 else float('inf') for k, v in enumerate(vi)]}))"""
# print(VT.dot(VT.transpose()))
# return
the_ratings = self.predict((the_user_id, work_ids[j]) for j in range(nb_works))
ranking = sorted(zip(the_ratings, [(work_ids[j], self.work_titles[work_ids[j]]) for j in range(nb_works)]), reverse=True)
# Summarize the results of the ranking for the_user_id:
# “=> rank, title, score”
c = 0
for i, (rating, (work_id, title)) in enumerate(ranking, start=1):
if work_id not in seen_works:
print('=>', i, title, rating, self.predict([(the_user_id, work_id)]))
if Recommendation.objects.filter(user=svd_user, target_user__id=the_user_id, work__id=work_id).count() == 0:
Recommendation.objects.create(user=svd_user, target_user_id=the_user_id, work_id=work_id)
c += 1
elif i < TOP:
print(i, title, rating)
if c >= TOP:
break
"""print(len(connection.queries), 'queries')
for line in connection.queries:
print(line)"""
self.chrono.save('complete')
def __str__(self):
return '[SVD]'
def get_shortname(self):
return 'svd'
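# Illustrative sketch (not part of the original module): predict() rebuilds a
# single matrix entry as U[i] . diag(sigma) . VT[:, j]; when the number of
# components covers the full rank, the truncated SVD reproduces the matrix
# almost exactly. Uses the module-level numpy and randomized_svd imports.
def _randomized_svd_reconstruction_sketch():
    M = np.random.RandomState(0).rand(20, 15)
    U, sigma, VT = randomized_svd(M, 15, n_iter=7, random_state=42)
    approx = U.dot(np.diag(sigma)).dot(VT)
    assert np.allclose(approx, M)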
| agpl-3.0 |
IndraVikas/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
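# Illustrative sketch (not part of the original example): the extractor turns
# raw posts into a record array with separate 'subject' and 'body' fields.
def _subject_body_extractor_sketch():
    posts = ["Subject: hello world\n\nThis is the body text."]
    features = SubjectBodyExtractor().fit_transform(posts)
    assert features['subject'][0].strip() == 'hello world'
    assert 'body text' in features['body'][0]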
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
samuelstjean/dipy | scratch/very_scratch/diffusion_sphere_stats.py | 20 | 18082 | import nibabel
import os
import numpy as np
import dipy as dp
#import dipy.core.generalized_q_sampling as dgqs
import dipy.reconst.gqi as dgqs
import dipy.reconst.dti as ddti
import dipy.reconst.recspeed as rp
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
#import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, then we have 100 different
directions. 1000 * 100 is the number of all rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dgqs.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = ddti.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, then we have 100 different
directions. 100 * 1000 is the number of all rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
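# Illustrative sketch (not part of the original script): vertex k and vertex
# k + 181 of the EDS362 mesh are antipodal, so distinct maximal directions
# are counted after reducing the indices modulo 181.
def _antipodal_index_sketch():
    indices = np.array([19, 200, 42])
    assert len(set(np.remainder(indices, 181))) == 2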
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
#eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
gqfile = simdir+'gq/'+dataname+'.pkl'
gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
#print np.vstack(dt_results).shape, np.vstack(gq_results).shape
results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#print results.shape
#results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
def run_small_data():
#smalldir = '/home/ian/Devel/dipy/dipy/data/'
smalldir = '/home/eg309/Devel/dipy/dipy/data/'
# from os.path import join as opj
# bvals=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.bvals.npy'))
bvals=np.load(smalldir+'small_64D.bvals.npy')
# gradients=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.gradients.npy'))
gradients=np.load(smalldir+'small_64D.gradients.npy')
# img =ni.load(os.path.join(os.path.dirname(__file__),\
# 'data','small_64D.nii'))
img=nibabel.load(smalldir+'small_64D.nii')
small_data=img.get_data()
print 'real_data', small_data.shape
gqsmall = dgqs.GeneralizedQSampling(small_data,bvals,gradients)
tnsmall = ddti.Tensor(small_data,bvals,gradients)
x,y,z,a,b=tnsmall.evecs.shape
evecs=tnsmall.evecs
xyz=x*y*z
evecs = evecs.reshape(xyz,3,3)
#vs = np.sign(evecs[:,2,:])
#print vs.shape
#print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
#evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
#print evecs.shape
evals=tnsmall.evals
evals = evals.reshape(xyz,3)
#print evals.shape
#print('GQS in %d' %(t2-t1))
'''
eds=np.load(opj(os.path.dirname(__file__),\
'..','matrices',\
'evenly_distributed_sphere_362.npz'))
'''
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
#odf_faces=eds['faces']
#Yeh et.al, IEEE TMI, 2010
#calculate the odf using GQI
scaling=np.sqrt(bvals*0.01506) # 0.01506 = 6*D where D is the free
#water diffusion coefficient
#l_values sqrt(6 D tau) D free water
#diffusion coefficiet and tau included in the b-value
tmp=np.tile(scaling,(3,1))
b_vector=gradients.T*tmp
Lambda = 1.2 # smoothing parameter - diffusion sampling length
q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
#implements equation no. 9 from Yeh et.al.
S=small_data.copy()
x,y,z,g=S.shape
S=S.reshape(x*y*z,g)
QA = np.zeros((x*y*z,5))
IN = np.zeros((x*y*z,5))
FA = tnsmall.fa().reshape(x*y*z)
fwd = 0
#Calculate Quantitative Anisotropy and find the peaks and the indices
#for every voxel
summary = {}
summary['vertices'] = odf_vertices
v = odf_vertices.shape[0]
summary['faces'] = odf_faces
f = odf_faces.shape[0]
for (i,s) in enumerate(S):
#print 'Volume %d' % i
istr = str(i)
summary[istr] = {}
t0, t1, t2, npa = gqsmall.npa(s, width = 5)
summary[istr]['triple']=(t0,t1,t2)
summary[istr]['npa']=npa
odf = Q2odf(s,q2odf_params)
peaks,inds=rp.peak_finding(odf,odf_faces)
fwd=max(np.max(odf),fwd)
#peaks = peaks - np.min(odf)
n_peaks=min(len(peaks),5)
peak_heights = [odf[i] for i in inds[:n_peaks]]
#QA[i][:l] = peaks[:n_peaks]
IN[i][:n_peaks] = inds[:n_peaks]
summary[istr]['odf'] = odf
summary[istr]['peaks'] = peaks
summary[istr]['inds'] = inds
summary[istr]['evecs'] = evecs[i,:,:]
summary[istr]['evals'] = evals[i,:]
summary[istr]['n_peaks'] = n_peaks
summary[istr]['peak_heights'] = peak_heights
# summary[istr]['fa'] = tnsmall.fa()[0]
summary[istr]['fa'] = FA[i]
'''
QA/=fwd
QA=QA.reshape(x,y,z,5)
IN=IN.reshape(x,y,z,5)
'''
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
#peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
#peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]
print '#voxels with 1, 2, 3 peaks', len(peaks_1),len(peaks_2),len(peaks_3)
return FA, summary
def Q2odf(s,q2odf_params):
''' construct odf for a voxel '''
odf=np.dot(s,q2odf_params)
return odf
#run_comparisons()
#run_gq_sims()
FA, summary = run_small_data()
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
fa_npa_1 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_1]
fa_npa_2 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_2]
fa_npa_3 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_3]
| bsd-3-clause |
Vimos/scikit-learn | sklearn/kernel_approximation.py | 7 | 18505 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
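# Illustrative sketch (not part of scikit-learn): the inner products of the
# transformed features approximate the exact RBF kernel, and the Monte Carlo
# error shrinks as n_components grows.
def _rbf_sampler_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 4)
    Z = RBFSampler(gamma=1., n_components=5000, random_state=0).fit_transform(X)
    approx = np.dot(Z, Z.T)
    exact = pairwise_kernels(X, metric='rbf', gamma=1.)
    assert np.abs(approx - exact).mean() < 0.05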
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than"
" -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
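# A minimal usage sketch added for illustration; it is not part of the upstream
# module. The helper name and the toy data are assumptions made for the example.
def _additive_chi2_sampler_example():
    import numpy as np
    # Histogram-like, non-negative features as used with the additive chi2 kernel.
    X = np.random.RandomState(0).rand(10, 4)
    transformer = AdditiveChi2Sampler(sample_steps=2)
    X_new = transformer.fit_transform(X)
    # Each of the 4 input features is expanded into 2 * sample_steps - 1 = 3
    # output features (the zeroth component plus a cos/sin pair per extra step).
    return X_new.shape  # (10, 12)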
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential, chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
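# A minimal usage sketch added for illustration; it is not part of the upstream
# module. The helper name, the synthetic data and the parameter values are
# assumptions; in practice the Nystroem features are usually fed to a linear model.
def _nystroem_usage_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)
    # Approximate an RBF kernel map with 20 components sampled from the data.
    feature_map = Nystroem(kernel="rbf", gamma=0.5, n_components=20, random_state=0)
    X_features = feature_map.fit_transform(X)
    return X_features.shape  # (100, 20)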
| bsd-3-clause |
xebitstudios/Kayak | examples/poisson_glm.py | 3 | 1224 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import kayak
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500
# Random inputs.
X = npr.randn(N,D)
true_W = npr.randn(D,P)
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)
kyk_batcher = kayak.Batcher(batch_size, N)
# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)
# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)
# Weights.
W = 0.01*npr.randn(D,P)
kyk_W = kayak.Parameter(W)
# Linear layer.
kyk_activation = kayak.MatMult( kyk_inputs, kyk_W)
# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)
# Poisson negative log likelihood.
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets
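# Explanatory note (added for clarity): for a Poisson likelihood with rate
# lam = exp(X W), the per-observation negative log-likelihood is
#     lam - y * log(lam) + log(y!)
# The log(y!) term does not depend on the weights W, so it is dropped in the
# expression above, leaving kyk_lam - log(kyk_lam) * kyk_targets.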
# Sum the losses.
kyk_loss = kayak.MatSum( kyk_nll )
for ii in xrange(100):
for batch in kyk_batcher:
loss = kyk_loss.value
print loss, np.sum((kyk_W.value - true_W)**2)
grad = kyk_loss.grad(kyk_W)
kyk_W.value -= learn * grad
# Plot the true and inferred rate for a subset of data.
T_slice = slice(0,100)
kyk_inputs.value = X[T_slice,:]
plt.figure()
plt.plot(lam[T_slice], 'k')
plt.plot(kyk_lam.value, '--r')
plt.show() | mit |
jbloom/mutpath | src/plot.py | 1 | 10257 | """Module for performing plotting for ``mutpath`` package.
This module uses ``pylab`` and ``matplotlib`` to make plots. These plots will
fail if ``pylab`` and ``matplotlib`` are not available for importation. Before
running any function in this module, you can run the *PylabAvailable*
function to determine if ``pylab`` and ``matplotlib`` are available. Otherwise,
calling any other function will raise an Exception if these modules are
not available. The ``pdf`` backend is used for ``matplotlib`` / ``pylab``. This means
that plots must be created as PDF files.
Functions are:
`PylabAvailable`
`CumulativeFractionPlot`
`DatesPlot`
`Base10Formatter`
`SplitLabel`
Written by Jesse Bloom.
"""
import os
import sys
import math
# global variable _pylabavailable indicates if pylab/matplotlib present
try:
import matplotlib
matplotlib.use('pdf')
import pylab
_pylabavailable = True
except ImportError:
_pylabavailable = False
def PylabAvailable():
"""Returns True if pylab/matplotlib available, False otherwise.
You should call this function to test for the availability of the
pylab/matplotlib plotting modules before using other functions in
this module.
"""
return _pylabavailable
def DatesPlot(mutdates, plotfile, interval):
"""Plots dates of mutations.
Uses pylab / matplotlib to plot the dates and credible intervals
    for mutations. Will raise an error if *PylabAvailable() == False*.
The plot is a PDF.
* *mutdates* is a list of the mutations, in the form of the tuples
*(median, mininterval, maxinterval, mut, fractoca, weight)*. Mutations
are plotted in the order they are listed. In these tuples:
* *median* : posterior median date
        * *mininterval* : minimum of credible interval
* *maxinterval* : maximum of credible interval
* *mut* : string giving name of mutation
* *fractoca* : probability mutation is on path from common ancestor
to starting sequence
* *weight* : fraction of paths containing mutation.
* *plotfile* is a string giving the name of the PDF file we create.
* *interval* is the range of the credible interval. For example, 0.9
means a 90% credible interval.
"""
ext = os.path.splitext(plotfile)[1].lower()
if ext != '.pdf':
raise ValueError("Extension must be .pdf, but found %s" % ext)
if not PylabAvailable():
raise ValueError("pylab / matplotlib not available.")
if not mutdates:
raise ValueError("no mutation dates to plot")
tocalabels = []
tocamedians = []
tocaerrlow = []
tocaerrhigh = []
tocays = []
fromcalabels = []
fromcamedians = []
fromcaerrlow = []
fromcaerrhigh = []
fromcays = []
y = 0
for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
label = "%s" % (mut)
errlow = median - mininterval
errhigh = maxinterval - median
if fractoca > 0.5:
tocays.append(y)
tocalabels.append(label)
tocamedians.append(median)
tocaerrlow.append(errlow)
tocaerrhigh.append(errhigh)
else:
fromcays.append(y)
fromcalabels.append(label)
fromcamedians.append(median)
fromcaerrlow.append(errlow)
fromcaerrhigh.append(errhigh)
y += 1
(lmargin, rmargin, bmargin, tmargin) = (0.11, 0.05, 0.08, 0.01)
matplotlib.rc('font', size=10)
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('legend', numpoints=1)
matplotlib.rc('legend', fontsize=10)
fig = pylab.figure(figsize=(6, 6))
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - tmargin - bmargin])
tocabar = fromcabar = None
if tocalabels:
tocabar = pylab.errorbar(tocamedians, tocays, xerr=[tocaerrlow, tocaerrhigh], fmt='sr')
if fromcalabels:
fromcabar = pylab.errorbar(fromcamedians, fromcays, xerr=[fromcaerrlow, fromcaerrhigh], fmt='sb')
ny = len(mutdates)
pylab.gca().set_ylim((-1, ny))
pylab.gca().yaxis.set_major_locator(matplotlib.ticker.FixedLocator([y for y in range(ny)]))
pylab.gca().yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(tocalabels + fromcalabels))
pylab.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
pylab.xlabel("Date (posterior median and Bayesian %.2f%s credible interval)" % (interval * 100, '%'))
if tocabar and fromcabar:
pylab.legend([tocabar[0], fromcabar[0]], ['path to common ancestor', 'path from common ancestor'], loc='lower right')
elif tocabar:
pylab.legend([tocabar[0]], ['path to common ancestor'], loc='lower right')
elif fromcabar:
pylab.legend([fromcabar[0]], ['path from common ancestor'], loc='lower right')
pylab.savefig(plotfile)
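# Illustrative sketch (not part of the original module): the mutation names and
# dates below are made up purely to show the expected tuple format
# (median, mininterval, maxinterval, mut, fractoca, weight).
def _datesplot_example():
    mutdates = [
        (2001.5, 2000.9, 2002.2, 'A123T', 0.8, 0.95),
        (2003.1, 2002.4, 2003.8, 'G45S', 0.2, 0.60),
    ]
    if PylabAvailable():
        DatesPlot(mutdates, 'example_dates.pdf', interval=0.9)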
def CumulativeFractionPlot(datalist, plotfile, title, xlabel):
"""Creates a cumulative fraction plot.
Takes a list of numeric data. Plots a cumulative fraction
plot giving the fraction of the data points that are <=
the indicated value.
*datalist* is a list of numbers giving the data for which we
are computing the cumulative fraction plot. Raises an
exception if this is an empty list.
*plotfile* is the name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be '.pdf'.
    *title* is a string placed above the plot as a title. Uses LaTeX
    formatting.
    *xlabel* is the label given to the X-axis. Uses LaTeX formatting.
This function uses pylab / matplotlib. It will raise an Exception if
these modules cannot be imported (if PylabAvailable() is False).
"""
if len(datalist) < 1:
raise ValueError("datalist is empty")
if not _pylabavailable:
raise ImportError("Could not find pylab or matplotlib")
if os.path.splitext(plotfile)[1] != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
datalist.sort() # sort from smallest to largest
(xmin, xmax) = (datalist[0], datalist[-1])
n = len(datalist)
cumfracs = []
cf = 0.0
for x in datalist:
cf += 1. / n
cumfracs.append(cf)
assert len(datalist) == len(cumfracs)
assert abs(1.0 - cf) < 1e-7
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=12)
fig = pylab.figure(figsize=(6, 4))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.01, 0.15, 0.1)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
bmargin - tmargin])
pylab.plot(datalist, cumfracs, 'r-')
pylab.gca().set_ylim([0, 1])
pylab.gca().set_xlim([xmin, xmax])
pylab.ylabel('cumulative fraction')
pylab.xlabel(xlabel)
pylab.title(title)
if plotfile:
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
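# Illustrative sketch (not part of the original module): plots the cumulative
# fraction of 100 random values. Note that this function turns on matplotlib's
# LaTeX text rendering, so a working LaTeX installation is assumed.
def _cumulativefractionplot_example():
    import random
    data = [random.random() for _ in range(100)]
    if PylabAvailable():
        CumulativeFractionPlot(data, 'example_cumfrac.pdf',
                               title='random data', xlabel='value')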
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
    Converts a number into LaTeX formatting with scientific notation.
    Takes a number and converts it to a string that can be shown
    in LaTeX using math mode. It is converted to scientific notation
    if the criterion specified by *exp_cutoff* is met.
*number* the number to be formatted, should be a float or integer.
Currently only works for numbers >= 0
*exp_cutoff* convert to scientific notation if abs(math.log10(number)) >= this.
*exp_decimal_digits* show this many digits after the decimal if number
is converted to scientific notation.
*decimal_digits* show this many digits after the decimal if number
is NOT converted to scientific notation.
    The returned value is the LaTeX string. If the number is zero, the
returned string is simply '0'.
>>> Base10Formatter(103, 3, 1, 1)
'103.0'
>>> Base10Formatter(103.0, 2, 1, 1)
'1.0 \\\\times 10^{2}'
>>> Base10Formatter(103.0, 2, 2, 1)
'1.03 \\\\times 10^{2}'
>>> Base10Formatter(2892.3, 3, 1, 1)
'2.9 \\\\times 10^{3}'
>>> Base10Formatter(0.0, 3, 1, 1)
'0'
>>> Base10Formatter(0.012, 2, 1, 1)
'1.2 \\\\times 10^{-2}'
>>> Base10Formatter(-0.1, 3, 1, 1)
Traceback (most recent call last):
...
ValueError: number must be >= 0
"""
if number < 0:
raise ValueError('number must be >= 0')
if number == 0:
return '0'
exponent = int(math.log10(number))
if math.log10(number) < exponent and number < 1:
exponent -= 1
if abs(exponent) >= exp_cutoff:
x = number / (10.**exponent)
formatstr = '%.' + '%d' % exp_decimal_digits + 'f \\times 10^{%d}'
return formatstr % (x, exponent)
else:
formatstr = '%.' + '%d' % decimal_digits + 'f'
return formatstr % number
def SplitLabel(label, splitlen, splitchar):
"""Splits a string with a return if it exceeds a certain length.
*label* a string giving the label we might split.
*splitlen* the maximum length of a label before we attempt to
split it.
*splitchar* the character added when splitting a label.
If len(*label*) > *splitlen*, we attempt to split the label in the
middle by adding *splitchar*. The label is split as close to the
middle as possible while splitting at a space.
    No splitting, as the label length is less than *splitlen*
>>> SplitLabel('WT virus 1', 10, '\\n')
'WT virus 1'
Splitting of this label
>>> SplitLabel('WT plasmid 1', 10, '\\n')
'WT\\nplasmid 1'
Splitting of this label
>>> SplitLabel('mutated WT plasmid 1', 10, '\\n')
'mutated WT\\nplasmid 1'
"""
if len(label) <= splitlen:
return label
else:
j = 0
imid = len(label) // 2
index = None
while 0 <= imid - j <= imid + j < len(label):
if label[imid - j].isspace():
return "%s%s%s" % (label[ : imid - j], splitchar, label[imid - j + 1 : ])
elif label[imid + j].isspace():
return "%s%s%s" % (label[ : imid + j], splitchar, label[imid + j + 1 : ])
j += 1
else:
return label # no white space to split
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
edxnercel/edx-platform | .pycharm_helpers/pydev/pydev_ipython/inputhook.py | 52 | 18411 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
            app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use instead the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL and set the display function to a
dummy one and set the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
        self._current_gui = GUI_GTK3
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call the function pyplot.pause, which updates and displays the
        active figure during the pause. It is not MacOSX-specific, but it
        lets us avoid input hooks in the native MacOSX backend.
        We also should not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
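# Illustrative sketch (not part of the original module): a return-control
# callback must be registered before a GUI hook can be enabled. The callback
# below is a trivial placeholder, and a working Tk installation with a display
# is assumed.
def _enable_gui_example():
    set_return_control_callback(lambda: True)
    # Install the Tk input hook; returns the (possibly newly created) Tk root.
    return enable_gui(GUI_TK)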
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| agpl-3.0 |
mmottahedi/neuralnilm_prototype | scripts/e249.py | 2 | 3897 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=True
#lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
layers_config=[
{
'type': LSTMLayer,
'num_units': 10,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
]
)
def exp_x(name, learning_rate):
global source
try:
a = source
except NameError:
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
updates=partial(nesterov_momentum, learning_rate=learning_rate)
))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid,
'W': Normal(std=(1/sqrt(50)))
}
)
net = Net(**net_dict_copy)
return net
def main():
for experiment, learning_rate in [('a', 1.0), ('b', 0.1), ('c', 0.01),
('d', 0.001), ('e', 0.0001)]:
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
print("***********************************")
print("Preparing", full_exp_name, "...")
try:
net = exp_x(full_exp_name, learning_rate)
run_experiment(net, path, epochs=1000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
| mit |
carlvlewis/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/datasets/_openml.py | 2 | 34451 | import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ['fetch_openml']
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', openml_path + ".gz")
def _retry_with_clean_cache(
openml_path: str, data_home: Optional[str]
) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + openml_path)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode='rb')
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, 'rb')
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str,
error_message: Optional[str],
data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
        the JSON result from the OpenML server if the call was successful;
        an error is raised otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
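# Illustrative sketch (not part of the upstream module): the sparse ARFF data
# below is made up to show how the kept columns are re-labelled.
def _split_sparse_columns_example():
    # (values, row indices, column indices); keep columns 1 and 3, which are
    # re-labelled to 0 and 1 in the result.
    arff_data = ([1.0, 2.0, 3.0], [0, 0, 1], [1, 3, 2])
    return _split_sparse_columns(arff_data, include_columns=[1, 3])
    # -> ([1.0, 2.0], [0, 0], [0, 1])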
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff['data']
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError(
"shape must be provided when arr['data'] is a Generator"
)
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(itertools.chain.from_iterable(arff_data),
dtype='float64', count=count)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape, dtype=np.float64)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError('Unexpected Data Type obtained from arff.')
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame
"""
if feature['data_type'] == 'string':
return object
elif feature['data_type'] == 'nominal':
return 'category'
# only numeric, integer, real are left
elif (feature['number_of_missing_values'] != '0' or
feature['data_type'] in ['numeric', 'real']):
# cast to floats when there are any missing values
return np.float64
elif feature['data_type'] == 'integer':
return np.int64
raise ValueError('Unsupported feature: {}'.format(feature))
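# Illustrative sketch (not part of the upstream module): the feature dicts below
# are minimal, made-up examples containing only the keys _feature_to_dtype inspects.
def _feature_to_dtype_example():
    nominal = {'data_type': 'nominal', 'number_of_missing_values': '0'}
    integer_with_nan = {'data_type': 'integer', 'number_of_missing_values': '2'}
    # A nominal feature becomes a pandas 'category'; an integer column with
    # missing values is upcast to float64 so that NaN can be represented.
    return _feature_to_dtype(nominal), _feature_to_dtype(integer_with_nan)
    # -> ('category', numpy.float64)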
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support('fetch_openml with as_frame=True')
attributes = OrderedDict(arff['attributes'])
arff_columns = list(attributes)
if not isinstance(arff['data'], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff['data'])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff['data'], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == 'category':
cats_without_missing = [cat for cat in attributes[column]
if cat is not None and
not is_scalar_nan(cat)]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df, )
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        json representation of the first dataset object that adhered to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data['data']['dataset']
if len(res) > 1:
warn("Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]['version']))
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name,
version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data['data']['dataset'][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_set_description']
def _get_data_features(
data_id: int, data_home: Optional[str]
) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_features']['feature']
def _get_data_qualities(
data_id: int, data_home: Optional[str]
) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get('data_qualities', {}).get('quality', [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d['name']: d['value'] for d in data_qualities}
return int(float(qualities.get('NumberOfInstances', default_n_samples)))
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type, encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode('utf-8')
stream = _stream_checksum_generator(response)
arff = _arff.load(stream,
return_type=return_type,
encode_nominal=encode_nominal)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError("md5 checksum of local file for " + url +
" does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry...")
return parsed_arff
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str
):
"""Download OpenML ARFF and convert to Bunch of data
"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature['name']: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]['index'])
for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]['index'])
for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat['number_of_missing_values'])
if nr_missing > 0:
raise ValueError('Target column {} has {} missing values. '
'Missing values are not supported for target '
'columns. '.format(feat['name'], nr_missing))
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(_convert_arff_data_dataframe, columns=columns,
features_dict=features_dict)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
# nominal attributes is a dict mapping from the attribute name to
# the possible values. Includes also the target column (which will
# be popped off below, before it will be packed in the Bunch
# object)
nominal_attributes = {k: v for k, v in arff['attributes']
if isinstance(v, list) and
k in data_columns + target_columns}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {col_name in nominal_attributes
for col_name in target_columns}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack([
np.take(
np.asarray(nominal_attributes.pop(col_name),
dtype='O'),
y[:, i:i + 1].astype(int, copy=False))
for i, col_name in enumerate(target_columns)
])
elif any(is_classification):
raise ValueError('Mix of nominal and non-nominal targets is '
'not currently supported')
# reshape y back to 1-D array, if there is only 1 target column;
# back to None if there are not target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(
_load_arff_response)(url, data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(data=X, target=y, frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns)
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
raise ValueError('target_column should be list, '
'got: %s' % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError('Could not find target_column={}'.format(target_column))
if features_dict[target_column]['data_type'] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]['is_ignore'] == 'true':
warn('target_column={} has flag is_ignore.'.format(
target_column))
if features_dict[target_column]['is_row_identifier'] == 'true':
warn('target_column={} has flag is_row_identifier.'.format(
target_column))
if len(found_types) > 1:
raise ValueError('Can only handle homogeneous multi-target datasets, '
'i.e., all targets are either numeric or '
'categorical.')
def _valid_data_column_names(features_list, target_columns):
    # Logic for determining which columns can be learned on. Per the OpenML
    # guide, columns that carry the `is_row_identifier` or `is_ignore` flag
    # cannot be learned on. Target columns are also excluded.
valid_data_column_names = []
for feature in features_list:
if (feature['name'] not in target_columns
and feature['is_ignore'] != 'true'
and feature['is_row_identifier'] != 'true'):
valid_data_column_names.append(feature['name'])
return valid_data_column_names
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = 'active',
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = 'default-target',
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = 'auto'
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'openml')
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name))
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info['did']
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version))
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or "
"data_id.")
data_description = _get_data_description_by_id(data_id, data_home)
if data_description['status'] != "active":
warn("Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description['version'],
data_description['name'],
data_description['url']))
if 'error' in data_description:
warn("OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description['error']))
if 'warning' in data_description:
warn("OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description['warning']))
return_sparse = False
if data_description['format'].lower() == 'sparse_arff':
return_sparse = True
if as_frame == 'auto':
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError('Cannot return dataframe with sparse data')
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if 'true' in (feature['is_ignore'], feature['is_row_identifier']):
continue
if feature['data_type'] == 'string':
raise ValueError('STRING attributes are not supported for '
'array representation. Try as_frame=True')
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [feature['name'] for feature in features_list
if feature['is_target'] == 'true']
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError("Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column)))
data_columns = _valid_data_column_names(features_list,
target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description['file_id'])
bunch = _download_data_to_bunch(url, return_sparse, data_home,
as_frame=bool(as_frame),
features_list=features_list, shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description[
"md5_checksum"])
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop('description'))
bunch.update(
DESCR=description, details=data_description,
url="https://www.openml.org/d/{}".format(data_id))
return bunch
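# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the library code
# above). It assumes network access to openml.org; the dataset name 'iris',
# version 1 and data_id 61 are illustrative values only.
if __name__ == '__main__':
    # Fetch by name: returns a Bunch with data, target, DESCR, etc.
    bunch = fetch_openml(name='iris', version=1, as_frame=False)
    print(bunch.data.shape, bunch.target.shape)
    # Fetch by data_id and unpack directly into (X, y).
    X, y = fetch_openml(data_id=61, return_X_y=True, as_frame=False)
    print(X.shape, y.shape)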
| bsd-3-clause |
zrhans/pythonanywhere | pyscripts/ply_wrose.py | 1 | 1678 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,Pres.Atm.,Pres.Atm._min,Pres.Atm._max,Temp.Int.,Temp.Int._min,Temp.Int._max,CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,SO2,SO2_min,SO2_max,O3,O3_min,O3_max,NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,CO,CO_min,CO_max,MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
x = df.DVE
y = df.VVE
#print(y)
# Define the data series
trace1 = go.Area(
r = y,#["2015-12-01","2015-12-01 01:00:00","2015-12-01 02:00:00","2015-12-01 03:00:00","2015-12-01 04:00:00","2015-12-01 05:00:00"],
t = x,#[74.73,76.59,76.5,79.03,77.89,81.9,],
name='Vento m/s',
marker=dict(
color='rgb(158,154,200)'
)
)
# Edit the layout
layout = go.Layout(
title='Distribuição da Velocidade do Vento no diagrama Laurel',
font = dict(size=16),
radialaxis=dict(
ticksuffix='m/s'
),
orientation=270
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)
# Plot the figure object
py.plot(
fig,
filename='hans/oi_wrose', # name of the file as saved in your plotly account
sharing='public'
) # 'public' | 'private' | 'secret': Learn more: https://plot.ly/python/privacy
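# Hedged alternative (editorial addition): if no plotly account is configured,
# the same figure can be written to a local HTML file with the offline module
# instead of being uploaded; the two lines below are an assumed alternative
# and are left commented out so the script's behaviour is unchanged.
# import plotly.offline as pyo
# pyo.plot(fig, filename='oi_wrose.html', auto_open=False)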
| apache-2.0 |
dudulianangang/vps | EneConsTest.py | 1 | 5969 | import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
plt.style.use('seaborn-white')
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'Tahoma'
# # plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 16
# plt.rcParams['axes.labelsize'] = 10
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['xtick.labelsize'] = 8
# plt.rcParams['ytick.labelsize'] = 8
# plt.rcParams['legend.fontsize'] = 10
# plt.rcParams['figure.titlesize'] = 12
# constants for normalization
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
tt = 1/wp
ts = 50*5
te = 1500
pct = 100
en0 = me*c**2
en1 = 0.5*ep*ld**2
# simulation domain
nx = 3500
ny = 3500
lx = 3500
ly = 3500
# figure domain (set by grid)
grid_min_x = 0
grid_max_x = nx
grid_min_y = 0
grid_max_y = ny
Gx = np.linspace(0,lx,nx)
Gy = np.linspace(0,ly,ny)
gx = Gx[grid_min_x:grid_max_x+1]
gy = Gy[grid_min_y:grid_max_y+1]
# figure parameters
# fs = 24
jetcmap = plt.cm.get_cmap("rainbow", 9) #generate a rainbow map with 9 values
jet_vals = jetcmap(np.arange(9)) #extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1] #change the first value
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)
# define array
EneBmE = np.ones(7)
EneBmI = np.ones(7)
EneBgE = np.ones(7)
EneBgI = np.ones(7)
sex = np.ones(7)
sey = np.ones(7)
sez = np.ones(7)
sbx = np.ones(7)
sby = np.ones(7)
sbz = np.ones(7)
TpeC1 = np.ones(7)
TpeS1 = np.ones(7)
TfeC1 = np.ones(7)
TfeS1 = np.ones(7)
TpeC2 = np.ones(7)
TpeS2 = np.ones(7)
TfeC2 = np.ones(7)
TfeS2 = np.ones(7)
TeC1 = np.ones(7)
TeS1 = np.ones(7)
TeC2 = np.ones(7)
TeS2 = np.ones(7)
time = np.ones(7)
# plot function
file = '/Volumes/yaowp2016/'
folder = 'nj'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC1[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC1[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
folder = 'nj_non'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC2[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC2[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS2[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
TeC1 = TpeC1+TfeC1
TeS1 = TpeS1+TfeS1
TeC2 = TpeC2+TfeC2
TeS2 = TpeS2+TfeS2
np.save('tpec1.npy', TpeC1)
np.save('tpes1.npy', TpeS1)
np.save('tfec1.npy', TfeC1)
np.save('tfes1.npy', TfeS1)
np.save('tpec2.npy', TpeC2)
np.save('tpes2.npy', TpeS2)
np.save('tfec2.npy', TfeC2)
np.save('tfes2.npy', TfeS2)
np.save('tec1.npy', TeC1)
np.save('tes1.npy', TeS1)
np.save('tec2.npy', TeC2)
np.save('tes2.npy', TeS2)
# plt.figure(figsize=(8,5))
# ax = plt.subplot()
# ax.plot(time, TpeC1,'r-', lw=2, label='tbc-cal')
# ax.plot(time, TpeS1,'r--', lw=2, label='tbc-sys')
# ax.plot(time, TpeC2,'b-', lw=2, label='pbc-cal')
# ax.plot(time, TpeS2,'b--', lw=2, label='pbc-sys')
# plt.xlabel('time($\omega_{pe}^{-1}$)',fontsize=24)
# plt.ylabel('energy($J$)',fontsize=24)
# plt.legend(loc='best', numpoints=1, fancybox=True)
# plt.title('total system energy',fontsize=32,fontstyle='normal')
# plt.show()
# plt.savefig(file+folder+'/plots/'+'TotalEnergyComp.png',bbox_inches='tight') # n means normalized
# plt.close()
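# Hedged reload sketch (editorial addition): the arrays saved above can be
# reloaded later to compare total energy between the two runs without
# re-reading the SDF files; kept commented out like the plotting block above.
# tec1 = np.load('tec1.npy')
# tec2 = np.load('tec2.npy')
# print('relative energy drift (tbc):', (tec1[-1] - tec1[0]) / tec1[0])
# print('relative energy drift (pbc):', (tec2[-1] - tec2[0]) / tec2[0])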
| apache-2.0 |
planetarymike/IDL-Colorbars | IDL_py_test/027_Eos_B.py | 1 | 5942 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 1., 1.],
[1., 1., 1.],
[0.498039, 0.498039, 0.498039],
[0., 0., 0.513725],
[0., 0., 0.533333],
[0., 0., 0.54902],
[0., 0., 0.564706],
[0., 0., 0.580392],
[0., 0., 0.6],
[0., 0., 0.615686],
[0., 0., 0.568627],
[0., 0., 0.584314],
[0., 0., 0.666667],
[0., 0., 0.682353],
[0., 0., 0.698039],
[0., 0., 0.713725],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.831373],
[0., 0., 0.847059],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 0.898039],
[0., 0., 0.913725],
[0., 0., 0.933333],
[0., 0., 0.94902],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 1.],
[0., 0.027451, 0.968627],
[0., 0.0588235, 0.937255],
[0., 0.0901961, 0.905882],
[0., 0.121569, 0.87451],
[0., 0.152941, 0.843137],
[0., 0.184314, 0.811765],
[0., 0.215686, 0.780392],
[0., 0.223529, 0.67451],
[0., 0.25098, 0.643137],
[0., 0.309804, 0.686275],
[0., 0.341176, 0.654902],
[0., 0.372549, 0.623529],
[0., 0.403922, 0.592157],
[0., 0.435294, 0.560784],
[0., 0.466667, 0.529412],
[0., 0.498039, 0.498039],
[0., 0.529412, 0.466667],
[0., 0.505882, 0.392157],
[0., 0.533333, 0.364706],
[0., 0.623529, 0.372549],
[0., 0.654902, 0.341176],
[0., 0.686275, 0.309804],
[0., 0.717647, 0.278431],
[0., 0.74902, 0.247059],
[0., 0.780392, 0.215686],
[0., 0.811765, 0.184314],
[0., 0.843137, 0.152941],
[0., 0.784314, 0.109804],
[0., 0.811765, 0.0823529],
[0., 0.937255, 0.0588235],
[0., 0.968627, 0.027451],
[0., 1., 0.],
[0.0352941, 1., 0.],
[0.0705882, 1., 0.],
[0.105882, 1., 0.],
[0.141176, 1., 0.],
[0.176471, 1., 0.],
[0.192157, 0.898039, 0.],
[0.223529, 0.898039, 0.],
[0.282353, 1., 0.],
[0.317647, 1., 0.],
[0.356863, 1., 0.],
[0.392157, 1., 0.],
[0.427451, 1., 0.],
[0.462745, 1., 0.],
[0.498039, 1., 0.],
[0.533333, 1., 0.],
[0.513725, 0.898039, 0.],
[0.545098, 0.898039, 0.],
[0.639216, 1., 0.],
[0.678431, 1., 0.],
[0.713725, 1., 0.],
[0.74902, 1., 0.],
[0.784314, 1., 0.],
[0.819608, 1., 0.],
[0.854902, 1., 0.],
[0.890196, 1., 0.],
[0.835294, 0.898039, 0.],
[0.866667, 0.898039, 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.964706, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.882353, 0.],
[0.898039, 0.776471, 0.],
[0.898039, 0.764706, 0.],
[1., 0.831373, 0.],
[1., 0.815686, 0.],
[1., 0.8, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.713725, 0.],
[0.898039, 0.627451, 0.],
[0.898039, 0.611765, 0.],
[1., 0.662745, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.615686, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.54902, 0.],
[0.898039, 0.478431, 0.],
[0.898039, 0.462745, 0.],
[1., 0.498039, 0.],
[1., 0.490196, 0.],
[1., 0.482353, 0.],
[1., 0.47451, 0.],
[1., 0.466667, 0.],
[1., 0.454902, 0.],
[1., 0.447059, 0.],
[1., 0.439216, 0.],
[0.898039, 0.388235, 0.],
[0.898039, 0.380392, 0.],
[1., 0.415686, 0.],
[1., 0.407843, 0.],
[1., 0.4, 0.],
[1., 0.388235, 0.],
[1., 0.380392, 0.],
[1., 0.372549, 0.],
[1., 0.364706, 0.],
[1., 0.356863, 0.],
[0.898039, 0.313725, 0.],
[0.898039, 0.305882, 0.],
[1., 0.329412, 0.],
[1., 0.321569, 0.],
[1., 0.313725, 0.],
[1., 0.305882, 0.],
[1., 0.298039, 0.],
[1., 0.290196, 0.],
[1., 0.282353, 0.],
[1., 0.27451, 0.],
[0.898039, 0.239216, 0.],
[0.898039, 0.231373, 0.],
[1., 0.247059, 0.],
[1., 0.239216, 0.],
[1., 0.231373, 0.],
[1., 0.223529, 0.],
[1., 0.215686, 0.],
[1., 0.207843, 0.],
[1., 0.196078, 0.],
[1., 0.188235, 0.],
[0.898039, 0.164706, 0.],
[0.898039, 0.156863, 0.],
[1., 0.164706, 0.],
[1., 0.156863, 0.],
[1., 0.14902, 0.],
[1., 0.141176, 0.],
[1., 0.129412, 0.],
[1., 0.121569, 0.],
[1., 0.113725, 0.],
[1., 0.105882, 0.],
[0.898039, 0.0862745, 0.],
[0.898039, 0.0823529, 0.],
[1., 0.0823529, 0.],
[1., 0.0745098, 0.],
[1., 0.0627451, 0.],
[1., 0.054902, 0.],
[1., 0.0470588, 0.],
[1., 0.0509804, 0.],
[1., 0.0313725, 0.],
[1., 0.0235294, 0.],
[0.898039, 0.0117647, 0.],
[0.898039, 0.00392157, 0.],
[1., 0., 0.],
[0.992157, 0., 0.],
[0.984314, 0., 0.],
[0.976471, 0., 0.],
[0.968627, 0., 0.],
[0.960784, 0., 0.],
[0.952941, 0., 0.],
[0.945098, 0., 0.],
[0.843137, 0., 0.],
[0.839216, 0., 0.],
[0.921569, 0., 0.],
[0.917647, 0., 0.],
[0.909804, 0., 0.],
[0.901961, 0., 0.],
[0.894118, 0., 0.],
[0.886275, 0., 0.],
[0.878431, 0., 0.],
[0.870588, 0., 0.],
[0.776471, 0., 0.],
[0.768627, 0., 0.],
[0.847059, 0., 0.],
[0.843137, 0., 0.],
[0.835294, 0., 0.],
[0.827451, 0., 0.],
[0.819608, 0., 0.],
[0.811765, 0., 0.],
[0.803922, 0., 0.],
[0.796078, 0., 0.],
[0.709804, 0., 0.],
[0.701961, 0., 0.],
[0.772549, 0., 0.],
[0.768627, 0., 0.],
[0.760784, 0., 0.],
[0.752941, 0., 0.],
[0.745098, 0., 0.],
[0.737255, 0., 0.],
[0.729412, 0., 0.],
[0.721569, 0., 0.],
[0.643137, 0., 0.],
[0.635294, 0., 0.],
[0.698039, 0., 0.],
[0.690196, 0., 0.],
[0.686275, 0., 0.],
[0.678431, 0., 0.],
[0.670588, 0., 0.],
[0.662745, 0., 0.],
[0.654902, 0., 0.],
[0.647059, 0., 0.],
[0.576471, 0., 0.],
[0.568627, 0., 0.],
[0.623529, 0., 0.],
[0.615686, 0., 0.],
[0.611765, 0., 0.],
[0.603922, 0., 0.],
[0.596078, 0., 0.],
[0.588235, 0., 0.],
[0.580392, 0., 0.],
[0.572549, 0., 0.],
[0.509804, 0., 0.],
[0.501961, 0., 0.],
[0.54902, 0., 0.],
[0.541176, 0., 0.],
[0.537255, 0., 0.],
[0.529412, 0., 0.],
[0.521569, 0., 0.],
[0.513725, 0., 0.],
[0.505882, 0., 0.],
[0.498039, 0., 0.],
[0.443137, 0., 0.],
[0.435294, 0., 0.],
[0.47451, 0., 0.],
[0.466667, 0., 0.],
[0.458824, 0., 0.],
[0.458824, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
ManuSchmi88/landlab | landlab/plot/imshow.py | 3 | 21050 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
"""Prepare a map view of data over all nodes in the grid.
Data is plotted as cells shaded with the value at the node at its center.
Outer edges of perimeter cells are extrapolated. Closed elements are
colored uniformly (default black, overridden with kwd 'color_for_closed');
other open boundary nodes get their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node values, or a field name as a string from which to draw the data.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed nodes (default 'black'). If None, closed
(or masked) nodes will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
if isinstance(values, str):
values_at_node = grid.at_node[values]
else:
values_at_node = values
if values_at_node.size != grid.number_of_nodes:
raise ValueError('number of values does not match number of nodes')
values_at_node = np.ma.masked_where(
grid.status_at_node == CLOSED_BOUNDARY, values_at_node)
try:
shape = grid.shape
except AttributeError:
shape = (-1, )
_imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)
if isinstance(values, str):
plt.title(values)
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
imshow_grid_at_node(grid, values, **kwds)
def imshow_grid_at_cell(grid, values, **kwds):
"""Map view of grid data over all grid cells.
Prepares a map view of data over all cells in the grid.
Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.
Construction ::
imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Values at the cells on the grid. Alternatively, can be a field name
(string) from which to draw the data from the grid.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
Raises
------
ValueError
If input grid is not uniform rectilinear.
"""
if isinstance(values, str):
try:
values_at_cell = grid.at_cell[values]
except FieldError:
values_at_cell = grid.at_node[values]
else:
values_at_cell = values
if values_at_cell.size == grid.number_of_nodes:
values_at_cell = values_at_cell[grid.node_at_cell]
if values_at_cell.size != grid.number_of_cells:
raise ValueError('number of values must match number of cells or '
'number of nodes')
values_at_cell = np.ma.asarray(values_at_cell)
values_at_cell.mask = True
values_at_cell.mask[grid.core_cells] = False
myimage = _imshow_grid_values(grid,
values_at_cell.reshape(grid.cell_grid_shape),
**kwds)
if isinstance(values, str):
plt.title(values)
return myimage
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
imshow_grid_at_cell(grid, values, **kwds)
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=(None, None),
symmetric_cbar=False, cmap='pink', limits=None,
colorbar_label = None,
allow_colorbar=True, vmin=None, vmax=None,
norm=None, shrink=1., color_for_closed='black',
color_for_background=None, show_elements=False,
output=None):
gridtypes = inspect.getmro(grid.__class__)
cmap = plt.get_cmap(cmap)
if color_for_closed is not None:
cmap.set_bad(color=color_for_closed)
else:
cmap.set_bad(alpha=0.)
if isinstance(grid, RasterModelGrid):
if values.ndim != 2:
raise ValueError('values must have ndim == 2')
y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5
kwds = dict(cmap=cmap)
(kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
if (limits is None) and ((vmin is None) and (vmax is None)):
if symmetric_cbar:
(var_min, var_max) = (values.min(), values.max())
limit = max(abs(var_min), abs(var_max))
(kwds['vmin'], kwds['vmax']) = (- limit, limit)
elif limits is not None:
(kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
else:
if vmin is not None:
kwds['vmin'] = vmin
if vmax is not None:
kwds['vmax'] = vmax
if np.isclose(grid.dx, grid.dy):
if values.size == grid.number_of_nodes:
myimage = plt.imshow(
values.reshape(grid.shape), origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
else: # this is a cell grid, and has been reshaped already...
myimage = plt.imshow(values, origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
myimage = plt.pcolormesh(x, y, values, **kwds)
plt.gca().set_aspect(1.)
plt.autoscale(tight=True)
if allow_colorbar:
cb = plt.colorbar(norm=norm, shrink=shrink)
if colorbar_label:
cb.set_label(colorbar_label)
elif VoronoiDelaunayGrid in gridtypes:
# This is still very much ad-hoc, and needs prettifying.
# We should save the modifications needed to plot color all the way
# to the diagram edge *into* the grid, for faster plotting.
# (see http://stackoverflow.com/questions/20515554/...
# colorize-voronoi-diagram)
# (This technique is not implemented yet)
from scipy.spatial import voronoi_plot_2d
import matplotlib.colors as colors
import matplotlib.cm as cmx
cm = plt.get_cmap(cmap)
if (limits is None) and ((vmin is None) and (vmax is None)):
# only want to work with NOT CLOSED nodes
open_nodes = grid.status_at_node != 4
if symmetric_cbar:
(var_min, var_max) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
limit = max(abs(var_min), abs(var_max))
(vmin, vmax) = (- limit, limit)
else:
(vmin, vmax) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
elif limits is not None:
(vmin, vmax) = (limits[0], limits[1])
else:
open_nodes = grid.status_at_node != 4
if vmin is None:
vmin = values.flat[open_nodes].min()
if vmax is None:
vmax = values.flat[open_nodes].max()
cNorm = colors.Normalize(vmin, vmax)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
colorVal = scalarMap.to_rgba(values)
if show_elements:
myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
show_points=False)
# show_points to be supported in scipy0.18, but harmless for now
mycolors = (i for i in colorVal)
for order in grid.vor.point_region:
region = grid.vor.regions[order]
colortouse = next(mycolors)
if -1 not in region:
polygon = [grid.vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=colortouse)
plt.gca().set_aspect(1.)
# plt.autoscale(tight=True)
# Tempting though it is to move the boundary outboard of the outermost
# nodes (e.g., to the outermost corners), this is a bad idea, as the
# outermost cells tend to have highly elongated shapes which make the
# plot look stupid
plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))
scalarMap.set_array(values)
if allow_colorbar:
cb = plt.colorbar(scalarMap, shrink=shrink)
if grid_units[1] is None and grid_units[0] is None:
grid_units = grid.axis_units
if grid_units[1] == '-' and grid_units[0] == '-':
plt.xlabel('X')
plt.ylabel('Y')
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
if plot_name is not None:
plt.title('%s' % (plot_name))
if var_name is not None or var_units is not None:
if var_name is not None:
assert type(var_name) is str
if var_units is not None:
assert type(var_units) is str
colorbar_label = var_name + ' (' + var_units + ')'
else:
colorbar_label = var_name
else:
assert type(var_units) is str
colorbar_label = '(' + var_units + ')'
assert type(colorbar_label) is str
assert allow_colorbar
cb.set_label(colorbar_label)
if color_for_background is not None:
plt.gca().set_axis_bgcolor(color_for_background)
if output is not None:
if type(output) is str:
plt.savefig(output)
plt.clf()
elif output:
plt.show()
def imshow_grid(grid, values, **kwds):
"""Prepare a map view of data over all nodes or cells in the grid.
Data is plotted as colored cells. If at='node', the surrounding cell is
shaded with the value at the node at its center. If at='cell', the cell
is shaded with its own value. Outer edges of perimeter cells are
extrapolated. Closed elements are colored uniformly (default black,
overridden with kwd 'color_for_closed'); other open boundary nodes get
their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node or cell values, or a field name as a string from which to draw
the data.
at : str, {'node', 'cell'}
Tells plotter where values are defined.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
show = kwds.pop('show', False)
values_at = kwds.pop('values_at', 'node')
values_at = kwds.pop('at', values_at)
    if isinstance(values, str):
        values = grid.field_values(values_at, values)
if values_at == 'node':
imshow_grid_at_node(grid, values, **kwds)
elif values_at == 'cell':
imshow_grid_at_cell(grid, values, **kwds)
else:
raise TypeError('value location %s not understood' % values_at)
# retained for backwards compatibility:
if show:
plt.show()
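# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of landlab itself).
# Assumes a landlab version whose RasterModelGrid accepts a (rows, cols)
# shape tuple; the node values below are arbitrary illustrative numbers.
if __name__ == '__main__':
    from landlab import RasterModelGrid
    mg = RasterModelGrid((4, 5))
    example_values = np.arange(mg.number_of_nodes, dtype=float)
    # Plot node values; output=True calls plt.show(), as documented above.
    imshow_grid(mg, example_values, at='node', var_name='example values',
                output=True)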
| mit |
hrjn/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
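# Editorial addition (hedged): print the normalized scores so the ranking
# described in the docstring is visible in text as well as in the figure;
# exact values depend on the random seed set above.
print("Normalized F-test statistics:", f_test)
print("Normalized mutual information:", mi)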
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
waddell/urbansim | urbansim/urbanchoice/mnl.py | 4 | 9002 | """
Number crunching code for multinomial logit.
``mnl_estimate`` and ``mnl_simulate`` especially are used by
``urbansim.models.lcm``.
"""
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import scipy.optimize
import pmat
from pmat import PMAT
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
# right now MNL can only estimate location choice models, where every equation
# is the same
# it might be better to use stats models for a non-location choice problem
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_probs(data, beta, numalts):
logging.debug('start: calculate MNL probabilities')
clamp = data.typ == 'numpy'
utilities = beta.multiply(data)
if numalts == 0:
raise Exception("Number of alternatives is zero")
utilities.reshape(numalts, utilities.size() / numalts)
exponentiated_utility = utilities.exp(inplace=True)
if clamp:
exponentiated_utility.inftoval(1e20)
if clamp:
exponentiated_utility.clamptomin(1e-300)
sum_exponentiated_utility = exponentiated_utility.sum(axis=0)
probs = exponentiated_utility.divide_by_row(
sum_exponentiated_utility, inplace=True)
if clamp:
probs.nantoval(1e-300)
if clamp:
probs.clamptomin(1e-300)
logging.debug('finish: calculate MNL probabilities')
return probs
def get_hessian(derivative):
return np.linalg.inv(np.dot(derivative, np.transpose(derivative)))
def get_standard_error(hessian):
return np.sqrt(np.diagonal(hessian))
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_loglik(beta, data, chosen, numalts, weights=None, lcgrad=False,
stderr=0):
logger.debug('start: calculate MNL log-likelihood')
numvars = beta.size
numobs = data.size() / numvars / numalts
beta = np.reshape(beta, (1, beta.size))
beta = PMAT(beta, data.typ)
probs = mnl_probs(data, beta, numalts)
# lcgrad is the special gradient for the latent class membership model
if lcgrad:
assert weights
gradmat = weights.subtract(probs).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
else:
if not weights:
gradmat = chosen.subtract(probs).reshape(probs.size(), 1)
else:
gradmat = chosen.subtract(probs).multiply_by_row(
weights).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
if stderr:
gradmat = data.multiply_by_row(gradmat.reshape(1, gradmat.size()))
gradmat.reshape(numvars, numalts * numobs)
return get_standard_error(get_hessian(gradmat.get_mat()))
chosen.reshape(numalts, numobs)
if weights is not None:
if probs.shape() == weights.shape():
loglik = ((probs.log(inplace=True)
.element_multiply(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = ((probs.log(inplace=True)
.multiply_by_row(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = (probs.log(inplace=True).element_multiply(
chosen, inplace=True)).sum(axis=1).sum(axis=0)
if loglik.typ == 'numpy':
loglik, gradarr = loglik.get_mat(), gradarr.get_mat().flatten()
else:
loglik = loglik.get_mat()[0, 0]
gradarr = np.reshape(gradarr.get_mat(), (1, gradarr.size()))[0]
logger.debug('finish: calculate MNL log-likelihood')
return -1 * loglik, -1 * gradarr
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True):
"""
Get the probabilities for each chooser choosing between `numalts`
alternatives.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
coeff : 1D array
The model coefficients corresponding to each column in `data`.
numalts : int
The number of alternatives available to each chooser.
GPU : bool, optional
returnprobs : bool, optional
If True, return the probabilities for each chooser/alternative instead
of actual choices.
Returns
-------
probs or choices: 2D array
If `returnprobs` is True the probabilities are a 2D array with a
row for each chooser and columns for each alternative.
"""
logger.debug(
'start: MNL simulation with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
data = np.transpose(data)
coeff = np.reshape(np.array(coeff), (1, len(coeff)))
data, coeff = PMAT(data, atype), PMAT(coeff, atype)
probs = mnl_probs(data, coeff, numalts)
if returnprobs:
return np.transpose(probs.get_mat())
# convert to cpu from here on - gpu doesn't currently support these ops
if probs.typ == 'cuda':
probs = PMAT(probs.get_mat())
probs = probs.cumsum(axis=0)
r = pmat.random(probs.size() / numalts)
choices = probs.subtract(r, inplace=True).firstpositive(axis=0)
logger.debug('finish: MNL simulation')
return choices.get_mat()
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3),
weights=None, lcgrad=False, beta=None):
"""
Calculate coefficients of the MNL model.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
chosen : 2D array
This boolean array has a row for each chooser and a column for each
alternative. The column ordering for alternatives is expected to be
the same as their row ordering in the `data` array.
A one (True) indicates which alternative each chooser has chosen.
numalts : int
The number of alternatives.
GPU : bool, optional
coeffrange : tuple of floats, optional
Limits of (min, max) to which coefficients are clipped.
weights : ndarray, optional
lcgrad : bool, optional
beta : 1D array, optional
Any initial guess for the coefficients.
Returns
-------
log_likelihood : dict
Dictionary of log-likelihood values describing the quality of
the model fit.
fit_parameters : pandas.DataFrame
Table of fit parameters with columns 'Coefficient', 'Std. Error',
'T-Score'. Each row corresponds to a column in `data` and are given
in the same order as in `data`.
See Also
--------
scipy.optimize.fmin_l_bfgs_b : The optimization routine used.
"""
logger.debug(
'start: MNL fit with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
numvars = data.shape[1]
numobs = data.shape[0] / numalts
if chosen is None:
chosen = np.ones((numobs, numalts)) # used for latent classes
data = np.transpose(data)
chosen = np.transpose(chosen)
data, chosen = PMAT(data, atype), PMAT(chosen, atype)
if weights is not None:
weights = PMAT(np.transpose(weights), atype)
if beta is None:
beta = np.zeros(numvars)
bounds = [coeffrange] * numvars
with log_start_finish('scipy optimization for MNL fit', logger):
args = (data, chosen, numalts, weights, lcgrad)
bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik,
beta,
args=args,
fprime=None,
factr=10,
approx_grad=False,
bounds=bounds
)
beta = bfgs_result[0]
stderr = mnl_loglik(
beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad)
l0beta = np.zeros(numvars)
l0 = -1 * mnl_loglik(l0beta, *args)[0]
l1 = -1 * mnl_loglik(beta, *args)[0]
log_likelihood = {
'null': float(l0[0][0]),
'convergence': float(l1[0][0]),
'ratio': float((1 - (l1 / l0))[0][0])
}
fit_parameters = pd.DataFrame({
'Coefficient': beta,
'Std. Error': stderr,
'T-Score': beta / stderr})
logger.debug('finish: MNL fit')
return log_likelihood, fit_parameters
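# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of urbansim itself).
# Builds synthetic "long form" data as described in the docstrings above:
# rows are alternatives, grouped in blocks of numalts per chooser. The sizes
# and random choices are illustrative only.
if __name__ == '__main__':
    numobs, numalts, numvars = 50, 4, 3
    rs = np.random.RandomState(0)
    long_data = rs.rand(numobs * numalts, numvars)
    # one-hot matrix recording which alternative each chooser picked
    chosen = np.zeros((numobs, numalts))
    chosen[np.arange(numobs), rs.randint(numalts, size=numobs)] = 1
    log_like, fit = mnl_estimate(long_data, chosen, numalts)
    print(log_like)
    print(fit)
    probs = mnl_simulate(long_data, fit['Coefficient'].values, numalts,
                         returnprobs=True)
    print(probs.shape)  # one row per chooser, one column per alternative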
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/tree/tests/test_tree.py | 32 | 52369 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
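# Editor's note (explanatory addition, not in the original file): for a 0/1
# target the equivalence verified above follows from a short calculation.
# With p the fraction of positive samples in a node,
#     gini impurity  = 1 - p**2 - (1 - p)**2 = 2 * p * (1 - p)
#     mse (variance) = mean((y - p)**2)      =     p * (1 - p)
# The two criteria differ by a constant factor of 2 at every node, so both
# rank candidate splits identically; the grown trees, and hence the
# normalized feature importances, coincide.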
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test if leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
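# Editor's note (illustrative, not part of the original module): most tests
# above are nose-style generator tests, i.e. a test_* function yields
# (check_function, *args) tuples and the test runner invokes each one. When
# debugging, the same checks can be called directly; the estimator names
# below are assumed to be keys of ALL_TREES / SPARSE_TREES:
#
#     check_sparse_input("DecisionTreeClassifier", "toy")
#     check_decision_path("DecisionTreeRegressor")
#     for name in ALL_TREES:
#         check_public_apply(name)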
| bsd-3-clause |
dmitriz/zipline | zipline/utils/tradingcalendar.py | 6 | 11182 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
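# Editor's note (illustrative example, not part of the original module):
# the helper above floors any timestamp to midnight UTC, e.g.
#
#     canonicalize_datetime(datetime(2012, 10, 29, 15, 45, tzinfo=pytz.utc))
#     # -> datetime(2012, 10, 29, 0, 0, tzinfo=pytz.utc)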
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, the 24th (a Friday) is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
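# Editor's sketch (not part of the original module): a minimal usage example
# relying only on the module-level objects defined above. The helper name is
# made up for illustration.
def _example_calendar_usage():
    # Trading days between two dates, as a tz-aware DatetimeIndex.
    days = get_trading_days(pd.Timestamp('2012-10-26', tz='UTC'),
                            pd.Timestamp('2012-11-02', tz='UTC'))
    # The Hurricane Sandy closures (2012-10-29 and 2012-10-30) are excluded.
    assert pd.Timestamp('2012-10-29', tz='UTC') not in days
    # Regular sessions open at 9:31 AM and close at 4:00 PM US/Eastern
    # (1:00 PM on early-close days); both timestamps are returned in UTC.
    market_open, market_close = get_open_and_close(days[0], early_closes)
    return days, market_open, market_close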
| apache-2.0 |
barbagroup/PetIBM | examples/ibpm/cylinder2dRe40/scripts/plotVorticity.py | 4 | 1401 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 2000 time steps (20 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 2000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contours of the vorticity field.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-3.0, 3.0, 16)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-1.0, 4.0)
ax.set_ylim(-2.0, 2.0)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
rhoscanner-team/pcd-plotter | delaunay_example.py | 1 | 1435 | import numpy as np
from scipy.spatial import Delaunay
points = np.random.rand(30, 2) # 30 points in 2-d
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')
plt.xlim(-1, 2)
plt.ylim(-1, 2)
# -- the same stuff for the convex hull
edges = set()
edge_points = []
for ia, ib in tri.convex_hull:
add_edge(ia, ib)
lines = LineCollection(edge_points)
plt.figure()
plt.title('Convex hull')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
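# Editor's note (alternative sketch, not part of the original script): newer
# SciPy versions expose the triangle indices as Delaunay.simplices
# (``vertices`` is the older alias) and provide scipy.spatial.ConvexHull, so
# the hull edges can be collected without reusing the triangulation:
#
#     from scipy.spatial import ConvexHull
#     hull = ConvexHull(points)
#     hull_edge_points = [points[simplex] for simplex in hull.simplices]
#     lines = LineCollection(hull_edge_points)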
| gpl-2.0 |
rahul-c1/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
hainm/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
nmartensen/pandas | pandas/io/sql.py | 3 | 58612 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
    If not None, emits a FutureWarning for 'sqlite' and
    raises a ValueError for anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
Force the columns listed in ``parse_dates`` to be parsed as datetime.
Supports both string-formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
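Examples
--------
An illustrative sketch only (assumes SQLAlchemy is installed; it uses an
in-memory SQLite engine and the hypothetical table name 'test_data'):

>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://')  # throw-away in-memory database
>>> pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']}).to_sql(
...     'test_data', engine, index=False)
>>> frame = pd.read_sql_table('test_data', engine)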
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, it uses %(name)s, so use params={'name': 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
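Examples
--------
A minimal sketch (uses an in-memory sqlite3 DBAPI connection and the
hypothetical table name 'test_data'; the qmark parameter style shown is
what the sqlite3 driver expects):

>>> import sqlite3
>>> import pandas as pd
>>> conn = sqlite3.connect(':memory:')  # throw-away in-memory database
>>> pd.DataFrame({'a': [1, 2, 3]}).to_sql('test_data', conn, index=False)
>>> frame = pd.read_sql_query('SELECT a FROM test_data WHERE a > ?',
...                           conn, params=(2,))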
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, it uses %(name)s, so use params={'name': 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (kept for backward compatibility); it will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
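Examples
--------
An illustrative sketch (in-memory SQLite engine via SQLAlchemy; the table
name 'test_data' is hypothetical). The same call accepts either a table
name or a query and delegates accordingly:

>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://')  # throw-away in-memory database
>>> pd.DataFrame({'a': [1, 2]}).to_sql('test_data', engine, index=False)
>>> by_table = pd.read_sql('test_data', engine)
>>> by_query = pd.read_sql('SELECT a FROM test_data', engine)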
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
used.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
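Examples
--------
A minimal sketch (in-memory sqlite3 connection; the table name is
arbitrary and ``if_exists='replace'`` is only one of the possible modes):

>>> import sqlite3
>>> import pandas as pd
>>> from pandas.io.sql import to_sql
>>> conn = sqlite3.connect(':memory:')  # throw-away in-memory database
>>> to_sql(pd.DataFrame({'a': [1, 2]}), 'test_data', conn,
...        if_exists='replace', index=False)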
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
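Examples
--------
A minimal sketch (in-memory sqlite3 connection; 'test_data' is an
arbitrary table name):

>>> import sqlite3
>>> import pandas as pd
>>> from pandas.io.sql import has_table
>>> conn = sqlite3.connect(':memory:')  # throw-away in-memory database
>>> pd.DataFrame({'a': [1]}).to_sql('test_data', conn, index=False)
>>> has_table('test_data', conn)
True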
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
otherwise it just returns con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses the fact that the table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isna(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting a bool column with None replaces
all Nones with False. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notna_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnadata = col[~isna(col)]
if len(notnadata):
col_for_inference = notnadata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notna_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle database abstraction
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
If provided, this MetaData object is used instead of a newly
created one. This allows specifying database-flavor-specific
arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, it uses %(name)s, so use params={'name': 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
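# For example (hypothetical input): 'my "col"'  ->  '"my ""col"""'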
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable, just use the CREATE TABLE statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
Return a list of SQL statements that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notna_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with the SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con : an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
dtype : dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
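Examples
--------
A minimal sketch using the sqlite3 fallback (no connection is needed just
to generate the DDL; the exact column types depend on the frame's dtypes
and the table name is arbitrary):

>>> import pandas as pd
>>> from pandas.io.sql import get_schema
>>> ddl = get_schema(pd.DataFrame({'a': [1.0], 'b': ['x']}), 'test_data')
>>> 'CREATE TABLE' in ddl
True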
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
| bsd-3-clause |
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
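# Optional sketch (kept as comments so the example still just displays the
# figures): save the two figures to files instead, with arbitrary names.
# plt.figure(1); plt.savefig('etopo_masked_original_grid.png', dpi=150)
# plt.figure(2); plt.savefig('etopo_masked_fine_grid.png', dpi=150)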
| gpl-2.0 |