repo_name
stringlengths 6
100
| path
stringlengths 4
191
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 935
727k
| license
stringclasses 15
values |
---|---|---|---|---|---|
winklerand/pandas | pandas/tests/frame/test_join.py | 11 | 5226 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import DataFrame, Index, PeriodIndex
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@pytest.fixture
def frame_with_period_index():
    """4x5 DataFrame of the integers 0..19 with an annual PeriodIndex from 2000."""
    values = np.arange(20).reshape(4, 5)
    years = PeriodIndex(start='2000', freq='A', periods=4)
    return DataFrame(data=values, columns=list('abcde'), index=years)
@pytest.fixture
def frame():
    """Standard float DataFrame from the shared pandas test-suite TestData set."""
    return TestData().frame
@pytest.fixture
def left():
    """Left join operand: single column 'a' on a descending integer index."""
    values = {'a': [20, 10, 0]}
    return DataFrame(values, index=[2, 1, 0])
@pytest.fixture
def right():
    """Right join operand: column 'b'; its index overlaps `left` on 1 and 2 only."""
    values = {'b': [300, 100, 200]}
    return DataFrame(values, index=[3, 1, 2])
@pytest.mark.parametrize(
    "how, sort, expected",
    [('inner', False, DataFrame({'a': [20, 10],
                                 'b': [200, 100]},
                                index=[2, 1])),
     ('inner', True, DataFrame({'a': [10, 20],
                                'b': [100, 200]},
                               index=[1, 2])),
     ('left', False, DataFrame({'a': [20, 10, 0],
                                'b': [200, 100, np.nan]},
                               index=[2, 1, 0])),
     ('left', True, DataFrame({'a': [0, 10, 20],
                               'b': [np.nan, 100, 200]},
                              index=[0, 1, 2])),
     ('right', False, DataFrame({'a': [np.nan, 10, 20],
                                 'b': [300, 100, 200]},
                                index=[3, 1, 2])),
     ('right', True, DataFrame({'a': [10, 20, np.nan],
                                'b': [100, 200, 300]},
                               index=[1, 2, 3])),
     ('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
                                 'b': [np.nan, 100, 200, 300]},
                                index=[0, 1, 2, 3])),
     ('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
                                'b': [np.nan, 100, 200, 300]},
                               index=[0, 1, 2, 3]))])
def test_join(left, right, how, sort, expected):
    # Index-on-index join: each `how` keeps the expected rows and NaN-fills
    # missing values; `sort` controls whether the result index is sorted or
    # keeps the calling frame's order.
    result = left.join(right, how=how, sort=sort)
    tm.assert_frame_equal(result, expected)
def test_join_index(frame):
    """Join on index with disjoint column sets, exercising every `how`."""
    # left / right
    f = frame.loc[frame.index[:10], ['A', 'B']]
    f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]

    # default join is 'left': result index is the caller's index
    joined = f.join(f2)
    tm.assert_index_equal(f.index, joined.index)
    expected_columns = Index(['A', 'B', 'C', 'D'])
    tm.assert_index_equal(joined.columns, expected_columns)

    joined = f.join(f2, how='left')
    tm.assert_index_equal(joined.index, f.index)
    tm.assert_index_equal(joined.columns, expected_columns)

    joined = f.join(f2, how='right')
    tm.assert_index_equal(joined.index, f2.index)
    tm.assert_index_equal(joined.columns, expected_columns)

    # inner
    joined = f.join(f2, how='inner')
    tm.assert_index_equal(joined.index, f.index[5:10])
    tm.assert_index_equal(joined.columns, expected_columns)

    # outer
    joined = f.join(f2, how='outer')
    tm.assert_index_equal(joined.index, frame.index.sort_values())
    tm.assert_index_equal(joined.columns, expected_columns)

    # an unknown join method must raise
    tm.assert_raises_regex(
        ValueError, 'join method', f.join, f2, how='foo')

    # corner case - overlapping columns
    for how in ('outer', 'left', 'inner'):
        with tm.assert_raises_regex(ValueError, 'columns overlap but '
                                    'no suffix'):
            frame.join(frame, how=how)
def test_join_index_more(frame):
    """Join against a frame sampled at every other row: left join NaN-fills
    the missing rows, right join keeps only the sampled rows."""
    af = frame.loc[:, ['A', 'B']]
    bf = frame.loc[::2, ['C', 'D']]

    expected = af.copy()
    expected['C'] = frame['C'][::2]
    expected['D'] = frame['D'][::2]

    result = af.join(bf)
    tm.assert_frame_equal(result, expected)

    result = af.join(bf, how='right')
    tm.assert_frame_equal(result, expected[::2])

    result = bf.join(af, how='right')
    tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_join_index_series(frame):
    """Joining a named Series behaves like joining a one-column frame;
    joining an unnamed Series must raise."""
    df = frame.copy()
    s = df.pop(frame.columns[-1])
    joined = df.join(s)

    # TODO should this check_names ?
    tm.assert_frame_equal(joined, frame, check_names=False)

    s.name = None
    tm.assert_raises_regex(ValueError, 'must have a name', df.join, s)
def test_join_overlap(frame):
    """Overlapping columns get lsuffix/rsuffix; non-overlapping keep their name."""
    df1 = frame.loc[:, ['A', 'B', 'C']]
    df2 = frame.loc[:, ['B', 'C', 'D']]

    joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
    df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
    df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')

    no_overlap = frame.loc[:, ['A', 'D']]
    expected = df1_suf.join(df2_suf).join(no_overlap)

    # column order not necessarily sorted
    tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
    """Joining two frames that share a PeriodIndex preserves that index."""
    other = frame_with_period_index.rename(
        columns=lambda x: '{key}{key}'.format(key=x))

    joined_values = np.concatenate(
        [frame_with_period_index.values] * 2, axis=1)

    joined_cols = frame_with_period_index.columns.append(other.columns)

    joined = frame_with_period_index.join(other)

    expected = DataFrame(
        data=joined_values,
        columns=joined_cols,
        index=frame_with_period_index.index)

    tm.assert_frame_equal(joined, expected)
mfouesneau/tap | examples/ipython_notebook.py | 2 | 26219 | """
Some tools for the notebooks
"""
from IPython.display import display, Markdown
try:
from nbconvert.filters.markdown import markdown2latex, markdown2html
except ImportError:
from IPython.nbconvert.filters.markdown import markdown2latex, markdown2html
from IPython.display import DisplayObject
import time as _time
import sys
class Caption(Markdown):
    """Markdown caption for figures; optionally centered in HTML and LaTeX."""

    def __init__(self, s, center=False, **kwargs):
        super(Caption, self).__init__(s, **kwargs)
        self._center = center

    def _repr_html_(self):
        html = markdown2html(self.data)
        return '<center>{0}</center>'.format(html) if self._center else '{0}'.format(html)

    def _repr_latex_(self):
        latex = markdown2latex(self.data)
        if not self._center:
            return latex
        return '\\begin{center}\n' + latex + '\n\\end{center}'

    def display(self):
        display(self)

    def __str__(self):
        return self._repr_latex_()
class Matrix(object):
    """Render a 2-D array of numbers as a LaTeX ``bmatrix``."""

    def __init__(self, s, fmt='%0.4g'):
        """
        Parameters
        ----------
        s : 2-D iterable of numbers
            Matrix entries, indexed by row then column.
        fmt : str
            printf-style format applied to every cell.
        """
        self.s = s
        self._fmt = fmt

    def _build_latex(self):
        # One row per sequence in `s`; cells joined by '&', rows ended by '\\'.
        # (Extracted helper: this code was duplicated in _repr_ and _repr_latex_.)
        rows = [' & '.join([self._fmt % v for v in row]) + r'\\' for row in self.s]
        return r"""\begin{bmatrix}""" + ''.join(rows) + r"""\end{bmatrix}"""

    def _repr_(self):
        # NOTE(review): IPython's rich-display hook is `_repr_markdown_`;
        # `_repr_` is kept unchanged for backward compatibility with callers.
        return Markdown(self._build_latex())

    def _repr_latex_(self):
        """Return the LaTeX source for the matrix."""
        return self._build_latex()

    def __str__(self):
        return self._repr_latex_()

    def display(self):
        display(self)
def disp_markdown(*args):
    """Display the given markdown source in the notebook."""
    return display(Markdown(*args))
def load_latex_macros():
    """Display the local ``notebook_macros`` file as markdown so its LaTeX
    macro definitions become available to MathJax.

    Fix: the file handle is now closed deterministically (the original
    ``open(...).read()`` leaked it until garbage collection).
    """
    with open('notebook_macros') as f:
        return disp_markdown(f.read())
def add_input_toggle():
    """Add a notebook toolbar button that shows/hides all input cells.

    The injected script also enables CodeMirror line wrapping for code cells.
    Returns the HTML object so the caller can re-display it if needed.
    """
    from IPython.display import HTML, display
    r = HTML('''
<script>
$( document ).ready(function () {
IPython.CodeCell.options_default['cm_config']['lineWrapping'] = true;
IPython.notebook.get_selected_cell()

IPython.toolbar.add_buttons_group([
{
'label' : 'toggle all input cells',
'icon' : 'fa-eye-slash',
'callback': function(){ $('div.input').slideToggle(); }
}
]);
});
</script>
''')
    display(r)
    return r
def add_citation_button():
    """Add a toolbar button that inserts a Bibtex ``<cite>`` tag into the
    currently selected cell via a modal input dialog.

    Returns the HTML object so the caller can re-display it if needed.
    """
    from IPython.display import HTML, display
    r = HTML("""
<script>
function insert_citn() {
// Build paragraphs of cell type and count
var entry_box = $('<input type="text"/>');
var body = $('<div><p> Enter the Bibtex reference to insert </p><form>').append(entry_box)
.append('</form></div>');

// Show a modal dialog with the stats
IPython.dialog.modal({
notebook: IPython.notebook,
keyboard_manager: IPython.notebook.keyboard_manager,
title: "Bibtex reference insertion",
body: body,
open: function() {
// Submit on pressing enter
var that = $(this);
that.find('form').submit(function () {
that.find('.btn-primary').first().click();
return false;
});
entry_box.focus();
},
buttons : {
"Cancel" : {},
"Insert" : {
"class" : "btn-primary",
"click" : function() {
// Retrieve the selected citation, add to metadata,
var citation = entry_box.val();
// if (!citation) {return;}
var citn_html = '<cite data-cite="' + citation + '">' + citation + '</cite>';
var cell = IPython.notebook.get_selected_cell();
cell.code_mirror.replaceSelection(citn_html);
}
}
}
});
};

$( document ).ready(function () {
IPython.toolbar.add_buttons_group([
{
'label' : 'insert bibtex reference in markdown',
'icon' : 'fa-graduation-cap', // http://fontawesome.io/icons/
'callback': insert_citn,
}
]);
});
</script>
<style>
cite {
font-style: normal;
color: #45749e;
}
</style>
""")
    display(r)
    return r
class PDF(object):
    """Embed a PDF (referenced by URL or path) in notebook HTML/LaTeX output."""

    def __init__(self,url):
        self.url = url

    def _repr_html_(self):
        # Rendered as an inline frame in the live notebook.
        return '<iframe src=%s></iframe>' % self.url

    def _repr_latex_(self):
        # \adjustimage scales the embedded page to fit the line width/page height.
        return r'\begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{%s}\end{center}' % self.url
class Table(DisplayObject):
    """A HTML/LaTeX IPython DisplayObject table with optional side-by-side
    sub-tables and per-column cell formatting."""

    # Sentinel row: renders as vertical dots in every column.
    VDOTS = object()

    def __init__(self, data, headings=None, formats=None, caption=None,
                 label=None, position='h', subtables=1):
        """
        A HTML/LaTeX IPython DisplayObject Table

        `data` should be a 2 dimensional array, indexed by row then column,
        with an optional extra row `headings`.

        A 'row' (i.e., an element of `data`) may also be
        :py:const:`Table.VDOTS`, which produces vertical dots in all columns.

        `formats` may be a string, whose format method will be used for every
        cell; a function, called for every cell; or a mixed array of strings
        and functions which is zipped with each row.
        Headings are not formatted.

        `caption` and `label` add the relevant LaTeX markup, and will go in
        the first row of the HTML copy. `label` will have ``tab:`` prepended
        to it.

        If `subtables` is greater than 1, the table will be split into
        `subtables` parts of approximately equal length, and laid out side
        by side.
        """
        if len(data) == 0:
            raise ValueError("data is empty")

        # BUG FIX: the original `label is None != caption is None` is a
        # chained comparison -- (label is None) and (None != caption) and
        # (caption is None) -- which is always False, so the check never
        # fired. Parenthesizing yields the intended exclusive-or.
        if (label is None) != (caption is None):
            raise ValueError("specify neither or both of label & caption")

        self.columns = len(data[0])
        if self.columns == 0:
            raise ValueError("no columns")

        if headings and len(headings) != self.columns:
            raise ValueError("bad headings length")

        # Normalize `formats` into one callable per column.
        if isinstance(formats, str):
            formats = [formats.format] * self.columns
        elif callable(formats):
            formats = [formats] * self.columns
        elif formats:
            if len(formats) != self.columns:
                raise ValueError("bad formats length")

            def maybe_string_format(f):
                if isinstance(f, str):
                    return f.format
                else:
                    assert callable(f)
                    return f

            formats = list(map(maybe_string_format, formats))
        else:
            formats = [self._default_format] * self.columns

        for i, row in enumerate(data):
            if row is not self.VDOTS and len(row) != self.columns:
                raise ValueError("bad row length", i)

        self.headings = headings
        self.data = data
        self.formats = formats
        self.caption = caption
        self.label = label
        self.position = position
        self.subtables = subtables

    @staticmethod
    def _default_format(what):
        """Default cell formatter: five decimals for floats, str() otherwise."""
        if isinstance(what, float):
            return "{0:.5f}".format(what)
        else:
            return str(what)

    def _format_rows(self):
        """Yield each row with its cell formatters applied (VDOTS passes through)."""
        for row in self.data:
            if row is self.VDOTS:
                yield self.VDOTS
            else:
                yield (f(x) for f, x in zip(self.formats, row))

    def _subtables_split(self):
        """Split the formatted rows into `subtables` chunks of near-equal height."""
        assert self.subtables > 1

        rows = list(self._format_rows())
        nominal_height = len(rows) // self.subtables
        remainder = len(rows) % self.subtables

        # The first `remainder` sub-tables get one extra row each.
        heights = [nominal_height] * self.subtables
        for i in range(remainder):
            heights[i] += 1

        slices = []
        acc = 0
        for l in heights:
            slices.append((acc, acc + l))
            acc += l
        assert slices[-1][1] == len(rows)

        subtables = [rows[a:b] for a, b in slices]
        return subtables

    def _repr_latex_(self):
        """Return the LaTeX rendering (table/subtable environments + tabular)."""
        strings = []

        strings.append(r"""
\begin{table}[""" + self.position + r"""]
\centering
""")
        if self.label:
            strings.append(r"\caption{" + self.caption + "}")
            strings.append(r"\label{tab:" + self.label + "}")

        if self.subtables > 1:
            subtables = self._subtables_split()
            # 0.95 leaves a little slack so the \hfill-ed boxes fit one line.
            # (raw string: avoids the invalid "\l" escape warning)
            width = r"{:.3f}\linewidth".format(0.95 / self.subtables)

            for i, rows in enumerate(subtables):
                strings.append(r"\begin{{subtable}}[t]{{{0}}}%".format(width))
                strings.append(r"""
\centering
\vspace{0pt}
""")
                self._latex_tabular(strings, rows)
                strings.append(r"\end{subtable}%")
                if i != len(subtables) - 1:
                    # raw string: avoids the invalid "\h" escape warning
                    strings.append(r"\hfill%")
        else:
            rows = self._format_rows()
            self._latex_tabular(strings, rows)

        strings.append(r"""
\end{table}
""")
        return "\n".join(strings)

    def _latex_tabular(self, strings, rows):
        """Append a LaTeX `tabular` for `rows` to `strings` (columns centered)."""
        x = "|".join(["c"] * self.columns)
        strings.append(r"\begin{tabular}{|" + x + "|}")
        strings.append(r"\hline")

        if self.headings:
            latex = " & ".join(str(x) for x in self.headings)
            strings.append(latex + r" \\")
            strings.append(r"\hline")

        for row in rows:
            if row is self.VDOTS:
                row = [r"\vdots"] * self.columns
            latex = " & ".join(row)
            strings.append(latex + r" \\")

        strings.append(r"""
\hline
\end{tabular}%""")

    def _repr_html_(self):
        """Return the HTML rendering (one <table> per sub-table, floated left)."""
        strings = []

        strings.append("""
<style type="text/css">
.util_Table td { text-align: center; }
.util_Table tbody tr, .util_Table tbody td {
border-bottom: 0;
border-top: 0;
}
.util_Table_subtable {
float: left;
}
</style>
""")
        if self.label:
            c = self.caption
            l = "<code>[{}]</code>".format(self.label)

            # NOTE: argument {0} (self.columns) is intentionally unused by the
            # template; kept for compatibility with the historical format call.
            strings.append("""
<h3>{1} {2}</h3>
""".format(self.columns, c, l))

        if self.subtables > 1:
            subtables = self._subtables_split()
            # width = 0.95 / self.subtables

            strings.append("<div class='clearfix'>")
            for rows in subtables:
                strings.append("<div class='util_Table_subtable'>")
                self._html_table(strings, rows)
                strings.append("</div>")
            strings.append("</div>")
        else:
            rows = self._format_rows()
            self._html_table(strings, rows)

        return "\n".join(strings)

    def _html_table(self, strings, rows):
        """Append one HTML <table> for `rows` to `strings`."""
        strings.append("<table class='util_Table'>")

        if self.headings:
            strings.append("<thead>")
            strings.append("<tr>")
            headings = map("<th>{0}</th>".format, self.headings)
            strings.append("\n".join(headings))
            strings.append("</tr>")
            strings.append("</thead>")

        strings.append("<tbody>")
        for row in rows:
            if row is self.VDOTS:
                # U+22EE VERTICAL ELLIPSIS in every column
                row = ["\u22ee"] * self.columns
            strings.append("<tr>")
            row = map("<td>{0}</td>".format, row)
            strings.append("\n".join(row))
            strings.append("</tr>")
        strings.append("</tbody>")
        strings.append("</table>")

    def __repr__(self):
        """Plain-text rendering: left-justified columns padded to max width."""
        if self.headings:
            widths = [len(x) for x in self.headings]
            data = [self.headings]
        else:
            widths = None
            data = []

        # don't forget - self._format_rows() is a generator that yields generators
        for row in self._format_rows():
            if row is self.VDOTS:
                continue

            r = list(row)
            w = [len(x) for x in r]

            if widths is None:
                widths = w
            else:
                widths = [max(a, b) for a, b in zip(widths, w)]

            data.append(list(r))

        strings = []
        if self.label:
            c = self.caption.replace("\n", " ")
            strings.append('Table: {0} ({1})'.format(self.label, c))

        for row in data:
            if row is self.VDOTS:
                strings.append('...')
            else:
                r = [x.ljust(b + 4) for x, b in zip(row, widths)]
                strings.append(''.join(r))

        return '\n'.join(strings)

    def __html__(self):
        return self._repr_html_()
class LatexFigure(object):
    """Save the current matplotlib figure to disk and emit LaTeX to embed it."""

    # File format passed to savefig; filename becomes figure_<label>.<extension>.
    extension = 'pdf'

    def __init__(self, label, caption, fig=None, position="", star=False,
                 options=r'width=\columnwidth', margin=False):
        """
        A LaTeX IPython DisplayObject Figure

        `label` is mandatory, since it also sets the filename. It will
        have ``fig:`` preprended to it.

        `fig` is optional - the current figure (via ``gcf``) will be used
        if it is not set.

        `position` is either the float placement specifier or the subfigure
        vertical position.

        If `subfigure` is set to true, a subfigure with width `width` will
        be created.

        The figure is saved (via ``savefig``) as a PDF file in the current
        directory.

        Displaying the object produces LaTeX (only) to embed the figure.
        A little hacky, but since this is meant for use in the notebook
        it is assumed that the figure is going to be displayed automatically
        in HTML independently.
        """
        if fig is None:
            from matplotlib.pyplot import gcf
            fig = gcf()

        self.label = label
        self.caption = caption
        self.fig = fig
        self.position = position
        self.options = options
        self.star = star
        self.margin = margin

        self.filename = "figure_{0:s}.{1:s}".format(label, self.__class__.extension)

        import pylab as plt
        try:
            # bbox_inches='tight' trims surrounding whitespace but is not
            # supported by every backend; fall back to a plain save.
            plt.savefig(self.filename, bbox_inches='tight')
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            plt.savefig(self.filename)

    def _repr_html_(self):
        # Bit crude. Hide ourselves to the notebook viewer, since we'll
        # have been shown already anyway.
        # Nicer solutions are afaict infeasible.
        return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format(
            label=self.label, caption=self.caption))

    def _repr_latex_(self, subfigure=None):
        """Return the LaTeX to embed the saved file.

        When `subfigure` (a dict with 'position' and 'width') is given, a
        subfigure environment is emitted instead of a figure environment.
        """
        if subfigure:
            environment = "subfigure"
            args = "[{position}]{{{width}}}".format(**subfigure)
        else:
            environment = "figure"
            args = "[{0}]".format(self.position)
            args = args.replace('[]', '')

        if self.star:
            environment += '*'
        elif self.margin and (not subfigure):
            # `and` instead of bitwise `&`: correct short-circuit boolean logic
            environment = "margin" + environment

        return r"""\begin{{{env:s}}}{args:s}
\centering
\includegraphics[{options:s}]{{{fname:s}}}
\caption{{{caption:s}}}
\label{{fig:{label:s}}}
\end{{{env:s}}}
""".format(env=environment, args=args, options=self.options,
           fname=self.filename, caption=self.caption,
           label=self.label)

    def __repr__(self):
        c = self.caption.replace("\n", " ")
        return "Figure: {0} ({1})".format(self.label, c)

    def __html__(self):
        return ""
class LatexSubfigures(object):
    """Lay several LatexFigure objects out as subfigures of one LaTeX figure."""

    def __init__(self, label, caption, figures, position='h',
                 subfigure_position='b', star=False):
        """
        Displays several :cls:`LatexFigures` as sub-figures, two per row.

        `figures` should be an array of :cls:`LatexFigure` objects, not
        :cls:`matplotlib.Figure` objects.
        """
        self.label = label
        self.caption = caption
        self.figures = figures
        self.position = position
        self.subfigure_position = subfigure_position
        self.star = star

    def _repr_html_(self):
        # Bit crude. Hide ourselves to the notebook viewer, since we'll
        # have been shown already anyway.
        # Nicer solutions are afaict infeasible.
        return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format(
            label=self.label, caption=self.caption))

    def _repr_latex_(self):
        """Return one figure environment containing all subfigures.

        Cleanup: removed the commented-out left/right alternation code that
        was dead in the original.
        """
        strings = []

        environment = "figure"
        if self.star:
            environment += '*'

        strings.append(r"""\begin{""" + environment + """}[""" + self.position + r"""]
\centering
""")
        # Split the line width evenly, leaving a 1% gap per figure.
        opts = {"position": self.subfigure_position,
                "width": r"{0:0.2f}\linewidth".format(
                    (1 - len(self.figures) * 0.01) / len(self.figures))}
        for f in self.figures:
            # have to be quite careful about whitespace
            latex = f._repr_latex_(subfigure=opts).strip()
            strings.append(latex)

        strings.append(r"""
\caption{""" + self.caption + r"""}
\label{fig:""" + self.label + r"""}
\end{""" + environment + """}
""")

        return "\n".join(strings)

    def __repr__(self):
        c = self.caption.replace("\n", " ")
        strings = ["Figure group: {0} ({1})".format(self.label, c)]
        strings += [repr(x) for x in self.figures]
        return "\n".join(strings)

    def __html__(self):
        return ""
class LatexNumberFormatter(object):
    """Callable that renders numbers in LaTeX scientific notation.

    Produces e.g. ``$-4.2340 \\times 10^{-5}$``; non-negative numbers are
    prefixed with ``\\phantom{-}`` so columns of values line up.
    """

    def __init__(self, sf=10):
        """Create a formatter keeping `sf` digits after the decimal point."""
        self.sf = sf
        self.s_fmt = "{{:.{0}e}}".format(self.sf)

    def __call__(self, n):
        """Return the LaTeX string for `n`."""
        formatted = self.s_fmt.format(n)
        mantissa, sep, exponent = formatted.partition("e")
        if sep != "e":
            # No exponent marker found: emit the raw formatted value.
            return "${}$".format(mantissa)
        if not mantissa.startswith("-"):
            mantissa = r"\phantom{-}" + mantissa
        return r"${} \times 10^{{{}}}$".format(mantissa, int(exponent))
"""
Simple progressbar
==================
This package implement a unique progress bar class that can be used to decorate
an iterator, a function or even standalone.
The format of the meter is flexible and can display along with the progress
meter, the running time, an eta, and the rate of the iterations.
An example is:
description [----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]
"""
class NBPbar(object):
    """
    make a progress string in a shape of:
        [----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]

    Attributes
    ---------
    time: bool, optional (default: True)
        if set, add the runtime information
    eta: bool, optional (default: True)
        if set, add an estimated time to completion
    rate: bool, optional (default: True)
        if set, add the rate information
    length: int, optional (default: None)
        number of characters showing the progress meter itself
        if None, the meter will adapt to the buffer width
        TODO: make it variable with the buffer length
    keep: bool, optional (default: True)
        If not set, deletes its traces from screen after completion
    file: buffer
        the buffer to write into
    mininterval: float (default: 0.5)
        minimum time in seconds between two updates of the meter
    miniters: int, optional (default: 1)
        minimum iteration number between two updates of the meter
    units: str, optional (default: 'iters')
        unit of the iteration
    """

    def __init__(self, desc=None, maxval=None, time=True, eta=True, rate=True, length=None,
                 file=None, keep=True, mininterval=0.5, miniters=1, units='iters', **kwargs):
        self.time = time
        self.eta = eta
        self.rate = rate
        self.desc = desc or ''
        self.units = units
        self.file = file or sys.stdout
        self._last_print_len = 0
        self.keep = keep
        self.mininterval = mininterval
        self.miniters = miniters
        self._auto_width = True
        self.length = 10
        if length is not None:
            self.length = length
            self._auto_width = False
        # backward compatibility
        self._start_t = _time.time()
        self._maxval = maxval
        if 'txt' in kwargs:
            self.desc = kwargs['txt']
        # Lazily-created notebook progress widget (see print_status).
        self._F = None

    @staticmethod
    def format_interval(t):
        """ make a human readable time interval decomposed into days, hours,
        minutes and seconds

        Parameters
        ----------
        t: int
            interval in seconds

        Returns
        -------
        txt: str
            string representing the interval
            (format: <days>d <hrs>:<min>:<sec>)
        """
        mins, s = divmod(int(t), 60)
        h, m = divmod(mins, 60)
        d, h = divmod(h, 24)
        txt = '{m:02d}:{s:02d}'
        if h:
            txt = '{h:02d}:' + txt
        if d:
            txt = '{d:d}d ' + txt
        return txt.format(d=d, h=h, m=m, s=s)

    def build_str_meter(self, n, total, elapsed):
        """
        make a progress string in a shape of:
            k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]

        Parameters
        ----------
        n: int
            number of finished iterations
        total: int
            total number of iterations, or None
        elapsed: int
            number of seconds passed since start

        Returns
        -------
        txt: str
            string representing the meter
        """
        # BUG FIX: guard the comparison -- `n > None` raises TypeError on
        # Python 3 when the total is unknown.
        if total is not None and n > total:
            total = None
        vals = {'n': n}
        # BUG FIX: desc must be available before the open-ended early return
        # below; formatting '{desc:s}' used to raise KeyError when total was
        # falsy because it was only set after that return.
        vals['desc'] = self.desc
        vals['elapsed'] = self.format_interval(elapsed)
        vals['rate'] = '{0:5.2f}'.format((n / elapsed)) if elapsed else '?'
        vals['units'] = self.units
        if not total:
            txt = '{desc:s} {n:d}'
        else:
            txt = '{desc:s} {n:d}/{total:d} {percent:s}'
        if self.time or self.eta or self.rate:
            txt += ' ['
            info = []
            if self.time:
                info.append('time: {elapsed:s}')
            if self.eta and total:
                info.append('eta: {left:s}')
            if self.rate:
                info.append('{rate:s} {units:s}/sec')
            txt += ', '.join(info) + ']'
        if not total:
            # open-ended meter: no percent/eta available
            return txt.format(**vals)
        frac = float(n) / total
        vals['percent'] = '{0:3.0%}'.format(frac)
        vals['left'] = self.format_interval(elapsed / n * (total - n)) if n else '?'
        vals['total'] = total
        return txt.format(**vals)

    def print_status(self, n, total, elapsed):
        """Render the meter into a notebook FloatProgress widget (created on
        first call, updated afterwards)."""
        # BUG FIX: IPython.html.widgets was removed from IPython long ago;
        # prefer ipywidgets and keep the old location as a fallback.
        try:
            from ipywidgets import FloatProgress
        except ImportError:
            from IPython.html.widgets import FloatProgress
        desc = self.build_str_meter(n, total, elapsed)
        if self._F is None:
            self._F = FloatProgress(min=0, max=total, description=desc)
            display(self._F)
        self._F.value = n
        self._F.description = desc

    def iterover(self, iterable, total=None):
        """
        Get an iterable object, and return an iterator which acts exactly like the
        iterable, but prints a progress meter and updates it every time a value is
        requested.

        Parameters
        ----------
        iterable: generator or iterable object
            object to iter over.

        total: int, optional
            the number of iterations is assumed to be the length of the
            iterator. But sometimes the iterable has no associated length or
            its length is not the actual number of future iterations. In this
            case, total can be set to define the number of iterations.

        Returns
        -------
        gen: generator
            pass the values from the initial iterator
        """
        if total is None:
            try:
                total = len(iterable)
            except TypeError:
                total = self._maxval
        self.print_status(0, total, 0)
        last_print_n = 0
        start_t = last_print_t = _time.time()
        for n, obj in enumerate(iterable):
            yield obj
            # throttle updates by iteration count and wall-clock interval
            if n - last_print_n >= self.miniters:
                cur_t = _time.time()
                if cur_t - last_print_t >= self.mininterval:
                    self.print_status(n, total, cur_t - start_t)
                    last_print_n = n
                    last_print_t = cur_t
        if self.keep:
            # emit a final update if the last iterations were throttled away
            if last_print_n < n:
                cur_t = _time.time()
                self.print_status(n, total, cur_t - start_t)
            self.file.write('\n')

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # never suppress exceptions raised inside the with-block
        return False

    def update(self, n, desc=None, total=None):
        """ Kept for backward compatibility and the decorator feature """
        if total is None:
            total = self._maxval
        if desc is not None:
            self.desc = desc
        cur_t = _time.time()
        self.print_status(n, total, cur_t - self._start_t)
| mit |
scauglog/brain_record_toolbox | script_r448_r415_r451_signal_to_noise_ratio.py | 1 | 3511 | import pickle
import signal_processing as sig_proc
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import stats
import copy
# Step/phase analysis: compute a per-cluster signal-to-noise ratio (SNR) for
# rats r448 and r415 and compare the two distributions with a box plot.
dir_name = '../data/r448/r448_131022_rH/'
img_ext = '.eps'
save_img = True
show = True

# signal filtering parameter (band-pass edges, Hz)
low_cut = 3e2
high_cut = 3e3

sp = sig_proc.Signal_processing(save_img, show, img_ext)

# One list of per-cluster SNR values per rat; fed to the final box plot.
global_snr = []

print ('#### r448 ####')
trials = [2, 5, 6, 7]
dir_name = '../data/r448/r448_131022_rH/'

print('### spikes load ###')
with open(dir_name + 'data_processed', 'rb') as my_file:
    record_data = pickle.load(my_file)
# record_data[trial] = {'spikes_values': all_chan_spikes_values,
#                       'spikes_time': all_chan_spikes_times,
#                       'spikes_classes': all_chan_spikes_classes,
#                       'clusters': all_chan_clusters,
#                       'length_signal': signal.shape[1],
#                       'fs': fs }

signals = sp.load_m(dir_name + 'cell_trial.mat', 't')
fs = float(sp.load_m(dir_name + 'fech.mat', 'sampFreq'))

signal_noise_ratio_r448 = []
for trial in trials:
    signal = np.transpose(signals[0][trial-1])
    fsignal = sp.signal_mc_filtering(signal, low_cut, high_cut, fs)
    for chan in range(len(record_data[trial]['clusters'])):
        # Noise band: channel mean +/- 2 std of the filtered signal.
        sig_mean = np.array(fsignal[chan]).mean()
        sig_std = np.array(fsignal[chan]).std()
        min_sig = sig_mean-2*sig_std
        max_sig = sig_mean+2*sig_std
        for cluster in record_data[trial]['clusters'][chan]:
            if np.array(cluster.spikes_values).shape[0] > 0:
                # SNR = mean spike peak-to-peak over the 4-sigma noise band.
                max_spike = np.array(cluster.spikes_values).max(1).mean()
                min_spike = np.array(cluster.spikes_values).min(1).mean()
                signal_noise_ratio_r448.append((max_spike-min_spike)/(max_sig-min_sig))
            else:
                # Empty cluster: record 0 so cluster counts stay aligned.
                signal_noise_ratio_r448.append(0)
global_snr.append(signal_noise_ratio_r448)

print ('#### r415 ####')
dir_name = '../data/r415/'

print('### spikes load ###')
with open(dir_name + 'data_processed', 'rb') as my_file:
    record_data = pickle.load(my_file)
# record_data[trial] = {'spikes_values': all_chan_spikes_values,
#                       'spikes_time': all_chan_spikes_times,
#                       'spikes_classes': all_chan_spikes_classes,
#                       'clusters': all_chan_clusters,
#                       'length_signal': signal.shape[1],
#                       'fs': fs }

signal = sp.load_m(dir_name + 'r415_130926.mat', 'd')
fs = float(sp.load_m(dir_name + 'fech.mat', 'sampFreq'))

signal_noise_ratio_r415 = []
fsignal = sp.signal_mc_filtering(signal, low_cut, high_cut, fs)
for chan in range(len(record_data['130926']['clusters'])):
    # Same SNR computation as r448, but r415 has a single recording session.
    sig_mean = np.array(fsignal[chan]).mean()
    sig_std = np.array(fsignal[chan]).std()
    min_sig = sig_mean-2*sig_std
    max_sig = sig_mean+2*sig_std
    for cluster in record_data['130926']['clusters'][chan]:
        if np.array(cluster.spikes_values).shape[0]>0:
            max_spike = np.array(cluster.spikes_values).max(1).mean()
            min_spike = np.array(cluster.spikes_values).min(1).mean()
            signal_noise_ratio_r415.append((max_spike-min_spike)/(max_sig-min_sig))
        else:
            signal_noise_ratio_r415.append(0)
global_snr.append(signal_noise_ratio_r415)

# Compare the two SNR distributions side by side.
plt.figure()
plt.boxplot(global_snr)
if save_img:
    plt.savefig('box_plot_snr_r448_r415'+img_ext, bbox_inches='tight')
if show:
    plt.show()
else:
    plt.close()
lemiere/python-lecture | tp_aleatoire/exemples/random_0.py | 1 | 1359 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Lemiere Yves
# Juillet 2017
import matplotlib.pyplot as plt
import random
def bunch_of_random_integer(param_min, param_max, number_of_sample):
    """Return `number_of_sample` random integers drawn uniformly from
    [param_min, param_max] (both ends included)."""
    return [random.randint(param_min, param_max)
            for _ in range(number_of_sample)]
def bunch_of_random_real(param_min, param_max, number_of_sample):
    """Return `number_of_sample` random floats drawn uniformly from
    [param_min, param_max]."""
    return [random.uniform(param_min, param_max)
            for _ in range(number_of_sample)]
# Main program: ask the user for a range and a sample count, then draw
# histograms of uniformly distributed integers and reals over that range.
if __name__ == "__main__":
    debug = True

    if debug:
        print("************************")
        print("* Welcome in random_0  *")
        print("************************\n")

    # Fixed seed so successive runs produce the same "random" histograms.
    random.seed(1)

    print("Define the expected random distribution")
    in_N = input("Number of values to generate ? ")
    in_min_value = input("Minimum value from range ? ")
    in_max_value = input("Maximum value from range ? ")

    data = bunch_of_random_integer(int(in_min_value),int(in_max_value),int(in_N))

    # Display the integer sample as a 20-bin histogram (green).
    plt.hist(data,20, facecolor='g', alpha=0.75)
    plt.show()

    data = []
    data = bunch_of_random_real(int(in_min_value),int(in_max_value),int(in_N))

    # Display the real-valued sample as a 20-bin histogram (red).
    plt.hist(data,20, facecolor='r', alpha=0.75)
    plt.show()
| gpl-3.0 |
google/brain-tokyo-workshop | WANNRelease/prettyNEAT/domain/make_env.py | 1 | 2041 | import numpy as np
import gym
from matplotlib.pyplot import imread
def make_env(env_name, seed=-1, render_mode=False):
    """Instantiate the training environment named `env_name`.

    Name prefixes select the custom local domains below; any other name is
    forwarded to `gym.make`.

    Parameters
    ----------
    env_name : str
        Environment name (e.g. "BipedalWalkerHardcore", "Classify_digits").
    seed : int
        RNG seed for the environment; ignored when negative.
    render_mode : bool
        Accepted for API compatibility; not used by this factory.

    Returns
    -------
    env : gym.Env
        The constructed (and optionally seeded) environment.
    """
    # -- Bullet Environments ------------------------------------------- -- #
    if "Bullet" in env_name:
        import pybullet as p  # pip install pybullet
        import pybullet_envs
        import pybullet_envs.bullet.kukaGymEnv as kukaGymEnv

    # -- Bipedal Walker ------------------------------------------------ -- #
    if (env_name.startswith("BipedalWalker")):
        if (env_name.startswith("BipedalWalkerHardcore")):
            import Box2D
            from domain.bipedal_walker import BipedalWalkerHardcore
            env = BipedalWalkerHardcore()
        elif (env_name.startswith("BipedalWalkerMedium")):
            from domain.bipedal_walker import BipedalWalker
            env = BipedalWalker()
            env.accel = 3
        else:
            from domain.bipedal_walker import BipedalWalker
            env = BipedalWalker()

    # -- VAE Racing ---------------------------------------------------- -- #
    elif (env_name.startswith("VAERacing")):
        from domain.vae_racing import VAERacing
        env = VAERacing()

    # -- Classification ------------------------------------------------ -- #
    elif (env_name.startswith("Classify")):
        from domain.classify_gym import ClassifyEnv
        if env_name.endswith("digits"):
            from domain.classify_gym import digit_raw
            trainSet, target = digit_raw()
        if env_name.endswith("mnist256"):
            from domain.classify_gym import mnist_256
            trainSet, target = mnist_256()
        env = ClassifyEnv(trainSet, target)

    # -- Cart Pole Swing up -------------------------------------------- -- #
    elif (env_name.startswith("CartPoleSwingUp")):
        from domain.cartpole_swingup import CartPoleSwingUpEnv
        env = CartPoleSwingUpEnv()
        if (env_name.startswith("CartPoleSwingUp_Hard")):
            env.dt = 0.01
            env.t_limit = 200

    # -- Other -------------------------------------------------------- -- #
    else:
        env = gym.make(env_name)

    if (seed >= 0):
        # BUG FIX: was `domain.seed(seed)` -- `domain` is a package name with
        # no seed() function; the environment instance must be seeded instead.
        env.seed(seed)
    return env
wkentaro/fcn | examples/apc2016/datasets/rbo.py | 1 | 2463 | import glob
import os
import os.path as osp
import re
import chainer
import numpy as np
import scipy.misc
from sklearn.model_selection import train_test_split
from base import APC2016DatasetBase
class APC2016rboDataset(APC2016DatasetBase):
    """APC2016 "RBO" shelf-bin dataset: scene images with segmentation labels
    rebuilt on the fly from per-object .pbm mask files."""

    def __init__(self, data_type):
        """Collect the scene ids and split them 75/25 into train/val.

        data_type: 'train' or 'val'.
        """
        assert data_type in ('train', 'val')
        self.dataset_dir = chainer.dataset.get_dataset_directory(
            'apc2016/APC2016rbo')
        data_ids = self._get_ids()
        # fixed random_state so train/val membership is reproducible
        ids_train, ids_val = train_test_split(
            data_ids, test_size=0.25, random_state=1234)
        if data_type == 'train':
            self._ids = ids_train
        else:
            self._ids = ids_val

    def __len__(self):
        return len(self._ids)

    def _get_ids(self):
        """Return the ids of all scene images (files named *_<n>_bin_<a-l>.jpg)."""
        ids = []
        for img_file in os.listdir(self.dataset_dir):
            if not re.match(r'^.*_[0-9]*_bin_[a-l].jpg$', img_file):
                continue
            data_id = osp.splitext(img_file)[0]
            ids.append(data_id)
        return ids

    def _load_from_id(self, data_id):
        """Return (img, lbl) for one scene: the RGB image and an int32 label
        map (-1 = outside the shelf bin, 0.. = object class index)."""
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this
        # code requires an old SciPy (or porting to imageio.imread) -- confirm.
        img_file = osp.join(self.dataset_dir, data_id + '.jpg')
        img = scipy.misc.imread(img_file)
        # generate label from mask files
        lbl = np.zeros(img.shape[:2], dtype=np.int32)
        # shelf bin mask file: pixels outside the bin are marked -1 (ignore)
        shelf_bin_mask_file = osp.join(self.dataset_dir, data_id + '.pbm')
        shelf_bin_mask = scipy.misc.imread(shelf_bin_mask_file, mode='L')
        lbl[shelf_bin_mask < 127] = -1
        # object mask files: '<data_id>_<label_name>.pbm' -> class index
        mask_glob = osp.join(self.dataset_dir, data_id + '_*.pbm')
        for mask_file in glob.glob(mask_glob):
            mask_id = osp.splitext(osp.basename(mask_file))[0]
            mask = scipy.misc.imread(mask_file, mode='L')
            lbl_name = mask_id[len(data_id + '_'):]
            lbl_id = self.label_names.index(lbl_name)
            lbl[mask > 127] = lbl_id
        return img, lbl

    def get_example(self, i):
        """Return (datum, lbl) for dataset index i (chainer dataset API)."""
        data_id = self._ids[i]
        img, lbl = self._load_from_id(data_id)
        datum = self.img_to_datum(img)
        return datum, lbl
if __name__ == '__main__':
    # Ad-hoc visual sanity check: print the split sizes, then page through
    # every validation example one figure at a time.
    import matplotlib.pyplot as plt
    import six
    dataset_train = APC2016rboDataset('train')
    dataset_val = APC2016rboDataset('val')
    print('train: %d, val: %d' % (len(dataset_train), len(dataset_val)))
    for i in six.moves.range(len(dataset_val)):
        # visualize_example is provided by APC2016DatasetBase
        viz = dataset_val.visualize_example(i)
        plt.imshow(viz)
        plt.show()
| mit |
JavascriptMick/deeplearning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
    """Return the ten CIFAR-10 class names, indexed by integer label id."""
    return [
        'airplane', 'automobile', 'bird', 'cat', 'deer',
        'dog', 'frog', 'horse', 'ship', 'truck',
    ]
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """
    Load a batch of the dataset.

    Returns (features, labels): features is an (N, 32, 32, 3) array
    built from the raw channel-first rows, labels the list of class ids.
    """
    with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
        # CIFAR-10 batches were pickled with Python 2; latin1 keeps the
        # byte strings decodable under Python 3.
        batch = pickle.load(file, encoding='latin1')
    # each raw row is 3072 values laid out channel-first (3 x 32 x 32);
    # transpose to height x width x channel for display / training
    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
    """
    Display stats of the dataset: per-batch label counts plus one sample
    image with its value range, shape and label name.
    """
    # valid CIFAR-10 training batch ids are 1..5
    batch_ids = list(range(1, 6))
    if batch_id not in batch_ids:
        print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
        return None
    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None
    print('\nStats of batch {}:'.format(batch_id))
    print('Samples: {}'.format(len(features)))
    # zip the unique label values with their occurrence counts
    print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
    print('First 20 Labels: {}'.format(labels[:20]))
    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    label_names = _load_label_names()
    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    plt.axis('off')
    plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
    """
    Preprocess data and save it to file.

    Parameters:
        normalize       callable mapping raw features to normalized features
        one_hot_encode  callable mapping label ids to one-hot encoded labels
        features        raw feature data
        labels          raw label data
        filename        destination path for the pickled (features, labels)
    """
    features = normalize(features)
    labels = one_hot_encode(labels)
    # Use a context manager so the file handle is flushed and closed even
    # if pickling raises; the original pickle.dump(..., open(filename, 'wb'))
    # leaked the handle.
    with open(filename, 'wb') as f:
        pickle.dump((features, labels), f)
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
    """
    Preprocess Training and Validation Data.

    Writes 'preprocess_batch_<i>.p' for each of the 5 training batches
    (minus a 10% tail held out per batch), 'preprocess_validation.p' with
    the pooled held-out samples, and 'preprocess_test.p' for the test batch.
    """
    n_batches = 5
    valid_features = []
    valid_labels = []
    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
        # last 10% of each batch is held out for validation
        validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
        _preprocess_and_save(
            normalize,
            one_hot_encode,
            features[:-validation_count],
            labels[:-validation_count],
            'preprocess_batch_' + str(batch_i) + '.p')
        # Use a portion of training batch for validation
        valid_features.extend(features[-validation_count:])
        valid_labels.extend(labels[-validation_count:])
    # Preprocess and Save all validation data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(valid_features),
        np.array(valid_labels),
        'preprocess_validation.p')
    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    # load the test data (channel-first rows -> H x W x C)
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']
    # Preprocess and Save all test data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(test_features),
        np.array(test_labels),
        'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
    """
    Yield successive (features, labels) slices of at most ``batch_size``
    items; the final batch may be smaller.
    """
    total = len(features)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield features[start:stop], labels[start:stop]
        start = stop
def load_preprocess_training_batch(batch_id, batch_size):
    """
    Load the Preprocessed Training data and return them in batches of
    <batch_size> or less.

    Parameters:
        batch_id    training batch number (matches 'preprocess_batch_<id>.p')
        batch_size  maximum number of samples per yielded batch
    """
    filename = 'preprocess_batch_' + str(batch_id) + '.p'
    # Close the handle deterministically; the original
    # pickle.load(open(filename, 'rb')) left the file open until GC.
    with open(filename, mode='rb') as f:
        features, labels = pickle.load(f)
    # Return the training data in batches of size <batch_size> or less
    return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
    """
    Plot sample images next to horizontal bars of the model's top softmax
    predictions.

    Parameters:
        features     iterable of images to display
        labels       iterable of one-hot encoded true labels
        predictions  object with .indices / .values pairs of the top
                     predicted class ids and their softmax scores
                     (e.g. the result of a top-k operation)
    """
    n_classes = 10
    label_names = _load_label_names()
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(range(n_classes))
    # recover integer class ids from the one-hot labels
    label_ids = label_binarizer.inverse_transform(np.array(labels))
    fig, axies = plt.subplots(nrows=4, ncols=2)
    fig.tight_layout()
    fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
    n_predictions = 3
    margin = 0.05
    ind = np.arange(n_predictions)
    width = (1. - 2. * margin) / n_predictions
    # left column: the image with its true label; right column: bar chart
    # of the top predictions (reversed so the best appears on top)
    for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indicies]
        correct_name = label_names[label_id]
        axies[image_i][0].imshow(feature)
        axies[image_i][0].set_title(correct_name)
        axies[image_i][0].set_axis_off()
        axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
        axies[image_i][1].set_yticks(ind + margin)
        axies[image_i][1].set_yticklabels(pred_names[::-1])
        axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
sheabrown/faraday_complexity | final/plots.py | 1 | 15250 | # Use inception class to access these
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from keras.utils import plot_model
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
from keras.models import load_model
class plots:
"""
Class for making plots for the inception model.
Functions
_plotCNN
_plotF1
_plotParamProb
_plotROC
"""
    def _plotCNN(self, to_file='graph.png'):
        """Write a graphviz diagram of the Keras model (self.model_) to *to_file*."""
        plot_model(self.model_, to_file=to_file)
    def _plotROC(self, data='test', save=False, to_file='roc.pdf', fontsize=20):
        """
        Function for plotting the ROC curve.

        To call:
            _plotROC(data, save, to_file, fontsize)

        Parameters:
            data        dataset passed to _getROC when the ROC arrays
                        have not been computed yet
            save        (boolean) save image instead of displaying it
            to_file     file to save image to
            fontsize    fontsize of axis labels
        """
        # compute the ROC arrays on demand if not already cached
        try:
            self.fpr_
            self.tpr_
        except:
            self._getROC(data)
        plt.figure(1)
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.plot(self.fpr_, self.tpr_)
        plt.xlabel(r'$\rm FPR$', fontsize=fontsize)
        plt.ylabel(r'$\rm TPR$', fontsize=fontsize)
        plt.tight_layout()
        if save:
            plt.savefig(to_file)
            plt.close('all')
        else:
            plt.show()
    def _plotF1(self, step=0.025, save=False, to_file='f1_score.pdf', fontsize=20):
        """
        Function for plotting the F1 score as a function
        of the threshold probability.

        To call:
            _plotF1(step, save=False, to_file, fontsize=20)

        Parameters:
            step        stepsize to take (0.5 to 1.0)
            save        (boolean) save image
            to_file     file to save image to
            fontsize    fontsize of axis labels
        """
        # compute the (threshold, F1) arrays on demand if not cached
        try:
            self.threshold_
            self.F1_
        except:
            self._getF1(step)
        plt.figure(1)
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.plot(self.threshold_, self.F1_)
        plt.xlabel(r'$p_\mathrm{cutoff}$', fontsize=fontsize)
        plt.ylabel(r'$F_{1} \, \mathrm{score}$', fontsize=fontsize)
        plt.tight_layout()
        if save:
            plt.savefig(to_file)
            plt.close('all')
        else:
            plt.show()
    def _plotParamProb(self, param, kind='kde', gridsize=50, save=False, to_file="FluxProb.pdf", fontscale=1.25):
        """
        Function for plotting a parameter of the second
        component against its probability of being
        complex, as measured by the model.

        To call:
            _plotParamProb(param, kind, gridsize, save, to_file, fontscale)

        Parameters:
            param       column name in self.dfComplex_
                        ('flux', 'depth', 'chi' or 'sig')
            kind        seaborn jointplot params: "kde", "hex", etc.
            gridsize    smoothing parameter
            save        (boolean) save image
            to_file     filepath to save image
            fontscale   axes label scaling
        """
        try:
            self.dfComplex_
        except:
            self._getComplexParams()
        # ===================================================
        #       Dictionary for x-axis label
        # ===================================================
        label = {
            "flux": r'$F_{2}$',
            "depth": r'$\Delta \phi$',
            "chi": r'$\Delta \chi$',
            "sig": r'$\sigma_\mathrm{noise}$'
        }
        # ===================================================
        #       1) Retrieve the flux of the second component
        #       2) Retrieve the model's probability that the
        #       source is complex
        # ===================================================
        valu = pd.Series(self.dfComplex_[param], name=label[param])
        prob = pd.Series(self.dfComplex_["prob"], name=r'$p_\mathrm{complex}$')
        # ===================================================
        #       Create the plot
        # ===================================================
        sns.set(font_scale=fontscale)
        sns.jointplot(valu, prob, kind=kind, gridsize=gridsize)
        # ===================================================
        #       Save or display the image
        # ===================================================
        if save:
            plt.savefig(to_file)
            plt.close('all')
        else:
            plt.show()
    def _plotBinaryParamProb(self, param, save=False, to_file='param_binary.pdf', fontsize=20,
                            s=10, alpha=0.05, cComplex='darkorange', cSimple='dodgerblue'):
        """
        Scatter-plot a parameter column against the model's complexity
        probability for both the simple and the complex samples.

        Parameters:
            param       column name in self.dfSimple_ / self.dfComplex_
            save        (boolean) save image
            to_file     file to save image to
            fontsize    fontsize of axis labels
            s, alpha    marker size and transparency
            cComplex    color for complex sources
            cSimple     color for simple sources

        NOTE(review): the x-axis label is hard-coded to sigma even though
        any column can be passed as *param* -- confirm that is intended.
        """
        plt.figure()
        plt.scatter(self.dfSimple_[param], self.dfSimple_['prob'], color=cSimple, alpha=alpha, s=s)
        plt.scatter(self.dfComplex_[param], self.dfComplex_['prob'], color=cComplex, alpha=alpha, s=s)
        plt.xlabel(r'$\sigma$', fontsize=fontsize)
        plt.ylabel(r'$p_\mathrm{complex}$', fontsize=fontsize)
        if save:
            plt.savefig(to_file)
            plt.close('all')
        else:
            plt.show()
def _plotLoss(self, logfile=None, save=False, to_file='loss_vs_epoch.pdf', fontsize=20):
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# -------------- Initialize the Graph ---------
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'$\rm Epoch$', fontsize=fontsize)
plt.ylabel(r'$\rm Loss$', fontsize=fontsize)
plt.plot(self.dfLog_.index, self.dfLog_['loss'], label='Training Loss')
plt.plot(self.dfLog_.index, self.dfLog_['val_loss'], label='Validation Loss')
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
def _plotAcc(self, logfile=None, save=False, to_file='acc_vs_epoch.pdf', fontsize=20):
"""
Function for plotting the accuracy as a function of epoch.
To call:
_plotAcc(logfile, save, imfile)
Parameters:
"""
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# ===================================================
# Plot accuracy vs epoch
# ===================================================
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.dfLog_.index, self.dfLog_['binary_accuracy'], label='Training Binary Accuracy')
plt.plot(self.dfLog_.index, self.dfLog_['val_binary_accuracy'], label='Validation Binary Accuracy')
plt.xlabel('Epoch', fontsize=fontsize)
plt.ylabel('Binary Accuracy ', fontsize=fontsize)
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
'''
def _loadData(self, directory):
"""
Function for loading data arrays from a directory.
To call:
_loadModel(directory)
Parameters:
directory
"""
self.X_data = np.load(directory+'X_data.npy')
self.Y_data = np.load(directory+'label.npy')
#------ creation params --------
self.chi_data = np.load(directory+'chi.npy')
self.depth_data = np.load(directory+'depth.npy')
self.flux_data = np.load(directory+'flux.npy')
self.q_data = np.load(directory+'Q_data.npy')
self.s_data = np.load(directory+'S_data.npy')
self.sig_data = np.load(directory+'sig.npy')
self.u_data = np.load(directory+'U_data.npy')
'''
def _format_param_name(self, param_name):
"""
Function for formatting a string parameter name (chi, depth, etc....) to LateX
form for plot labels.
To call:
_format_param_name(param_name)
Parameters:
param_name
"""
if param_name == 'sigma':
return r'$\sigma$'
elif param_name == 'chi':
return r'$\Delta\chi$'
elif param_name == 'flux':
return r'$\Delta F$'
elif param_name == 'depth':
return r'$\Delta \phi$'
else:
return param_name
def _make_cut(self, param_array, param_name,num_cut=10,prob=0.5, save=False):
"""
Function for cutting along a single parameter value to test the model's performance over
a parameter range. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameter.
To call:
_make_cut(param_array, param_name,num_cut, prob, save)
Parameters:
param_array
param_name
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
cut_array = param_array
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals = np.linspace(0.,np.max(cut_array)[0]*.9,num_cut)
oned =False
except:
cut_vals = np.linspace(0.,np.max(cut_array)*.9,num_cut)
oned = True
matrix_vals = []
# --------- make a series of cuts and save results for plotting ----------
for c in cut_vals:
print (c)
#do the cut
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for i in range(len(cut_array)):
val = cut_array[i]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val) == tuple_check:
if abs(val[0]-val[1]) >= c:
postcut.append(abs(val[0]-val[1]))
kept.append(i)
else:
if val >= c:
postcut.append(val)
kept.append(i)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
predictions = np.where(probs > prob, 1, 0)
'''
#------------ Confusion Matrix -------------
[simple marked as simple simple marked as complex]
[complex marked as simple complex marked as complex]
'''
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals.append(cm)
except:
print ('Nothing in that cutoff, continuing...')
fstring = self._format_param_name(param_name)
fig = plt.figure(1)
try:
plt.scatter(cut_vals,[float(matrix_vals[i][0,0])/(matrix_vals[i][0,0]+matrix_vals[i][0,1])*100. for i in range(len(matrix_vals))],label='True Simple',c='g')
except:
print ('No simple sources in subsample...')
try:
plt.scatter(cut_vals,[float(matrix_vals[i][1,1])/(matrix_vals[i][1,0]+matrix_vals[i][1,1])*100. for i in range(len(matrix_vals))],label='True Complex',c='b')
except:
print ('No complex sources in subsample...')
plt.xlabel(fstring)
plt.ylabel('Percent Correct')
plt.title(r'Percent Correct over '+fstring)
plt.legend(loc=(0.3,0.8),fontsize=5)
if save:
plt.savefig(param_name+'_plot.png',bbinches='tight')
else:
plt.show()
plt.close()
def _make_2d_cut(self, param_arr1, arr_name1, param_arr2, arr_name2,num_cut=10,prob=0.5,save=False):
"""
Function for cutting along two parameter values to test the model's performance over
a parameter space. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameters. Functions similarly to _make_cut() above.
To call:
_make_2d_cut(param_arr1, arr_name1, param_arr2, arr_name2, num_cut, prob, save)
Parameters:
param_arr1
arr_name1
param_arr2
arr_name2
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals1 = np.linspace(0.,np.max(param_arr1)[0]*.9,num_cut)
except:
cut_vals1 = np.linspace(0.,np.max(param_arr1)*.9,num_cut)
try:
cut_vals2 = np.linspace(0.,np.max(param_arr2)[0]*.9,num_cut)
except:
cut_vals2 = np.linspace(0.,np.max(param_arr2)*.9,num_cut)
matrix_vals_c = np.zeros((len(cut_vals1),len(cut_vals2)))
matrix_vals_s = np.zeros((len(cut_vals1),len(cut_vals2)))
# --------- make a series of cuts and save results for plotting ----------
for i in range(len(cut_vals1)):
for j in range(len(cut_vals2)):
#do the cut
c1 = cut_vals1[i]; c2 = cut_vals2[j]
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for k in range(len(param_arr1)):
val1 = param_arr1[k]
val2 = param_arr2[k]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val1) == tuple_check:
if abs(val1[0]-val1[1]) >= c1 and abs(val2[0]-val2[1]) >= c2:
kept.append(k)
else:
if val1 >= c1 and val2 >= c2:
kept.append(k)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
predictions = np.where(probs > prob, 1, 0)
'''
#------------ Confusion Matrix -------------
[simple marked as simple simple marked as complex]
[complex marked as simple complex marked as complex]
'''
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals_c[i,j] = float(cm[1,1])/(cm[1,0] +cm[1,1])*100.
matrix_vals_s[i,j] = float(cm[0,0])/(cm[0,0] +cm[0,1])*100
except:
print ('Nothing in that cutoff, continuing...')
fstring1 = self._format_param_name(arr_name1)
fstring2 = self._format_param_name(arr_name2)
xv,yv = np.meshgrid(cut_vals1,cut_vals2)
zv_complex = matrix_vals_c
zv_simple = matrix_vals_s
#------- show data as an image with z-axis being the TP/TN rates ----
fig,ax = plt.subplots(1,2,sharey=True,figsize=(12,7))
cax = ax[0].imshow(zv_complex,vmin=50., vmax=100.,cmap='seismic')#,origin='lower')
sax = ax[1].imshow(zv_simple,vmin=50., vmax=100.,cmap='seismic')#,origin='lower')
# ---- set the axis labels ------
ax[0].set_xlabel(fstring1)
ax[0].set_ylabel(fstring2)
ax[1].set_xlabel(fstring1)
ax[1].set_ylabel(fstring2)
# ---------- set the tick labels ---------
ax[0].set_xticks([n for n in range(len(cut_vals1))])
ax[0].set_yticks(range(len(cut_vals2)))
ax[1].set_xticks([n for n in range(len(cut_vals2))])
xlabels = ['%.2f'%(c) for c in cut_vals1]
ylabels = ['%.2f'%(c) for c in cut_vals2]
ax[0].set_xticklabels(xlabels)
ax[0].set_yticklabels(ylabels)
ax[1].set_xticklabels(xlabels)
ax[1].set_yticklabels(ylabels)
#-------- adjust plot sizing and add colorbar ----------
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(cax, cax=cbar_ax)
ax[0].set_title('Complex Sources')
ax[1].set_title('Simple Sources')
plt.suptitle(r'Percent Correct over '+fstring1+' and '+fstring2)
if save:
plt.savefig(arr_name1+'_'+arr_name2+'_plot.png',bbinches='tight')
else:
plt.show()
plt.close()
if __name__ == '__main__':
    # Ad-hoc driver: load a trained model plus test arrays and produce a
    # 2-D parameter-cut diagnostic plot.
    # NOTE(review): _loadModel and _loadData are not defined on this class
    # (_loadData is commented out above) -- presumably mixed in elsewhere;
    # as written this driver will fail. Verify before use.
    testing = plots()
    #testing._loadLog('train.log')
    #testing._plotLoss('train.log',save=False)
    #testing._plotAcc('train.log',save=False)
    testing._loadModel('../regularized/model_V1.h5')
    testing._loadData('../data/test/')
    #testing._make_cut(testing.chi_data, 'chi')
    testing._make_2d_cut(testing.chi_data[:1000], 'chi',testing.flux_data[:1000], 'flux', num_cut=25)
| mit |
loli/sklearn-ensembletrees | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)

###############################################################################
# Compute paths

n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)

# fit one Ridge model per regularization strength and record the coefficients
coefs = []
for a in alphas:
    clf.set_params(alpha=a)
    clf.fit(X, y)
    coefs.append(clf.coef_)

###############################################################################
# Display results

ax = plt.gca()
# Axes.set_color_cycle was deprecated in matplotlib 1.5 and removed in 2.0;
# set_prop_cycle is the supported replacement.
ax.set_prop_cycle(color=['b', 'r', 'g', 'c', 'k', 'y', 'm'])

ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/examples/tutorials/input_fn/boston.py | 19 | 2448 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
  """Build ({feature name: tensor}, label tensor) from a pandas DataFrame."""
  feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
  labels = tf.constant(data_set[LABEL].values)
  return feature_cols, labels
def main(unused_argv):
  """Train, evaluate and predict with a DNNRegressor on the Boston data."""
  # Load datasets
  training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
                             skiprows=1, names=COLUMNS)
  test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
                         skiprows=1, names=COLUMNS)
  # Set of 6 examples for which to predict median house values
  prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
                               skiprows=1, names=COLUMNS)
  # Feature cols
  feature_cols = [tf.contrib.layers.real_valued_column(k)
                  for k in FEATURES]
  # Build 2 layer fully connected DNN with 10, 10 units respectively.
  regressor = tf.contrib.learn.DNNRegressor(
      feature_columns=feature_cols, hidden_units=[10, 10])
  # Fit
  regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
  # Score accuracy
  ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
  loss_score = ev["loss"]
  print("Loss: {0:f}".format(loss_score))
  # Print out predictions
  y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
  print("Predictions: {}".format(str(y)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
tonnrueter/pymca_devel | PyMca/MaskImageWidget.py | 1 | 83455 | #/*##########################################################################
# Copyright (C) 2004-2013 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
__author__ = "V.A. Sole - ESRF Software Group"
import sys
import os
import numpy
from PyMca import RGBCorrelatorGraph
qt = RGBCorrelatorGraph.qt
IconDict = RGBCorrelatorGraph.IconDict
QTVERSION = qt.qVersion()
if hasattr(qt, "QString"):
QString = qt.QString
else:
QString = qt.safe_str
MATPLOTLIB = False
if QTVERSION > '4.0.0':
try:
from PyMca import QPyMcaMatplotlibSave
MATPLOTLIB = True
except ImportError:
MATPLOTLIB = False
else:
qt.QIcon = qt.QIconSet
from PyMca import ColormapDialog
from PyMca import spslut
from PyMca import PyMcaDirs
from PyMca import ArraySave
try:
from PyMca import ProfileScanWidget
except ImportError:
print("MaskImageWidget importing ProfileScanWidget directly")
import ProfileScanWidget
try:
from PyMca import SpecfitFuns
except ImportError:
print("MaskImageWidget importing SpecfitFuns directly")
import SpecfitFuns
# Colormap ids understood by spslut, in dialog order.
COLORMAPLIST = [spslut.GREYSCALE, spslut.REVERSEGREY, spslut.TEMP,
                spslut.RED, spslut.GREEN, spslut.BLUE, spslut.MANY]
# Qt4 uses PyQwt5 from PyQt4 and may provide overlay drawing through
# QwtPlotItems; Qt3 imports the standalone Qwt5 and has no overlay support.
if QTVERSION > '4.0.0':
    from PyQt4 import Qwt5
    try:
        from PyMca import QwtPlotItems
        OVERLAY_DRAW = True
    except ImportError:
        OVERLAY_DRAW = False
else:
    OVERLAY_DRAW = False
    import Qwt5
# default colormap: index 2 of COLORMAPLIST, linear (non-log) scaling
DEFAULT_COLORMAP_INDEX = 2
DEFAULT_COLORMAP_LOG_FLAG = False
DEBUG = 0
# set this variable to false if you get crashes when moving the mouse
# over the images.
# Before I thought it had to do with the Qt version used, but it seems
# to be related to old PyQwt versions. (In fact, the 5.2.1 version is
# a recent snapshot)
if Qwt5.QWT_VERSION_STR < '5.2.1':
    USE_PICKER = False
else:
    USE_PICKER = True
# Uncomment next line if you experience crashes moving the mouse on
# top of the images
#USE_PICKER = True
def convertToRowAndColumn(x, y, shape, xScale=None, yScale=None, safe=True):
    """
    Map plot coordinates (x, y) to (row, column) indices of a 2-D array.

    :param x, y: coordinates in plot space
    :param shape: (nRows, nColumns) of the underlying image array
    :param xScale: optional (min, max) of the x axis; when given, x is
                   clipped below at the minimum and mapped linearly onto
                   the column range
    :param yScale: optional (min, max) of the y axis, mapped onto rows
    :param safe: when True, truncate to int and clamp to valid indices
    :return: (row, column)
    """
    if xScale is None:
        col = x
    else:
        clippedX = max(x, xScale[0])
        col = shape[1] * (clippedX - xScale[0]) / (xScale[1] - xScale[0])
    if yScale is None:
        row = y
    else:
        clippedY = max(y, yScale[0])
        row = shape[0] * (clippedY - yScale[0]) / (yScale[1] - yScale[0])
    if safe:
        col = min(int(col), shape[1] - 1)
        row = min(int(row), shape[0] - 1)
    return row, col
class MyPicker(Qwt5.QwtPlotPicker):
    """Plot picker whose tracker shows 'x, y, value' for the pixel under
    the cursor when image data is attached (and USE_PICKER allows it)."""
    def __init__(self, *var):
        Qwt5.QwtPlotPicker.__init__(self, *var)
        self.__text = Qwt5.QwtText()
        # set externally by the owning widget:
        # data   - 2-D image array (or None for plain 'x, y' tracking)
        # xScale/yScale - axis ranges used to map plot coords to pixels
        self.data = None
        self.xScale = None
        self.yScale = None
    if USE_PICKER:
        # trackerText is only overridden on PyQwt >= 5.2.1; older versions
        # crash when the tracker touches the data (see module comment).
        def trackerText(self, var):
            d=self.invTransform(var)
            if self.data is None:
                self.__text.setText("%g, %g" % (d.x(), d.y()))
            else:
                x = d.x()
                y = d.y()
                r, c = convertToRowAndColumn(x, y, self.data.shape,
                                             xScale=self.xScale,
                                             yScale=self.yScale, safe=True)
                z = self.data[r, c]
                self.__text.setText("%.1f, %.1f, %.4g" % (x, y, z))
            return self.__text
class MaskImageWidget(qt.QWidget):
    def __init__(self, parent = None, rgbwidget=None, selection=True, colormap=False,
                 imageicons=True, standalonesave=True, usetab=False,
                 profileselection=False, scanwindow=None):
        """Image display / mask selection widget.

        :param rgbwidget: optional RGB correlator widget to forward data to
        :param selection: enable the selection toolbar
        :param colormap: kept for API compatibility (the graph is always
                         built with colormap=True below)
        :param imageicons: enable the image selection tool icons
        :param standalonesave: build the standalone save menu
        :param usetab: wrap the graph in a QTabWidget ('IMAGES' tab)
        :param profileselection: enable profile (line) selection; forced
                                 off on Qt3
        :param scanwindow: window used to display profile scans
        """
        qt.QWidget.__init__(self, parent)
        if QTVERSION < '4.0.0':
            self.setIcon(qt.QPixmap(IconDict['gioconda16']))
            self.setCaption("PyMca - Image Selection Tool")
            profileselection = False
        else:
            self.setWindowIcon(qt.QIcon(qt.QPixmap(IconDict['gioconda16'])))
            self.setWindowTitle("PyMca - Image Selection Tool")
            # disabled screen-size based geometry limits
            if 0:
                screenHeight = qt.QDesktopWidget().height()
                if screenHeight > 0:
                    self.setMaximumHeight(int(0.99*screenHeight))
                    self.setMinimumHeight(int(0.5*screenHeight))
                screenWidth = qt.QDesktopWidget().width()
                if screenWidth > 0:
                    self.setMaximumWidth(int(screenWidth)-5)
                    self.setMinimumWidth(min(int(0.5*screenWidth),800))
        # display / selection state
        self._y1AxisInverted = False
        self.__selectionMask = None
        self.__imageData = None
        self.__image = None
        self._xScale = None
        self._yScale = None
        self.colormap = None
        self.colormapDialog = None
        self.setDefaultColormap(DEFAULT_COLORMAP_INDEX,
                                DEFAULT_COLORMAP_LOG_FLAG)
        self.rgbWidget = rgbwidget
        self.__imageIconsFlag = imageicons
        self.__selectionFlag = selection
        self.__useTab = usetab
        self.mainTab = None
        self._build(standalonesave, profileselection=profileselection)
        self._profileSelectionWindow = None
        self._profileScanWindow = scanwindow
        # brush/erase selection state
        self.__brushMenu = None
        self.__brushMode = False
        self.__eraseMode = False
        self.__connected = True
        self.__setBrush2()
        self.outputDir = None
        self._saveFilter = None
        self._buildConnections()
        self._matplotlibSaveImage = None
        # the overlay items to be drawn
        self._overlayItemsDict = {}
        # the last overlay legend used
        self.__lastOverlayLegend = None
        # the projection mode
        self.__lineProjectionMode = 'D'
    def _build(self, standalonesave, profileselection=False):
        """Create the layout, the RGBCorrelatorGraph (optionally inside a
        tab), the position picker, and the default zoom/selection state."""
        self.mainLayout = qt.QVBoxLayout(self)
        self.mainLayout.setMargin(0)
        self.mainLayout.setSpacing(0)
        if self.__useTab:
            self.mainTab = qt.QTabWidget(self)
            #self.graphContainer =qt.QWidget()
            #self.graphContainer.mainLayout = qt.QVBoxLayout(self.graphContainer)
            #self.graphContainer.mainLayout.setMargin(0)
            #self.graphContainer.mainLayout.setSpacing(0)
            self.graphWidget = RGBCorrelatorGraph.RGBCorrelatorGraph(self,
                                               selection = self.__selectionFlag,
                                               colormap=True,
                                               imageicons=self.__imageIconsFlag,
                                               standalonesave=False,
                                               standalonezoom=False,
                                               profileselection=profileselection)
            self.mainTab.addTab(self.graphWidget, 'IMAGES')
        else:
            if QTVERSION < '4.0.0':
                # Qt3: the graph provides its own save button; no profiles
                self.graphWidget = RGBCorrelatorGraph.RGBCorrelatorGraph(self,
                                                   selection = self.__selectionFlag,
                                                   colormap=True,
                                                   imageicons=self.__imageIconsFlag,
                                                   standalonesave=True,
                                                   standalonezoom=False)
                standalonesave = False
            else:
                self.graphWidget = RGBCorrelatorGraph.RGBCorrelatorGraph(self,
                                                   selection =self.__selectionFlag,
                                                   colormap=True,
                                                   imageicons=self.__imageIconsFlag,
                                                   standalonesave=False,
                                                   standalonezoom=False,
                                                   profileselection=profileselection)
        #for easy compatibility with RGBCorrelatorGraph
        self.graph = self.graphWidget.graph
        if profileselection:
            self.connect(self.graphWidget,
                         qt.SIGNAL('PolygonSignal'),
                         self._polygonSignalSlot)
        if standalonesave:
            self.buildStandaloneSaveMenu()
        self.connect(self.graphWidget.zoomResetToolButton,
                     qt.SIGNAL("clicked()"),
                     self._zoomResetSignal)
        # picker shows cursor position (and pixel value when data is set)
        self.graphWidget.picker = MyPicker(Qwt5.QwtPlot.xBottom,
                           Qwt5.QwtPlot.yLeft,
                           Qwt5.QwtPicker.NoSelection,
                           Qwt5.QwtPlotPicker.CrossRubberBand,
                           Qwt5.QwtPicker.AlwaysOn,
                           self.graphWidget.graph.canvas())
        self.graphWidget.picker.setTrackerPen(qt.QPen(qt.Qt.black))
        self.graphWidget.graph.enableSelection(False)
        self.graphWidget.graph.enableZoom(True)
        if self.__selectionFlag:
            # start in zoom mode (selection off) when image icons are shown
            if self.__imageIconsFlag:
                self.setSelectionMode(False)
            else:
                self.setSelectionMode(True)
            self._toggleSelectionMode()
        if self.__useTab:
            self.mainLayout.addWidget(self.mainTab)
        else:
            self.mainLayout.addWidget(self.graphWidget)
    def buildStandaloneSaveMenu(self):
        """Attach a save menu (data, graphics, matplotlib) to the graph's
        save tool button."""
        self.connect(self.graphWidget.saveToolButton,
                     qt.SIGNAL("clicked()"),
                     self._saveToolButtonSignal)
        self._saveMenu = qt.QMenu()
        self._saveMenu.addAction(QString("Image Data"),
                                 self.saveImageList)
        self._saveMenu.addAction(QString("Colormap Clipped Seen Image Data"),
                                 self.saveClippedSeenImageList)
        self._saveMenu.addAction(QString("Clipped and Subtracted Seen Image Data"),
                                 self.saveClippedAndSubtractedSeenImageList)
        self._saveMenu.addAction(QString("Standard Graphics"),
                                 self.graphWidget._saveIconSignal)
        # matplotlib export is only offered on Qt4 when the optional
        # QPyMcaMatplotlibSave module imported successfully
        if QTVERSION > '4.0.0':
            if MATPLOTLIB:
                self._saveMenu.addAction(QString("Matplotlib") ,
                                 self._saveMatplotlibImage)
    def _buildConnections(self, widget = None):
        """Wire the toolbar buttons of the embedded graph widget to their
        handlers.

        Which connections are made depends on the construction flags
        (selection support, image icons) and on the Qt major version:
        Qt3 uses PYSIGNAL for the graph signal, Qt4 uses SIGNAL and adds
        the additional-selection menu button.
        """
        # always available: flip and colormap controls
        self.connect(self.graphWidget.hFlipToolButton,
                     qt.SIGNAL("clicked()"),
                     self._hFlipIconSignal)
        self.connect(self.graphWidget.colormapToolButton,
                     qt.SIGNAL("clicked()"),
                     self.selectColormap)
        if self.__selectionFlag:
            # toggle between selection and zoom modes
            self.connect(self.graphWidget.selectionToolButton,
                         qt.SIGNAL("clicked()"),
                         self._toggleSelectionMode)
            text = "Toggle between Selection\nand Zoom modes"
            if QTVERSION > '4.0.0':
                self.graphWidget.selectionToolButton.setToolTip(text)
        if self.__imageIconsFlag:
            # mask-editing tools (reset / erase / rectangle / brush)
            self.connect(self.graphWidget.imageToolButton,
                         qt.SIGNAL("clicked()"),
                         self._resetSelection)
            self.connect(self.graphWidget.eraseSelectionToolButton,
                         qt.SIGNAL("clicked()"),
                         self._setEraseSelectionMode)
            self.connect(self.graphWidget.rectSelectionToolButton,
                         qt.SIGNAL("clicked()"),
                         self._setRectSelectionMode)
            self.connect(self.graphWidget.brushSelectionToolButton,
                         qt.SIGNAL("clicked()"),
                         self._setBrushSelectionMode)
            self.connect(self.graphWidget.brushToolButton,
                         qt.SIGNAL("clicked()"),
                         self._setBrush)
        if QTVERSION < "4.0.0":
            # Qt3: graph events arrive through a PYSIGNAL
            self.connect(self.graphWidget.graph,
                         qt.PYSIGNAL("QtBlissGraphSignal"),
                         self._graphSignal)
        else:
            if self.__imageIconsFlag:
                # Qt4 only: menu with colormap-threshold based selections
                self.connect(self.graphWidget.additionalSelectionToolButton,
                             qt.SIGNAL("clicked()"),
                             self._additionalSelectionMenuDialog)
                self._additionalSelectionMenu = qt.QMenu()
                self._additionalSelectionMenu.addAction(QString("Reset Selection"),
                                                        self._resetSelection)
                self._additionalSelectionMenu.addAction(QString("Invert Selection"),
                                                        self._invertSelection)
                self._additionalSelectionMenu.addAction(QString("I >= Colormap Max"),
                                                        self._selectMax)
                self._additionalSelectionMenu.addAction(QString("Colormap Min < I < Colormap Max"),
                                                        self._selectMiddle)
                self._additionalSelectionMenu.addAction(QString("I <= Colormap Min"),
                                                        self._selectMin)
            self.connect(self.graphWidget.graph,
                         qt.SIGNAL("QtBlissGraphSignal"),
                         self._graphSignal)
    def _polygonSignalSlot(self, ddict):
        """Handle polygon-selection events emitted by the graph widget.

        Lazily creates the profile window on first use and then delegates
        the actual profile computation to :meth:`_updateProfileCurve`.
        A "PolygonWidthChanged" event re-emits the last overlay event with
        the new width so the same region is profiled again.
        """
        if DEBUG:
            print("_polygonSignalSLot, event = %s" % ddict['event'])
            print("Received ddict = ", ddict)
        if ddict['event'] in [None, "NONE"]:
            #Nothing to be made
            return
        if ddict['event'] == "PolygonWidthChanged":
            # recompute the last profile with the new pixel width
            if self.__lastOverlayLegend is not None:
                legend = self.__lastOverlayLegend
                if legend in self._overlayItemsDict:
                    info = self._overlayItemsDict[legend]['info']
                    if info['mode'] == ddict['mode']:
                        newDict = {}
                        newDict.update(info)
                        newDict['pixelwidth'] = ddict['pixelwidth']
                        self._polygonSignalSlot(newDict)
            return
        if self._profileSelectionWindow is None:
            # first event: build the profile window (with action buttons
            # only when an external scan window is going to receive curves)
            if self._profileScanWindow is None:
                #identical to the standard scan window
                self._profileSelectionWindow = ProfileScanWidget.ProfileScanWidget(actions=False)
            else:
                self._profileSelectionWindow = ProfileScanWidget.ProfileScanWidget(actions=True)
                self.connect(self._profileSelectionWindow,
                             qt.SIGNAL('addClicked'),
                             self._profileSelectionSlot)
                self.connect(self._profileSelectionWindow,
                             qt.SIGNAL('removeClicked'),
                             self._profileSelectionSlot)
                self.connect(self._profileSelectionWindow,
                             qt.SIGNAL('replaceClicked'),
                             self._profileSelectionSlot)
            self._interpolate = SpecfitFuns.interpol
            #if I do not return here and the user interacts with the graph while
            #the profileSelectionWindow is not shown, I get crashes under Qt 4.5.3 and MacOS X
            #when calling _getProfileCurve
            return
        self._updateProfileCurve(ddict)
def _updateProfileCurve(self, ddict):
curve = self._getProfileCurve(ddict)
if curve is None:
return
xdata, ydata, legend, info = curve
replot=True
replace=True
idx = numpy.isfinite(ydata)
xdata = xdata[idx]
ydata = ydata[idx]
self._profileSelectionWindow.addCurve(xdata, ydata,
legend=legend,
info=info,
replot=replot,
replace=replace)
def getGraphTitle(self):
try:
title = self.graphWidget.graph.title().text()
if sys.version < '3.0':
title = qt.safe_str(title)
except:
title = ""
return title
def setLineProjectionMode(self, mode):
"""
Set the line projection mode.
mode: 1 character string. Allowed options 'D', 'X' 'Y'
D - Plot the intensity over the drawn line over as many intervals as pixels over the axis
containing the longest projection in pixels.
X - Plot the intensity over the drawn line over as many intervals as pixels over the X axis
Y - Plot the intensity over the drawn line over as many intervals as pixels over the Y axis
"""
m = mode.upper()
if m not in ['D', 'X', 'Y']:
raise ValueError("Invalid mode %s. It has to be 'D', 'X' or 'Y'")
self.__lineProjectionMode = m
    def getLineProjectionMode(self):
        """Return the current line projection mode: 'D', 'X' or 'Y'."""
        return self.__lineProjectionMode
    def _getProfileCurve(self, ddict, image=None, overlay=OVERLAY_DRAW):
        """Build the 1D intensity profile requested by a graph selection.

        :param ddict: event dictionary from the polygon selection; expected
            keys include 'event', 'mode', 'pixelwidth' and 'row'/'column'
            coordinate sequences (assumed from usage below -- confirm
            against the emitting widget).
        :param image: optional array to profile; defaults to the currently
            displayed image data.
        :param overlay: when true, the profiled region is drawn on top of
            the image as an overlay polygon.
        :return: (xdata, ydata, legend, info) tuple, or None when no
            profile can be produced.
        """
        if image is None:
            imageData = self.__imageData
        else:
            imageData = image
        if imageData is None:
            return None
        title = self.getGraphTitle()
        self._profileSelectionWindow.setTitle(title)
        if self._profileScanWindow is not None:
            self._profileSelectionWindow.label.setText(title)
        #showing the profileSelectionWindow now can make the program crash if the workaround mentioned above
        #is not implemented
        self._profileSelectionWindow.show()
        #self._profileSelectionWindow.raise_()
        if ddict['event'] == 'PolygonModeChanged':
            return
        #if I show the image here it does not crash, but it is not nice because
        #the user would get the profileSelectionWindow under his mouse
        #self._profileSelectionWindow.show()
        shape = imageData.shape
        # 'pixelwidth' includes the central line; width counts extra pixels
        width = ddict['pixelwidth'] - 1
        # ---- horizontal profile: one row or a sum over a band of rows ----
        if ddict['mode'].upper() in ["HLINE", "HORIZONTAL"]:
            xLabel = self.getXLabel()
            deltaDistance = 1.0
            if width < 1:
                row = int(ddict['row'][0])
                if row < 0:
                    row = 0
                if row >= shape[0]:
                    row = shape[0] - 1
                ydata = imageData[row, :]
                legend = "Row = %d" % row
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([0.0, shape[1], shape[1], 0.0],
                                         [row, row, row+1, row+1],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            else:
                row0 = int(ddict['row'][0]) - 0.5 * width
                if row0 < 0:
                    row0 = 0
                    row1 = row0 + width
                else:
                    row1 = int(ddict['row'][0]) + 0.5 * width
                if row1 >= shape[0]:
                    row1 = shape[0] - 1
                    row0 = max(0, row1 - width)
                ydata = imageData[row0:int(row1+1), :].sum(axis=0)
                legend = "Row = %d to %d" % (row0, row1)
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([0.0, 0.0, shape[1], shape[1]],
                                         [row0, row1, row1, row0],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            xdata = numpy.arange(shape[1]).astype(numpy.float)
            if self._xScale is not None:
                xdata = self._xScale[0] + xdata * (self._xScale[1] - self._xScale[0]) / float(shape[1])
        # ---- vertical profile: one column or a sum over a band of columns
        elif ddict['mode'].upper() in ["VLINE", "VERTICAL"]:
            xLabel = self.getYLabel()
            deltaDistance = 1.0
            if width < 1:
                column = int(ddict['column'][0])
                if column < 0:
                    column = 0
                if column >= shape[1]:
                    column = shape[1] - 1
                ydata = imageData[:, column]
                legend = "Column = %d" % column
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([column, column, column+1, column+1],
                                         [0.0, shape[0], shape[0], 0.0],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            else:
                col0 = int(ddict['column'][0]) - 0.5 * width
                if col0 < 0:
                    col0 = 0
                    col1 = col0 + width
                else:
                    col1 = int(ddict['column'][0]) + 0.5 * width
                if col1 >= shape[1]:
                    col1 = shape[1] - 1
                    col0 = max(0, col1 - width)
                ydata = imageData[:, col0:int(col1+1)].sum(axis=1)
                legend = "Col = %d to %d" % (col0, col1)
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([col0, col0, col1, col1],
                                         [0, shape[0], shape[0], 0.],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            xdata = numpy.arange(shape[0]).astype(numpy.float)
            if self._yScale is not None:
                xdata = self._yScale[0] + xdata * (self._yScale[1] - self._yScale[0]) / float(shape[0])
        # ---- arbitrary line profile (with optional width and rotation) ---
        elif ddict['mode'].upper() in ["LINE"]:
            if len(ddict['column']) == 1:
                #only one point given
                return
            #the coordinates of the reference points
            x0 = numpy.arange(float(shape[0]))
            y0 = numpy.arange(float(shape[1]))
            #get the interpolation points
            col0, col1 = [int(x) for x in ddict['column']]
            row0, row1 = [int(x) for x in ddict['row']]
            deltaCol = abs(col0 - col1)
            deltaRow = abs(row0 - row1)
            # number of sampling intervals depends on the projection mode
            if self.__lineProjectionMode == 'D':
                if deltaCol >= deltaRow:
                    npoints = deltaCol + 1
                else:
                    npoints = deltaRow + 1
            elif self.__lineProjectionMode == 'X':
                npoints = deltaCol + 1
            else:
                npoints = deltaRow + 1
            if npoints == 1:
                #all points are the same
                if DEBUG:
                    print("START AND END POINT ARE THE SAME!!")
                return
            if width < 1:
                # single-pixel-wide line: interpolate along the segment
                x = numpy.zeros((npoints, 2), numpy.float)
                x[:, 0] = numpy.linspace(row0, row1, npoints)
                x[:, 1] = numpy.linspace(col0, col1, npoints)
                legend = "From (%.3f, %.3f) to (%.3f, %.3f)" % (col0, row0, col1, row1)
                #perform the interpolation
                ydata = self._interpolate((x0, y0), imageData, x)
                xdata = numpy.arange(float(npoints))
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([col0, col1],
                                         [row0, row1],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            elif deltaCol == 0:
                #vertical line
                col0 = int(ddict['column'][0]) - 0.5 * width
                if col0 < 0:
                    col0 = 0
                    col1 = col0 + width
                else:
                    col1 = int(ddict['column'][0]) + 0.5 * width
                if col1 >= shape[1]:
                    col1 = shape[1] - 1
                    col0 = max(0, col1 - width)
                row0 = int(ddict['row'][0])
                row1 = int(ddict['row'][1])
                if row0 > row1:
                    tmp = row0
                    row0 = row1
                    row1 = tmp
                if row0 < 0:
                    row0 = 0
                if row1 >= shape[0]:
                    row1 = shape[0] - 1
                ydata = imageData[row0:row1+1, col0:int(col1+1)].sum(axis=1)
                legend = "Col = %d to %d" % (col0, col1)
                npoints = max(ydata.shape)
                xdata = numpy.arange(float(npoints))
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([col0, col0, col1, col1],
                                         [row0, row1, row1, row0],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            elif deltaRow == 0:
                #horizontal line
                row0 = int(ddict['row'][0]) - 0.5 * width
                if row0 < 0:
                    row0 = 0
                    row1 = row0 + width
                else:
                    row1 = int(ddict['row'][0]) + 0.5 * width
                if row1 >= shape[0]:
                    row1 = shape[0] - 1
                    row0 = max(0, row1 - width)
                col0 = int(ddict['column'][0])
                col1 = int(ddict['column'][1])
                if col0 > col1:
                    tmp = col0
                    col0 = col1
                    col1 = tmp
                if col0 < 0:
                    col0 = 0
                if col1 >= shape[1]:
                    col1 = shape[1] - 1
                ydata = imageData[row0:int(row1+1), col0:col1+1].sum(axis=0)
                legend = "Row = %d to %d" % (row0, row1)
                npoints = max(ydata.shape)
                xdata = numpy.arange(float(npoints))
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem([col0, col0, col1, col1],
                                         [row0, row1, row1, row0],
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            else:
                #restore original value of width
                width = ddict['pixelwidth']
                #find m and b in the line y = mx + b
                m = (row1 - row0) / float((col1 - col0))
                b = row0 - m * col0
                alpha = numpy.arctan(m)
                #imagine the following sequence
                # - change origin to the first point
                # - clock-wise rotation to bring the line on the x axis of a new system
                # so that the points (col0, row0) and (col1, row1) become (x0, 0) (x1, 0)
                # - counter clock-wise rotation to get the points (x0, -0.5 width),
                # (x0, 0.5 width), (x1, 0.5 * width) and (x1, -0.5 * width) back to the
                # original system.
                # - restore the origin to (0, 0)
                # - if those extremes are inside the image the selection is acceptable
                cosalpha = numpy.cos(alpha)
                sinalpha = numpy.sin(alpha)
                newCol0 = 0.0
                newCol1 = (col1-col0) * cosalpha + (row1-row0) * sinalpha
                newRow0 = 0.0
                newRow1 = -(col1-col0) * sinalpha + (row1-row0) * cosalpha
                if DEBUG:
                    print("new X0 Y0 = %f, %f " % (newCol0, newRow0))
                    print("new X1 Y1 = %f, %f " % (newCol1, newRow1))
                tmpX = numpy.linspace(newCol0, newCol1, npoints).astype(numpy.float)
                rotMatrix = numpy.zeros((2,2), numpy.float)
                rotMatrix[0,0] = cosalpha
                rotMatrix[0,1] = - sinalpha
                rotMatrix[1,0] = sinalpha
                rotMatrix[1,1] = cosalpha
                if DEBUG:
                    #test if I recover the original points
                    testX = numpy.zeros((2, 1) , numpy.float)
                    colRow = numpy.dot(rotMatrix, testX)
                    print("Recovered X0 = %f" % (colRow[0,0] + col0))
                    print("Recovered Y0 = %f" % (colRow[1,0] + row0))
                    print("It should be = %f, %f" % (col0, row0))
                    testX[0,0] = newCol1
                    testX[1,0] = newRow1
                    colRow = numpy.dot(rotMatrix, testX)
                    print("Recovered X1 = %f" % (colRow[0,0] + col0))
                    print("Recovered Y1 = %f" % (colRow[1,0] + row0))
                    print("It should be = %f, %f" % (col1, row1))
                #find the drawing limits
                testX = numpy.zeros((2, 4) , numpy.float)
                testX[0,0] = newCol0
                testX[0,1] = newCol0
                testX[0,2] = newCol1
                testX[0,3] = newCol1
                testX[1,0] = newRow0 - 0.5 * width
                testX[1,1] = newRow0 + 0.5 * width
                testX[1,2] = newRow1 + 0.5 * width
                testX[1,3] = newRow1 - 0.5 * width
                colRow = numpy.dot(rotMatrix, testX)
                colLimits0 = colRow[0, :] + col0
                rowLimits0 = colRow[1, :] + row0
                # reject selections whose widened band leaves the image
                for a in rowLimits0:
                    if (a >= shape[0]) or (a < 0):
                        print("outside row limits",a)
                        return
                for a in colLimits0:
                    if (a >= shape[1]) or (a < 0):
                        print("outside column limits",a)
                        return
                r0 = rowLimits0[0]
                r1 = rowLimits0[1]
                if r0 > r1:
                    print("r0 > r1", r0, r1)
                    raise ValueError("r0 > r1")
                x = numpy.zeros((2, npoints) , numpy.float)
                tmpMatrix = numpy.zeros((npoints, 2) , numpy.float)
                if 0:
                    # dead code path kept for reference (central line only)
                    #take only the central point
                    oversampling = 1
                    x[0, :] = tmpX
                    x[1, :] = 0.0
                    colRow = numpy.dot(rotMatrix, x)
                    colRow[0, :] += col0
                    colRow[1, :] += row0
                    tmpMatrix[:,0] = colRow[1,:]
                    tmpMatrix[:,1] = colRow[0,:]
                    ydataCentral = self._interpolate((x0, y0),\
                                        imageData, tmpMatrix)
                    #multiply by width too have the equivalent scale
                    ydata = ydataCentral
                else:
                    if ddict['event'] == "PolygonSelected":
                        #oversampling solves noise introduction issues
                        oversampling = width + 1
                        oversampling = min(oversampling, 21)
                    else:
                        oversampling = 1
                    ncontributors = width * oversampling
                    iterValues = numpy.linspace(-0.5*width, 0.5*width, ncontributors)
                    tmpMatrix = numpy.zeros((npoints*len(iterValues), 2) , numpy.float)
                    x[0, :] = tmpX
                    offset = 0
                    for i in iterValues:
                        x[1, :] = i
                        colRow = numpy.dot(rotMatrix, x)
                        colRow[0, :] += col0
                        colRow[1, :] += row0
                        """
                        colLimits = [colRow[0, 0], colRow[0, -1]]
                        rowLimits = [colRow[1, 0], colRow[1, -1]]
                        for a in rowLimits:
                            if (a >= shape[0]) or (a < 0):
                                print("outside row limits",a)
                                return
                        for a in colLimits:
                            if (a >= shape[1]) or (a < 0):
                                print("outside column limits",a)
                                return
                        """
                        #it is much faster to make one call to the interpolating
                        #routine than making many calls
                        tmpMatrix[offset:(offset+npoints),0] = colRow[1,:]
                        tmpMatrix[offset:(offset+npoints),1] = colRow[0,:]
                        offset += npoints
                    ydata = self._interpolate((x0, y0),\
                                        imageData, tmpMatrix)
                    ydata.shape = len(iterValues), npoints
                    ydata = ydata.sum(axis=0)
                    #deal with the oversampling
                    ydata /= oversampling
                xdata = numpy.arange(float(npoints))
                legend = "y = %f (x-%.1f) + %f ; width=%d" % (m, col0, b, width)
                if overlay:
                    #self.drawOverlayItem(x, y, legend=name, info=info, replot, replace)
                    self.drawOverlayItem(colLimits0,
                                         rowLimits0,
                                         legend=ddict['mode'],
                                         info=ddict,
                                         replace=True,
                                         replot=True)
            # convert the abscissa according to the projection mode
            if self.__lineProjectionMode == 'X':
                xLabel = self.getXLabel()
                xdata += col0
                if self._xScale is not None:
                    xdata = self._xScale[0] + xdata * (self._xScale[1] - self._xScale[0]) / float(shape[1])
            elif self.__lineProjectionMode == 'Y':
                xLabel = self.getYLabel()
                xdata += row0
                # NOTE(review): this tests self._xScale but then indexes
                # self._yScale -- looks like it should test _yScale; confirm.
                if self._xScale is not None:
                    xdata = self._yScale[0] + xdata * (self._yScale[1] - self._yScale[0]) / float(shape[0])
            else:
                xLabel = "Distance"
                if self._xScale is not None:
                    deltaCol *= (self._xScale[1] - self._xScale[0])/float(shape[1])
                    deltaRow *= (self._yScale[1] - self._yScale[0])/float(shape[0])
                #get the abscisa in distance units
                deltaDistance = numpy.sqrt(float(deltaCol) * deltaCol +
                                    float(deltaRow) * deltaRow)/(npoints-1.0)
                xdata *= deltaDistance
        else:
            if DEBUG:
                print("Mode %s not supported yet" % ddict['mode'])
            return
        info = {}
        info['xlabel'] = xLabel
        info['ylabel'] = "Z"
        return xdata, ydata, legend, info
def _profileSelectionSlot(self, ddict):
if DEBUG:
print(ddict)
# the curves as [[x0, y0, legend0, info0], ...]
curveList = ddict['curves']
label = ddict['label']
n = len(curveList)
if ddict['event'] == 'ADD':
for i in range(n):
x, y, legend, info = curveList[i]
info['profilelabel'] = label
if i == (n-1):
replot = True
self._profileScanWindow.addCurve(x, y, legend=legend, info=info,
replot=replot, replace=False)
elif ddict['event'] == 'REPLACE':
for i in range(n):
x, y, legend, info = curveList[i]
info['profilelabel'] = label
if i in [0, n-1]:
replace = True
else:
replace = False
if i == (n-1):
replot = True
else:
replot = False
self._profileScanWindow.addCurve(x, y, legend=legend, info=info,
replot=replot, replace=replace)
elif ddict['event'] == 'REMOVE':
curveList = self._profileScanWindow.getAllCurves()
if curveList in [None, []]:
return
toDelete = []
n = len(curveList)
for i in range(n):
x, y, legend, info = curveList[i]
curveLabel = info.get('profilelabel', None)
if curveLabel is not None:
if label == curveLabel:
toDelete.append(legend)
n = len(toDelete)
for i in range(n):
legend = toDelete[i]
if i == (n-1):
replot = True
else:
replot = False
self._profileScanWindow.removeCurve(legend, replot=replot)
    def drawOverlayItem(self, x, y, legend=None, info=None, replace=False, replot=True):
        """Draw (or update) a polygon overlay on top of the image.

        Same call convention as the plot1D addCurve command. Coordinates
        are given in image pixels and converted using the current axis
        scales; when *replace* is true all other overlays are removed.
        Remembers the legend in __lastOverlayLegend for width updates.
        """
        #same call as the plot1D addCurve command
        if legend is None:
            legend="UnnamedOverlayItem"
        if legend not in self._overlayItemsDict:
            # first time this legend is used: create and attach the item
            overlayItem = QwtPlotItems.PolygonItem(legend)
            overlayItem.attach(self.graphWidget.graph)
            self._overlayItemsDict[legend] = {}
            self._overlayItemsDict[legend]['item'] = overlayItem
        else:
            overlayItem = self._overlayItemsDict[legend]['item']
        if replace:
            # detach and forget every other overlay item
            iterKeys = list(self._overlayItemsDict.keys())
            for name in iterKeys:
                if name == legend:
                    continue
                self._overlayItemsDict[name]['item'].detach()
                delKeys = list(self._overlayItemsDict[name].keys())
                for key in delKeys:
                    del self._overlayItemsDict[name][key]
                del self._overlayItemsDict[name]
        #the type of x can be list or array
        shape = self.__imageData.shape
        # convert pixel coordinates to axis coordinates when scales are set
        if self._xScale is None:
            xList = x
        else:
            xList = []
            for i in x:
                xList.append(self._xScale[0] + i * (self._xScale[1] - self._xScale[0])/float(shape[1]))
        if self._yScale is None:
            yList = y
        else:
            yList = []
            for i in y:
                yList.append(self._yScale[0] + i * (self._yScale[1] - self._yScale[0])/float(shape[0]))
        overlayItem.setData(xList, yList)
        self._overlayItemsDict[legend]['x'] = xList
        self._overlayItemsDict[legend]['y'] = yList
        self._overlayItemsDict[legend]['info'] = info
        if replot:
            self.graphWidget.graph.replot()
        self.__lastOverlayLegend = legend
def _hFlipIconSignal(self):
if not self.graphWidget.graph.yAutoScale:
qt.QMessageBox.information(self, "Open",
"Please set Y Axis to AutoScale first")
return
if not self.graphWidget.graph.xAutoScale:
qt.QMessageBox.information(self, "Open",
"Please set X Axis to AutoScale first")
return
if self._y1AxisInverted:
self._y1AxisInverted = False
else:
self._y1AxisInverted = True
self.graphWidget.graph.zoomReset()
self.graphWidget.graph.setY1AxisInverted(self._y1AxisInverted)
self.plotImage(True)
#inform the other widgets
ddict = {}
ddict['event'] = "hFlipSignal"
ddict['current'] = self._y1AxisInverted * 1
ddict['id'] = id(self)
self.emitMaskImageSignal(ddict)
    def setY1AxisInverted(self, value):
        """Set the vertical-flip state and forward it to the graph."""
        self._y1AxisInverted = value
        self.graphWidget.graph.setY1AxisInverted(self._y1AxisInverted)
    # Thin delegators for the axis labels of the embedded graph widget.
    def setXLabel(self, label="Column"):
        """Set the X axis label of the graph."""
        return self.graphWidget.setXLabel(label)
    def setYLabel(self, label="Row"):
        """Set the Y axis label of the graph."""
        return self.graphWidget.setYLabel(label)
    def getXLabel(self):
        """Return the current X axis label."""
        return self.graphWidget.getXLabel()
    def getYLabel(self):
        """Return the current Y axis label."""
        return self.graphWidget.getYLabel()
    def buildAndConnectImageButtonBox(self, replace=True):
        """Build the ADD/REMOVE (and optionally REPLACE) image button row
        and connect the buttons to their handlers.

        :param replace: when True a REPLACE IMAGE button is added too.
        """
        # The IMAGE selection
        self.imageButtonBox = qt.QWidget(self)
        buttonBox = self.imageButtonBox
        self.imageButtonBoxLayout = qt.QHBoxLayout(buttonBox)
        self.imageButtonBoxLayout.setMargin(0)
        self.imageButtonBoxLayout.setSpacing(0)
        self.addImageButton = qt.QPushButton(buttonBox)
        icon = qt.QIcon(qt.QPixmap(IconDict["rgb16"]))
        self.addImageButton.setIcon(icon)
        self.addImageButton.setText("ADD IMAGE")
        self.removeImageButton = qt.QPushButton(buttonBox)
        self.removeImageButton.setIcon(icon)
        self.removeImageButton.setText("REMOVE IMAGE")
        self.imageButtonBoxLayout.addWidget(self.addImageButton)
        self.imageButtonBoxLayout.addWidget(self.removeImageButton)
        self.mainLayout.addWidget(buttonBox)
        self.connect(self.addImageButton, qt.SIGNAL("clicked()"),
                    self._addImageClicked)
        self.connect(self.removeImageButton, qt.SIGNAL("clicked()"),
                    self._removeImageClicked)
        if replace:
            self.replaceImageButton = qt.QPushButton(buttonBox)
            self.replaceImageButton.setIcon(icon)
            self.replaceImageButton.setText("REPLACE IMAGE")
            self.imageButtonBoxLayout.addWidget(self.replaceImageButton)
            self.connect(self.replaceImageButton,
                         qt.SIGNAL("clicked()"),
                         self._replaceImageClicked)
    def _setEraseSelectionMode(self):
        """Enter brush-erase mode: the brush removes pixels from the mask."""
        if DEBUG:
            print("_setEraseSelectionMode")
        self.__eraseMode = True
        self.__brushMode = True
        # tracker only while actively drawing; rectangle selection off
        self.graphWidget.picker.setTrackerMode(Qwt5.QwtPicker.ActiveOnly)
        self.graphWidget.graph.enableSelection(False)
    def _setRectSelectionMode(self):
        """Enter rectangle selection mode (graph rubber-band selection)."""
        if DEBUG:
            print("_setRectSelectionMode")
        self.__eraseMode = False
        self.__brushMode = False
        self.graphWidget.picker.setTrackerMode(Qwt5.QwtPicker.AlwaysOn)
        self.graphWidget.graph.enableSelection(True)
    def _setBrushSelectionMode(self):
        """Enter brush-paint mode: the brush adds pixels to the mask."""
        if DEBUG:
            print("_setBrushSelectionMode")
        self.__eraseMode = False
        self.__brushMode = True
        self.graphWidget.picker.setTrackerMode(Qwt5.QwtPicker.ActiveOnly)
        self.graphWidget.graph.enableSelection(False)
def _setBrush(self):
if DEBUG:
print("_setBrush")
if self.__brushMenu is None:
if QTVERSION < '4.0.0':
self.__brushMenu = qt.QPopupMenu()
self.__brushMenu.insertItem(QString(" 1 Image Pixel Width"),
self.__setBrush1)
self.__brushMenu.insertItem(QString(" 2 Image Pixel Width"),
self.__setBrush2)
self.__brushMenu.insertItem(QString(" 3 Image Pixel Width"),
self.__setBrush3)
self.__brushMenu.insertItem(QString(" 5 Image Pixel Width"),
self.__setBrush4)
self.__brushMenu.insertItem(QString("10 Image Pixel Width"),
self.__setBrush5)
self.__brushMenu.insertItem(QString("20 Image Pixel Width"),
self.__setBrush6)
else:
self.__brushMenu = qt.QMenu()
self.__brushMenu.addAction(QString(" 1 Image Pixel Width"),
self.__setBrush1)
self.__brushMenu.addAction(QString(" 2 Image Pixel Width"),
self.__setBrush2)
self.__brushMenu.addAction(QString(" 3 Image Pixel Width"),
self.__setBrush3)
self.__brushMenu.addAction(QString(" 5 Image Pixel Width"),
self.__setBrush4)
self.__brushMenu.addAction(QString("10 Image Pixel Width"),
self.__setBrush5)
self.__brushMenu.addAction(QString("20 Image Pixel Width"),
self.__setBrush6)
if QTVERSION < '4.0.0':
self.__brushMenu.exec_loop(self.cursor().pos())
else:
self.__brushMenu.exec_(self.cursor().pos())
    # Brush-width menu callbacks: each one sets the brush diameter
    # (in image pixels) used by the brush selection modes.
    def __setBrush1(self):
        self.__brushWidth = 1
    def __setBrush2(self):
        self.__brushWidth = 2
    def __setBrush3(self):
        self.__brushWidth = 3
    def __setBrush4(self):
        self.__brushWidth = 5
    def __setBrush5(self):
        self.__brushWidth = 10
    def __setBrush6(self):
        self.__brushWidth = 20
def _toggleSelectionMode(self):
if self.graphWidget.graph._selecting:
self.setSelectionMode(False)
else:
self.setSelectionMode(True)
    def setSelectionMode(self, mode = None):
        """Switch the graph between mask-selection mode and zoom mode.

        :param mode: truthy enables selection (zoom disabled, image icons
            shown); falsy restores zoom mode. The existing selection mask
            is deliberately preserved.
        """
        #does it have sense to enable the selection without the image selection icons?
        #if not self.__imageIconsFlag:
        #    mode = False
        if mode:
            self.graphWidget.graph.enableSelection(True)
            self.__brushMode = False
            self.graphWidget.picker.setTrackerMode(Qwt5.QwtPicker.AlwaysOn)
            if QTVERSION < '4.0.0':
                self.graphWidget.selectionToolButton.setState(qt.QButton.On)
            else:
                self.graphWidget.hideProfileSelectionIcons()
                self.graphWidget.selectionToolButton.setChecked(True)
            self.graphWidget.graph.enableZoom(False)
            self.graphWidget.selectionToolButton.setDown(True)
            self.graphWidget.showImageIcons()
        else:
            self.graphWidget.picker.setTrackerMode(Qwt5.QwtPicker.AlwaysOff)
            self.graphWidget.showProfileSelectionIcons()
            self.graphWidget.graph.enableZoom(True)
            if QTVERSION < '4.0.0':
                self.graphWidget.selectionToolButton.setState(qt.QButton.Off)
            else:
                self.graphWidget.selectionToolButton.setChecked(False)
            self.graphWidget.selectionToolButton.setDown(False)
            self.graphWidget.hideImageIcons()
        if self.__imageData is None: return
        #do not reset the selection
        #self.__selectionMask = numpy.zeros(self.__imageData.shape, numpy.UInt8)
    def _additionalSelectionMenuDialog(self):
        """Show the colormap-threshold selection menu (no-op without data)."""
        if self.__imageData is None:
            return
        self._additionalSelectionMenu.exec_(self.cursor().pos())
def _getSelectionMinMax(self):
if self.colormap is None:
goodData = self.__imageData[numpy.isfinite(self.__imageData)]
maxValue = goodData.max()
minValue = goodData.min()
else:
minValue = self.colormap[2]
maxValue = self.colormap[3]
return minValue, maxValue
def _selectMax(self):
selectionMask = numpy.zeros(self.__imageData.shape,
numpy.uint8)
minValue, maxValue = self._getSelectionMinMax()
tmpData = numpy.array(self.__imageData, copy=True)
tmpData[True - numpy.isfinite(self.__imageData)] = minValue
selectionMask[tmpData >= maxValue] = 1
self.setSelectionMask(selectionMask, plot=False)
self.plotImage(update=False)
self._emitMaskChangedSignal()
def _selectMiddle(self):
selectionMask = numpy.ones(self.__imageData.shape,
numpy.uint8)
minValue, maxValue = self._getSelectionMinMax()
tmpData = numpy.array(self.__imageData, copy=True)
tmpData[True - numpy.isfinite(self.__imageData)] = maxValue
selectionMask[tmpData >= maxValue] = 0
selectionMask[tmpData <= minValue] = 0
self.setSelectionMask(selectionMask, plot=False)
self.plotImage(update=False)
self._emitMaskChangedSignal()
def _selectMin(self):
selectionMask = numpy.zeros(self.__imageData.shape,
numpy.uint8)
minValue, maxValue = self._getSelectionMinMax()
tmpData = numpy.array(self.__imageData, copy=True)
tmpData[True - numpy.isfinite(self.__imageData)] = maxValue
selectionMask[tmpData <= minValue] = 1
self.setSelectionMask(selectionMask, plot=False)
self.plotImage(update=False)
self._emitMaskChangedSignal()
def _invertSelection(self):
if self.__imageData is None:
return
mask = numpy.ones(self.__imageData.shape,
numpy.uint8)
if self.__selectionMask is not None:
mask[self.__selectionMask > 0] = 0
self.setSelectionMask(mask, plot=True)
self._emitMaskChangedSignal()
    def _resetSelection(self, owncall=True):
        """Clear the selection mask and replot.

        :param owncall: when True the reset is broadcast to the other
            widgets through a "resetSelection" event.
        """
        if DEBUG:
            print("_resetSelection")
        self.__selectionMask = None
        if self.__imageData is None:
            return
        #self.__selectionMask = numpy.zeros(self.__imageData.shape, numpy.uint8)
        self.plotImage(update = True)
        #inform the others
        if owncall:
            ddict = {}
            ddict['event'] = "resetSelection"
            ddict['id'] = id(self)
            self.emitMaskImageSignal(ddict)
    def setSelectionMask(self, mask, plot=True):
        """Install *mask* as the current selection mask.

        :param mask: array with the same shape as the image data, or None.
        :param plot: when True the image is replotted with the new mask.
        """
        if mask is not None:
            if self.__imageData is not None:
                # this operation will be made when retrieving the mask
                #mask *= numpy.isfinite(self.__imageData)
                pass
        self.__selectionMask = mask
        if plot:
            self.plotImage(update=False)
    def getSelectionMask(self):
        """Return the selection mask with non-finite pixels zeroed out.

        Returns None when there is no image; an all-zero mask when nothing
        has been selected yet.
        """
        if self.__imageData is None:
            return None
        if self.__selectionMask is None:
            return numpy.zeros(self.__imageData.shape, numpy.uint8) *\
                   numpy.isfinite(self.__imageData)
        return self.__selectionMask *\
               numpy.isfinite(self.__imageData)
    def setImageData(self, data, clearmask=False, xScale=None, yScale=None):
        """Set the image data to display.

        :param data: 2D array of intensities, or None to clear the plot.
        :param clearmask: when True the selection mask is discarded.
        :param xScale: optional (min, max) scale for the X axis.
        :param yScale: optional (min, max) scale for the Y axis.
        """
        self.__image = None
        self._xScale = xScale
        self._yScale = yScale
        if data is None:
            self.__imageData = data
            self.__selectionMask = None
            self.plotImage(update = True)
            return
        else:
            self.__imageData = data
        if clearmask:
            self.__selectionMask = None
        if self.colormapDialog is not None:
            # keep the colormap dialog in sync with the finite data range;
            # the dialog triggers the replot through its update mechanism
            goodData = self.__imageData[numpy.isfinite(self.__imageData)]
            minData = goodData.min()
            maxData = goodData.max()
            if self.colormapDialog.autoscale:
                self.colormapDialog.setDisplayedMinValue(minData)
                self.colormapDialog.setDisplayedMaxValue(maxData)
            self.colormapDialog.setDataMinMax(minData, maxData, update=True)
        else:
            self.plotImage(update = True)
    def getImageData(self):
        """Return the image data array currently displayed (or None)."""
        return self.__imageData
    def getQImage(self):
        """Return the QImage set via setQImage, if any (or None)."""
        return self.__image
    def setQImage(self, qimage, width, height, clearmask=False, data=None):
        """Display a QImage, deriving intensity data from its pixels.

        :param qimage: source image; rescaled to width x height if needed.
        :param width, height: target size in pixels.
        :param clearmask: when True the selection mask is discarded.
        :param data: optional intensity array; when None a luma-style
            weighted sum of the RGB channels is used instead.
        """
        #This is just to get it different than None
        if (qimage.width() != width) or (qimage.height() != height):
            # "if 1 or ..." forces smooth transformation in all cases;
            # the FastTransformation branch is effectively dead code
            if 1 or (qimage.width() > width) or (qimage.height() > height):
                transformation = qt.Qt.SmoothTransformation
            else:
                transformation = qt.Qt.FastTransformation
            self.__image = qimage.scaled(qt.QSize(width, height),
                                         qt.Qt.IgnoreAspectRatio,
                                         transformation)
        else:
            self.__image = qimage
        # NOTE(review): numpy.fromstring is deprecated in favour of
        # numpy.frombuffer in recent numpy -- confirm supported versions.
        if self.__image.format() == qt.QImage.Format_Indexed8:
            # grayscale: replicate the single channel into BGRX
            pixmap0 = numpy.fromstring(qimage.bits().asstring(width * height),
                                       dtype = numpy.uint8)
            pixmap = numpy.zeros((height * width, 4), numpy.uint8)
            pixmap[:,0] = pixmap0[:]
            pixmap[:,1] = pixmap0[:]
            pixmap[:,2] = pixmap0[:]
            pixmap[:,3] = 255
            pixmap.shape = height, width, 4
        else:
            self.__image = self.__image.convertToFormat(qt.QImage.Format_ARGB32)
            pixmap = numpy.fromstring(self.__image.bits().asstring(width * height * 4),
                                      dtype = numpy.uint8)
            pixmap.shape = height, width,-1
        if data is None:
            # derive intensities from the color channels (BGR weighting)
            self.__imageData = numpy.zeros((height, width), numpy.float)
            self.__imageData = pixmap[:,:,0] * 0.114 +\
                               pixmap[:,:,1] * 0.587 +\
                               pixmap[:,:,2] * 0.299
        else:
            self.__imageData = data
            self.__imageData.shape = height, width
        self._xScale = None
        self._yScale = None
        self.__pixmap0 = pixmap
        if clearmask:
            self.__selectionMask = None
        self.plotImage(update = True)
    def plotImage(self, update=True):
        """Render the current image (with the selection mask) in the graph.

        :param update: when True the pixmap is recomputed from the data;
            when False the cached pixmap is reused and only the mask
            overlay is reapplied.
        """
        if self.__imageData is None:
            # nothing to show: clear the plot and the position picker
            self.graphWidget.graph.clear()
            self.graphWidget.picker.data = None
            self.graphWidget.picker.xScale = None
            self.graphWidget.picker.yScale = None
            return
        if update:
            self.getPixmapFromData()
            self.__pixmap0 = self.__pixmap.copy()
            self.graphWidget.picker.data = self.__imageData
            self.graphWidget.picker.xScale = self._xScale
            self.graphWidget.picker.yScale = self._yScale
            # pick a tracker pen color that contrasts with the colormap
            if self.colormap is None:
                if self.__defaultColormap < 2:
                    self.graphWidget.picker.setTrackerPen(qt.QPen(qt.Qt.green))
                else:
                    self.graphWidget.picker.setTrackerPen(qt.QPen(qt.Qt.black))
            elif int(str(self.colormap[0])) > 1: #color
                self.graphWidget.picker.setTrackerPen(qt.QPen(qt.Qt.black))
            else:
                self.graphWidget.picker.setTrackerPen(qt.QPen(qt.Qt.green))
        self.__applyMaskToImage()
        # preserve manually-set axis limits across the replot
        if not self.graphWidget.graph.yAutoScale:
            ylimits = self.graphWidget.graph.getY1AxisLimits()
        if not self.graphWidget.graph.xAutoScale:
            xlimits = self.graphWidget.graph.getX1AxisLimits()
        # NOTE(review): ndarray.tostring() is deprecated in favour of
        # tobytes() in recent numpy -- confirm supported versions.
        self.graphWidget.graph.pixmapPlot(self.__pixmap.tostring(),
            (self.__imageData.shape[1], self.__imageData.shape[0]),
                                    xmirror = 0,
                                    ymirror = not self._y1AxisInverted,
                                    xScale = self._xScale,
                                    yScale = self._yScale)
        if not self.graphWidget.graph.yAutoScale:
            self.graphWidget.graph.setY1AxisLimits(ylimits[0], ylimits[1],
                                                   replot=False)
        if not self.graphWidget.graph.xAutoScale:
            self.graphWidget.graph.setX1AxisLimits(xlimits[0], xlimits[1],
                                                   replot=False)
        self.graphWidget.graph.replot()
    def getPixmapFromData(self):
        """Compute the BGRX pixmap shown on screen from the image data.

        Applies the current colormap definition (or the default one) via
        spslut.transform and paints non-finite pixels white. When a QImage
        was supplied directly, its original pixmap is simply reused.
        """
        colormap = self.colormap
        if self.__image is not None:
            # a QImage was supplied: reuse its pixel data unchanged
            self.__pixmap = self.__pixmap0.copy()
            return
        if hasattr(self.__imageData, 'mask'):
            data = self.__imageData.data
        else:
            data = self.__imageData
        finiteData = numpy.isfinite(data)
        # goodData is True only when every single pixel is finite
        goodData = finiteData.min()
        if self.colormapDialog is not None:
            minData = self.colormapDialog.dataMin
            maxData = self.colormapDialog.dataMax
        else:
            if goodData:
                minData = data.min()
                maxData = data.max()
            else:
                # restrict the range to the finite pixels only
                tmpData = data[finiteData]
                if tmpData.size > 0:
                    minData = tmpData.min()
                    maxData = tmpData.max()
                else:
                    minData = None
                    maxData = None
                tmpData = None
        if colormap is None:
            # default colormap; autoscale when no data range is known
            if minData is None:
                (self.__pixmap,size,minmax)= spslut.transform(\
                    data,
                    (1,0),
                    (self.__defaultColormapType,3.0),
                    "BGRX",
                    self.__defaultColormap,
                    1,
                    (0, 1),
                    (0, 255), 1)
            else:
                (self.__pixmap,size,minmax)= spslut.transform(\
                    data,
                    (1,0),
                    (self.__defaultColormapType,3.0),
                    "BGRX",
                    self.__defaultColormap,
                    0,
                    (minData,maxData),
                    (0, 255), 1)
        else:
            if len(colormap) < 7:
                colormap.append(spslut.LINEAR)
            if goodData:
                (self.__pixmap,size,minmax)= spslut.transform(\
                    data,
                    (1,0),
                    (colormap[6],3.0),
                    "BGRX",
                    COLORMAPLIST[int(str(colormap[0]))],
                    colormap[1],
                    (colormap[2],colormap[3]),
                    (0,255), 1)
            elif colormap[1]:
                #autoscale
                if minData is None:
                    (self.__pixmap,size,minmax)= spslut.transform(\
                        data,
                        (1,0),
                        (self.__defaultColormapType,3.0),
                        "BGRX",
                        self.__defaultColormap,
                        1,
                        (0, 1),
                        (0, 255), 1)
                else:
                    (self.__pixmap,size,minmax)= spslut.transform(\
                        data,
                        (1,0),
                        (colormap[6],3.0),
                        "BGRX",
                        COLORMAPLIST[int(str(colormap[0]))],
                        0,
                        (minData,maxData),
                        (0,255), 1)
            else:
                (self.__pixmap,size,minmax)= spslut.transform(\
                    data,
                    (1,0),
                    (colormap[6],3.0),
                    "BGRX",
                    COLORMAPLIST[int(str(colormap[0]))],
                    colormap[1],
                    (colormap[2],colormap[3]),
                    (0,255), 1)
        self.__pixmap = self.__pixmap.astype(numpy.ubyte)
        self.__pixmap.shape = [data.shape[0], data.shape[1], 4]
        if not goodData:
            # flag non-finite pixels white so they are clearly visible
            self.__pixmap[finiteData < 1] = 255
    def __applyMaskToImage(self):
        """Blend the current selection mask into self.__pixmap.

        Selected pixels are darkened by 20 % on color images/colormaps
        and tinted on grayscale ones, so the selection stands out.
        Does nothing when no mask is defined.
        """
        if self.__selectionMask is None:
            return
        #if not self.__selectionFlag:
        #    print("Return because of selection flag")
        #    return
        if self.colormap is None:
            if self.__image is not None:
                if self.__image.format() == qt.QImage.Format_ARGB32:
                    # Color image: darken the selected pixels by 20 %.
                    for i in range(4):
                        self.__pixmap[:,:,i] = (self.__pixmap0[:,:,i] *\
                                (1 - (0.2 * self.__selectionMask))).astype(numpy.uint8)
                else:
                    # Non-ARGB image: tint the selection on the BGRX channels.
                    self.__pixmap = self.__pixmap0.copy()
                    self.__pixmap[self.__selectionMask>0,0]    = 0x40
                    self.__pixmap[self.__selectionMask>0,2]    = 0x70
                    self.__pixmap[self.__selectionMask>0,3]    = 0x40
            else:
                # NOTE(review): looks like __defaultColormap > 1 is used to
                # mean "colored default colormap" -- confirm against
                # COLORMAPLIST ordering.
                if self.__defaultColormap > 1:
                    tmp = 1 - 0.2 * self.__selectionMask
                    for i in range(3):
                        self.__pixmap[:,:,i] = (self.__pixmap0[:,:,i] *\
                                                tmp)
                    if 0:
                        # Disabled debug code.
                        #this is to recolor non finite points
                        tmpMask = numpy.isfinite(self.__imageData)
                        goodData = numpy.isfinite(self.__imageData).min()
                        if not goodData:
                            for i in range(3):
                                self.__pixmap[:,:,i] *= tmpMask
                else:
                    self.__pixmap = self.__pixmap0.copy()
                    self.__pixmap[self.__selectionMask>0,0]    = 0x40
                    self.__pixmap[self.__selectionMask>0,2]    = 0x70
                    self.__pixmap[self.__selectionMask>0,3]    = 0x40
                    if 0:
                        # Disabled debug code.
                        #this is to recolor non finite points
                        tmpMask = ~numpy.isfinite(self.__imageData)
                        badData = numpy.isfinite(self.__imageData).max()
                        if badData:
                            self.__pixmap[tmpMask,0]    = 0x00
                            self.__pixmap[tmpMask,1]    = 0xff
                            self.__pixmap[tmpMask,2]    = 0xff
                            self.__pixmap[tmpMask,3]    = 0xff
        elif int(str(self.colormap[0])) > 1:     #color
            tmp = 1 - 0.2 * self.__selectionMask
            for i in range(3):
                self.__pixmap[:,:,i] = (self.__pixmap0[:,:,i] *\
                                        tmp)
            if 0:
                # Disabled debug code (note the duplicated `if not goodData`).
                tmpMask = numpy.isfinite(self.__imageData)
                goodData = numpy.isfinite(self.__imageData).min()
                if not goodData:
                    if not goodData:
                        for i in range(3):
                            self.__pixmap[:,:,i] *= tmpMask
        else:
            # Grayscale colormap: blend a tint into the blue/alpha channels.
            self.__pixmap = self.__pixmap0.copy()
            tmp  = 1 - self.__selectionMask
            self.__pixmap[:,:, 2] = (0x70 * self.__selectionMask) +\
                                     tmp * self.__pixmap0[:,:,2]
            self.__pixmap[:,:, 3] = (0x40 * self.__selectionMask) +\
                                     tmp * self.__pixmap0[:,:,3]
            if 0:
                # Disabled debug code.
                tmpMask = ~numpy.isfinite(self.__imageData)
                badData = numpy.isfinite(self.__imageData).max()
                if badData:
                    self.__pixmap[tmpMask,0]    = 0x00
                    self.__pixmap[tmpMask,1]    = 0xff
                    self.__pixmap[tmpMask,2]    = 0xff
                    self.__pixmap[tmpMask,3]    = 0xff
        return
def selectColormap(self):
if self.__imageData is None:
return
if self.colormapDialog is None:
self.__initColormapDialog()
if self.colormapDialog is None:
return
if self.colormapDialog.isHidden():
self.colormapDialog.show()
if QTVERSION < '4.0.0':
self.colormapDialog.raiseW()
else:
self.colormapDialog.raise_()
self.colormapDialog.show()
    def __initColormapDialog(self):
        """Create and wire the colormap dialog from the current image data.

        Leaves self.colormapDialog unset when the image contains no
        finite value (an error box is shown instead).
        """
        # Compute the data range on finite pixels only.
        goodData = self.__imageData[numpy.isfinite(self.__imageData)]
        if goodData.size > 0:
            maxData = goodData.max()
            minData = goodData.min()
        else:
            qt.QMessageBox.critical(self,"No Data",
                "Image data does not contain any real value")
            return
        self.colormapDialog = ColormapDialog.ColormapDialog()
        # Map the spslut colormap id to the dialog's list index
        # (1 -> 0, 6 -> 1); other ids are passed through unchanged.
        colormapIndex = self.__defaultColormap
        if colormapIndex == 1:
            colormapIndex = 0
        elif colormapIndex == 6:
            colormapIndex = 1
        self.colormapDialog.colormapIndex  = colormapIndex
        self.colormapDialog.colormapString = self.colormapDialog.colormapList[colormapIndex]
        self.colormapDialog.setDataMinMax(minData, maxData)
        self.colormapDialog.setAutoscale(1)
        self.colormapDialog.setColormap(self.colormapDialog.colormapIndex)
        self.colormapDialog.setColormapType(self.__defaultColormapType, update=False)
        # 6-element colormap tuple: (index, autoscale, vmin, vmax, dataMin, dataMax).
        self.colormap = (self.colormapDialog.colormapIndex,
                              self.colormapDialog.autoscale,
                              self.colormapDialog.minValue,
                              self.colormapDialog.maxValue,
                              minData, maxData)
        # Qt3 and Qt4 use different window-title and signal APIs.
        if QTVERSION < '4.0.0':
            self.colormapDialog.setCaption("Colormap Dialog")
            self.connect(self.colormapDialog,
                         qt.PYSIGNAL("ColormapChanged"),
                         self.updateColormap)
        else:
            self.colormapDialog.setWindowTitle("Colormap Dialog")
            self.connect(self.colormapDialog,
                         qt.SIGNAL("ColormapChanged"),
                         self.updateColormap)
        self.colormapDialog._update()
def updateColormap(self, *var):
if len(var) > 6:
self.colormap = [var[0],
var[1],
var[2],
var[3],
var[4],
var[5],
var[6]]
elif len(var) > 5:
self.colormap = [var[0],
var[1],
var[2],
var[3],
var[4],
var[5]]
else:
self.colormap = [var[0],
var[1],
var[2],
var[3],
var[4],
var[5]]
self.plotImage(True)
def _addImageClicked(self):
ddict = {}
ddict['event'] = "addImageClicked"
ddict['image'] = self.__imageData
ddict['title'] = self.getGraphTitle()
ddict['id'] = id(self)
self.emitMaskImageSignal(ddict)
def _removeImageClicked(self):
ddict = {}
ddict['event'] = "removeImageClicked"
ddict['title'] = self.getGraphTitle()
ddict['id'] = id(self)
self.emitMaskImageSignal(ddict)
def _replaceImageClicked(self):
ddict = {}
ddict['event'] = "replaceImageClicked"
ddict['image'] = self.__imageData
ddict['title'] = self.getGraphTitle()
ddict['id'] = id(self)
self.emitMaskImageSignal(ddict)
def _saveToolButtonSignal(self):
self._saveMenu.exec_(self.cursor().pos())
    def _saveMatplotlibImage(self):
        """Open (lazily creating) the matplotlib save dialog for the image.

        The dialog parameters are synchronised with the current colormap,
        axis scales, labels and zoom limits before showing it.
        """
        imageData = self.__imageData
        if self._matplotlibSaveImage is None:
            self._matplotlibSaveImage = QPyMcaMatplotlibSave.SaveImageSetup(None,
                                                            image=None)
        title = "Matplotlib " + self.getGraphTitle()
        self._matplotlibSaveImage.setWindowTitle(title)
        ddict = self._matplotlibSaveImage.getParameters()
        if self.colormap is not None:
            colormapType = ddict['linlogcolormap']
            try:
                # 7-element colormap tuples carry the lin/log type flag.
                colormapIndex, autoscale, vmin, vmax,\
                            dataMin, dataMax, colormapType = self.colormap
                if colormapType == spslut.LOG:
                    colormapType = 'logarithmic'
                else:
                    colormapType = 'linear'
            except:
                # Older 4-element tuples: keep the dialog's current type.
                colormapIndex, autoscale, vmin, vmax = self.colormap[0:4]
            ddict['linlogcolormap'] = colormapType
            if not autoscale:
                ddict['valuemin'] = vmin
                ddict['valuemax'] = vmax
            else:
                # 0/0 means "autoscale" for the save dialog.
                ddict['valuemin'] = 0
                ddict['valuemax'] = 0
        #this sets the actual dimensions
        if self._xScale is not None:
            ddict['xorigin'] = self._xScale[0]
            ddict['xpixelsize'] = (self._xScale[1] - self._xScale[0])/\
                              float(imageData.shape[1])
        if self._yScale is not None:
            ddict['yorigin'] = self._yScale[0]
            ddict['ypixelsize'] = (self._yScale[1] - self._yScale[0])/\
                              float(imageData.shape[0])
        ddict['xlabel'] = self.getXLabel()
        ddict['ylabel'] = self.getYLabel()
        # Propagate the current zoom window.
        limits = self.graphWidget.graph.getX1AxisLimits()
        ddict['zoomxmin'] = limits[0]
        ddict['zoomxmax'] = limits[1]
        limits = self.graphWidget.graph.getY1AxisLimits()
        ddict['zoomymin'] = limits[0]
        ddict['zoomymax'] = limits[1]
        self._matplotlibSaveImage.setParameters(ddict)
        self._matplotlibSaveImage.setImageData(imageData)
        self._matplotlibSaveImage.show()
        self._matplotlibSaveImage.raise_()
def _otherWidgetGraphSignal(self, ddict):
self._graphSignal(ddict, ownsignal = False)
    def _graphSignal(self, ddict, ownsignal = None):
        """Handle graph events: rectangular or brush mask selection.

        Args:
            ddict: Event dictionary from the graph widget; the relevant
                events here are "MouseSelection" and "MouseAt".
            ownsignal: True when the event originates from our own graph
                (set to False when relayed from another widget).
        """
        if ownsignal is None:
            ownsignal = True
        emitsignal = False
        if self.__imageData is None:
            return
        if ddict['event'] == "MouseSelection":
            # Rectangular selection: normalise the corner coordinates so
            # that (xmin, ymin) <= (xmax, ymax).
            if ddict['column_min'] < ddict['column_max']:
                xmin = ddict['column_min']
                xmax = ddict['column_max']
            else:
                xmin = ddict['column_max']
                xmax = ddict['column_min']
            if ddict['row_min'] < ddict['row_max']:
                ymin = ddict['row_min']
                ymax = ddict['row_max']
            else:
                ymin = ddict['row_max']
                ymax = ddict['row_min']
            """
            if not (self._xScale is None and self._yScale is None):
                ymin, xmin = convertToRowAndColumn(xmin, ymin, self.__imageData.shape,
                                                   xScale=self._xScale,
                                                   yScale=self._yScale,
                                                   safe=True)
                ymax, xmax = convertToRowAndColumn(xmax, ymax, self.__imageData.shape,
                                                   xScale=self._xScale,
                                                   yScale=self._yScale,
                                                   safe=True)
            """
            # Clip the selection rectangle to the image boundaries.
            i1 = max(int(round(xmin)), 0)
            i2 = min(abs(int(round(xmax))) + 1, self.__imageData.shape[1])
            j1 = max(int(round(ymin)),0)
            j2 = min(abs(int(round(ymax))) + 1, self.__imageData.shape[0])
            if self.__selectionMask is None:
                self.__selectionMask = numpy.zeros(self.__imageData.shape,
                                                   numpy.uint8)
            self.__selectionMask[j1:j2, i1:i2] = 1
            emitsignal = True
        elif ddict['event'] == "MouseAt":
            if ownsignal:
                pass
            if self.__brushMode:
                # Brush selection only works when zoom is disabled.
                if self.graphWidget.graph.isZoomEnabled():
                    return
                #if follow mouse is not activated
                #it only enters here when the mouse is pressed.
                #Therefore is perfect for "brush" selections.
                """
                if not (self._xScale is None and self._yScale is None):
                    y, x = convertToRowAndColumn(ddict['x'], ddict['y'], self.__imageData.shape,
                                                   xScale=self._xScale,
                                                   yScale=self._yScale,
                                                   safe=True)
                else:
                    x = ddict['x']
                    y = ddict['y']
                """
                y = ddict['row']
                x = ddict['column']
                width = self.__brushWidth   #in (row, column) units
                r = self.__imageData.shape[0]
                c = self.__imageData.shape[1]
                # Center a width x width square on the cursor, clipped to
                # the image, and make sure it covers at least one pixel.
                xmin = max((x-0.5*width), 0)
                xmax = min((x+0.5*width), c)
                ymin = max((y-0.5*width), 0)
                ymax = min((y+0.5*width), r)
                i1 = min(int(round(xmin)), c-1)
                i2 = min(int(round(xmax)), c)
                j1 = min(int(round(ymin)),r-1)
                j2 = min(int(round(ymax)), r)
                if i1 == i2:
                    i2 = i1+1
                if j1 == j2:
                    j2 = j1+1
                if self.__selectionMask is None:
                    self.__selectionMask = numpy.zeros(self.__imageData.shape,
                                                       numpy.uint8)
                # Brush either erases or paints the mask.
                if self.__eraseMode:
                    self.__selectionMask[j1:j2, i1:i2] = 0
                else:
                    self.__selectionMask[j1:j2, i1:i2] = 1
                emitsignal = True
        if emitsignal:
            #should this be made by the parent?
            self.plotImage(update = False)
            #inform the other widgets
            self._emitMaskChangedSignal()
def _emitMaskChangedSignal(self):
#inform the other widgets
ddict = {}
ddict['event'] = "selectionMaskChanged"
ddict['current'] = self.__selectionMask * 1
ddict['id'] = id(self)
self.emitMaskImageSignal(ddict)
def emitMaskImageSignal(self, ddict):
if QTVERSION < '4.0.0':
qt.QObject.emit(self,
qt.PYSIGNAL('MaskImageWidgetSignal'),
ddict)
else:
qt.QObject.emit(self,
qt.SIGNAL('MaskImageWidgetSignal'),
ddict)
def _zoomResetSignal(self):
if DEBUG:
print("_zoomResetSignal")
self.graphWidget._zoomReset(replot=False)
self.plotImage(True)
    def getOutputFileName(self):
        """Prompt the user for an output file name.

        Returns:
            The selected file name, with the extension of the selected
            filter appended when missing, or "" when the dialog is
            cancelled.  Also updates self.outputDir, self._saveFilter
            and PyMcaDirs.outputDir as side effects.
        """
        # Start from the last used output directory when it still exists.
        initdir = PyMcaDirs.outputDir
        if self.outputDir is not None:
            if os.path.exists(self.outputDir):
                initdir = self.outputDir
        filedialog = qt.QFileDialog(self)
        filedialog.setFileMode(filedialog.AnyFile)
        filedialog.setAcceptMode(qt.QFileDialog.AcceptSave)
        filedialog.setWindowIcon(qt.QIcon(qt.QPixmap(IconDict["gioconda16"])))
        formatlist = ["ASCII  Files *.dat",
                      "EDF Files *.edf",
                      'CSV(, separated) Files *.csv',
                      'CSV(; separated) Files *.csv',
                      'CSV(tab separated) Files *.csv']
        # Qt3/PyQt needs a QStringList; Qt4 bindings accept a plain list.
        if hasattr(qt, "QStringList"):
            strlist = qt.QStringList()
        else:
            strlist = []
        for f in formatlist:
                strlist.append(f)
        if self._saveFilter is None:
            self._saveFilter =formatlist[0]
        filedialog.setFilters(strlist)
        filedialog.selectFilter(self._saveFilter)
        filedialog.setDirectory(initdir)
        ret = filedialog.exec_()
        if  not ret:
            return ""
        filename = filedialog.selectedFiles()[0]
        if len(filename):
            filename = qt.safe_str(filename)
            self.outputDir = os.path.dirname(filename)
            self._saveFilter = qt.safe_str(filedialog.selectedFilter())
            # The last three characters of the filter are its extension
            # (dat/edf/csv).
            filterused = "."+self._saveFilter[-3:]
            PyMcaDirs.outputDir = os.path.dirname(filename)
            if len(filename) < 4:
                filename = filename+ filterused
            elif filename[-4:] != filterused :
                filename = filename+ filterused
        else:
            filename = ""
        return filename
def saveImageList(self, filename=None, imagelist=None, labels=None):
imageList = []
if labels is None:
labels = []
if imagelist is None:
if self.__imageData is not None:
imageList.append(self.__imageData)
label = self.getGraphTitle()
label.replace(' ', '_')
labels.append(label)
if self.__selectionMask is not None:
if self.__selectionMask.max() > 0:
imageList.append(self.__selectionMask)
labels.append(label+"_Mask")
else:
imageList = imagelist
if len(labels) == 0:
for i in range(len(imagelist)):
labels.append("Image%02d" % i)
if not len(imageList):
qt.QMessageBox.information(self,"No Data",
"Image list is empty.\nNothing to be saved")
return
if filename is None:
filename = self.getOutputFileName()
if not len(filename):return
if filename.lower().endswith(".edf"):
ArraySave.save2DArrayListAsEDF(imageList, filename, labels)
elif filename.lower().endswith(".csv"):
if "," in self._saveFilter:
csvseparator = ","
elif ";" in self._saveFilter:
csvseparator = ";"
else:
csvseparator = "\t"
ArraySave.save2DArrayListAsASCII(imageList, filename, labels,
csv=True,
csvseparator=csvseparator)
else:
ArraySave.save2DArrayListAsASCII(imageList, filename, labels,
csv=False)
def saveClippedSeenImageList(self):
return self.saveClippedAndSubtractedSeenImageList(subtract=False)
    def saveClippedAndSubtractedSeenImageList(self, subtract=True):
        """Save the displayed image clipped to the manual colormap limits.

        Args:
            subtract: When True, the (clipped) minimum is also subtracted
                so the saved data starts at zero.

        The operations performed are recorded in the image label.
        """
        imageData = self.__imageData
        if imageData is None:
            return
        vmin = None
        label = self.getGraphTitle()
        if not len(label):
            label = "Image01"
        if self.colormap is not None:
            colormapIndex, autoscale, vmin, vmax = self.colormap[0:4]
            if not autoscale:
                # Clip to the manual limits and record it in the label.
                imageData = imageData.clip(vmin, vmax)
                label += ".clip(%f,%f)" % (vmin, vmax)
        if subtract:
            if vmin is None:
                vmin = imageData.min()
            imageData = imageData-vmin
            label += "-%f" % vmin
        imageList = [imageData]
        labelList = [label]
        if self.__selectionMask is not None:
            if self.__selectionMask.max() > 0:
                # Also save the active selection mask.
                imageList.append(self.__selectionMask)
                labelList.append(label+"_Mask")
        self.saveImageList(filename=None,
                           imagelist=imageList,
                           labels=labelList)
def setDefaultColormap(self, colormapindex, logflag=False):
self.__defaultColormap = COLORMAPLIST[min(colormapindex, len(COLORMAPLIST)-1)]
if logflag:
self.__defaultColormapType = spslut.LOG
else:
self.__defaultColormapType = spslut.LINEAR
def closeEvent(self, event):
if self._profileSelectionWindow is not None:
self._profileSelectionWindow.close()
if self.colormapDialog is not None:
self.colormapDialog.close()
return qt.QWidget.closeEvent(self, event)
def setInfoText(self, text):
return self.graphWidget.setInfoText(text)
def test():
    """Interactive demo: show a MaskImageWidget with demo or file data.

    Optionally pass an image file (edf/cbf/ccd/spe/tif/tiff or any
    QImage-readable format) on the command line.
    """
    app = qt.QApplication([])
    qt.QObject.connect(app,
                       qt.SIGNAL("lastWindowClosed()"),
                       app,
                       qt.SLOT('quit()'))
    container = MaskImageWidget()
    if len(sys.argv) > 1:
        if sys.argv[1].endswith('edf') or\
           sys.argv[1].endswith('cbf') or\
           sys.argv[1].endswith('ccd') or\
           sys.argv[1].endswith('spe') or\
           sys.argv[1].endswith('tif') or\
           sys.argv[1].endswith('tiff'):
            # Scientific data formats are read through EdfFile.
            container = MaskImageWidget(profileselection=True)
            import EdfFile
            edf = EdfFile.EdfFile(sys.argv[1])
            data = edf.GetData(0)
            container.setImageData(data)
        else:
            # Anything else is tried as a plain Qt image.
            image = qt.QImage(sys.argv[1])
            #container.setQImage(image, image.width(),image.height())
            container.setQImage(image, 200, 200)
    else:
        # No argument: display a synthetic 400x200 ramp image.
        container = MaskImageWidget(profileselection=True)
        data = numpy.arange(400 * 200).astype(numpy.int32)
        data.shape = 400, 200
        #data = numpy.eye(200)
        container.setImageData(data, xScale=(200, 800), yScale=(400., 800.))
        #data.shape = 100, 400
        #container.setImageData(None)
        #container.setImageData(data)
    container.show()
    def theSlot(ddict):
        # Echo every event coming from the widget.
        print(ddict['event'])
    if QTVERSION < '4.0.0':
        qt.QObject.connect(container,
                           qt.PYSIGNAL("MaskImageWidgetSignal"),
                           theSlot)
        app.setMainWidget(container)
        app.exec_loop()
    else:
        qt.QObject.connect(container,
                           qt.SIGNAL("MaskImageWidgetSignal"),
                           theSlot)
        app.exec_()
if __name__ == "__main__":
    # Launch the interactive demo when run as a script.
    test()
| gpl-2.0 |
jstoxrocky/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
editted by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
zymsys/sms-tools | software/models_interface/sprModel_function.py | 18 | 3422 | # function to call the main analysis/synthesis functions in software/models/sprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sprModel as SPR
import stft as STFT
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80,
	minSineDur=0.02, maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
	"""
	Perform sinusoidal plus residual analysis/synthesis of a sound and
	plot the results.  Writes three WAV files to output_sounds/:
	the sinusoidal part, the residual part and their sum.

	inputFile: input sound file (monophonic with sampling rate of 44100)
	window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
	M: analysis window size
	N: fft size (power of two, bigger or equal than M)
	t: magnitude threshold of spectral peaks
	minSineDur: minimum duration of sinusoidal tracks
	maxnSines: maximum number of parallel sinusoids
	freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
	freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
	"""

	# size of fft used in synthesis
	Ns = 512

	# hop size (has to be 1/4 of Ns)
	H = 128

	# read input sound
	(fs, x) = UF.wavread(inputFile)

	# compute analysis window
	w = get_window(window, M)

	# perform sinusoidal plus residual analysis
	tfreq, tmag, tphase, xr = SPR.sprModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope)

	# compute spectrogram of residual
	mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)

	# sum sinusoids and residual
	y, ys = SPR.sprModelSynth(tfreq, tmag, tphase, xr, Ns, H, fs)

	# output sound file (monophonic with sampling rate of 44100)
	outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_sines.wav'
	outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_residual.wav'
	outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel.wav'

	# write sounds files for sinusoidal, residual, and the sum
	UF.wavwrite(ys, fs, outputFileSines)
	UF.wavwrite(xr, fs, outputFileResidual)
	UF.wavwrite(y, fs, outputFile)

	# create figure to show plots
	plt.figure(figsize=(12, 9))

	# frequency range to plot
	maxplotfreq = 5000.0

	# plot the input sound
	plt.subplot(3,1,1)
	plt.plot(np.arange(x.size)/float(fs), x)
	plt.axis([0, x.size/float(fs), min(x), max(x)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('input sound: x')

	# plot the magnitude spectrogram of residual
	plt.subplot(3,1,2)
	maxplotbin = int(N*maxplotfreq/fs)
	numFrames = int(mXr[:,0].size)
	frmTime = H*np.arange(numFrames)/float(fs)
	binFreq = np.arange(maxplotbin+1)*float(fs)/N
	plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
	plt.autoscale(tight=True)

	# plot the sinusoidal frequencies on top of the residual spectrogram
	if (tfreq.shape[1] > 0):
		# Hide sinusoid frequencies above the plotting range.
		tracks = tfreq*np.less(tfreq, maxplotfreq)
		tracks[tracks<=0] = np.nan
		plt.plot(frmTime, tracks, color='k')
		plt.title('sinusoidal tracks + residual spectrogram')
		plt.autoscale(tight=True)

	# plot the output sound
	plt.subplot(3,1,3)
	plt.plot(np.arange(y.size)/float(fs), y)
	plt.axis([0, y.size/float(fs), min(y), max(y)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('output sound: y')

	plt.tight_layout()
	plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
dhruv13J/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but a hand-crafted solution to alleviate this
  issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
   parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
ajdawson/iris | lib/iris/tests/unit/plot/__init__.py | 9 | 4522 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.plot` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.plot import _broadcast_2d as broadcast
from iris.coords import AuxCoord
from iris.tests.stock import simple_2d, lat_lon_cube
@tests.skip_plot
class TestGraphicStringCoord(tests.GraphicsTest):
    """Graphics tests for cubes carrying a string-valued aux coordinate."""
    def setUp(self):
        # A 2D cube with a 4-letter string coordinate on its second dimension.
        super(TestGraphicStringCoord, self).setUp()
        self.cube = simple_2d(with_bounds=True)
        self.cube.add_aux_coord(AuxCoord(list('abcd'),
                                         long_name='str_coord'), 1)
        self.lat_lon_cube = lat_lon_cube()
    def tick_loc_and_label(self, axis_name, axes=None):
        """Return a list of (location, label) pairs for the given axis."""
        # Intentional lazy import so that subclasses can have an opportunity
        # to change the backend.
        import matplotlib.pyplot as plt
        # Draw the plot to 'fix' the ticks.
        if axes:
            axes.figure.canvas.draw()
        else:
            axes = plt.gca()
            plt.draw()
        axis = getattr(axes, axis_name)
        locations = axis.get_majorticklocs()
        labels = [tick.get_text() for tick in axis.get_ticklabels()]
        return list(zip(locations, labels))
    def assertBoundsTickLabels(self, axis, axes=None):
        # Bounded coordinates get empty labels at the outer tick positions.
        actual = self.tick_loc_and_label(axis, axes)
        expected = [(-1.0, ''), (0.0, 'a'), (1.0, 'b'),
                    (2.0, 'c'), (3.0, 'd'), (4.0, '')]
        self.assertEqual(expected, actual)
    def assertPointsTickLabels(self, axis, axes=None):
        # Point coordinates map one tick per string value.
        actual = self.tick_loc_and_label(axis, axes)
        expected = [(0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd')]
        self.assertEqual(expected, actual)
@tests.skip_plot
class MixinCoords(object):
    """
    Mixin class of common plotting tests providing 2-dimensional
    permutations of coordinates and anonymous dimensions.

    Subclasses must provide: self.draw_func, self.mpl_patch, self.cube,
    self.foo / self.bar (coordinate points), self.foo_index /
    self.bar_index (dimension indices), self.data and self.dataT
    (its transpose).
    """
    def _check(self, u, v, data=None):
        # The patched matplotlib function must have been called exactly once
        # with the expected coordinate arrays (and data, when given).
        self.assertEqual(self.mpl_patch.call_count, 1)
        if data is not None:
            (actual_u, actual_v, actual_data), _ = self.mpl_patch.call_args
            self.assertArrayEqual(actual_data, data)
        else:
            (actual_u, actual_v), _ = self.mpl_patch.call_args
        self.assertArrayEqual(actual_u, u)
        self.assertArrayEqual(actual_v, v)
    def test_foo_bar(self):
        self.draw_func(self.cube, coords=('foo', 'bar'))
        u, v = broadcast(self.foo, self.bar)
        self._check(u, v, self.data)
    def test_bar_foo(self):
        # Swapped coordinate order transposes the data.
        self.draw_func(self.cube, coords=('bar', 'foo'))
        u, v = broadcast(self.bar, self.foo)
        self._check(u, v, self.dataT)
    def test_foo_0(self):
        # Mixing a named coordinate with an anonymous dimension index.
        self.draw_func(self.cube, coords=('foo', 0))
        u, v = broadcast(self.foo, self.bar_index)
        self._check(u, v, self.data)
    def test_1_bar(self):
        self.draw_func(self.cube, coords=(1, 'bar'))
        u, v = broadcast(self.foo_index, self.bar)
        self._check(u, v, self.data)
    def test_1_0(self):
        # Purely anonymous dimensions.
        self.draw_func(self.cube, coords=(1, 0))
        u, v = broadcast(self.foo_index, self.bar_index)
        self._check(u, v, self.data)
    def test_0_foo(self):
        self.draw_func(self.cube, coords=(0, 'foo'))
        u, v = broadcast(self.bar_index, self.foo)
        self._check(u, v, self.dataT)
    def test_bar_1(self):
        self.draw_func(self.cube, coords=('bar', 1))
        u, v = broadcast(self.bar, self.foo_index)
        self._check(u, v, self.dataT)
    def test_0_1(self):
        self.draw_func(self.cube, coords=(0, 1))
        u, v = broadcast(self.bar_index, self.foo_index)
        self._check(u, v, self.dataT)
| gpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/en/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
              scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
# (grid_scores_ holds one CV score per candidate subset size).
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| gpl-3.0 |
Wireless-Innovation-Forum/Spectrum-Access-System | src/studies/esc_impact_sim/run_esc_impact_sim.py | 1 | 10176 | # Copyright 2020 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""ESC Impact Simulator.
For evaluation of the impact of ESC protection into CBSD transmit power.
It uses the IAP reference implementation to assess how much power reduction is
required on each CBSD in the network.
Inputs:
- Some JSON FADs file defining the ESC networks.
- A CBSD deployment model. It can be either:
+ a JSON file defining the CBSD deployment model. For example one can use any
NTIA model that was generated for DPA neighborhood studies, found in:
https://github.com/Wireless-Innovation-Forum/Spectrum-Access-System/tree/master/data/research/deployment_models
+ The default NTIA nationwide deployment model (if no file specified): this
one is defined in the 2 CSV files NationWide_CatA, NationWide_CatB found
in the same data directory as above.
Output:
- A figure showing the impact of ESC protection on CBSDs power.
- A CSV file showing every CBSD with requested/obtained power.
Example Usage:
# Run the simulation for a list of grants, on all sensors of 2 ESC FADS.
python run_esc_impact.py --cbsd_file data/West14_reg_grant.json.zip \
--esc_fads esc1_fad.json,esc2_fad.json
# Run the simulation for a list of grants, on selected sensors of 2 ESC FADS:
# + only the sensors whose name contains `W12` and `W13` string.
# + also output one CSV per sensor containing all impacted CBSD.
python run_esc_impact.py --cbsd_file data/West14_reg_grant.json.zip \
--esc_fads esc1_fad.json,esc2_fad.json \
--sensors W12,W13 \
--output_csv
"""
import argparse
import json
import glob
import cartopy.crs as ccrs
import csv
import matplotlib.pyplot as plt
import numpy as np
import shapely.geometry as sgeo
import iap_patch
from deploy_model import deploy
from reference_models.iap import iap
from reference_models.geo import vincenty
from reference_models.tools import entities
from reference_models.tools import sim_utils
#----------------------------------------
# Setup the command line arguments
parser = argparse.ArgumentParser(description='ESC Impact Simulator')
# - Generic config.
parser.add_argument('--cbsd_file', type=str, default='',
                    help='CBSD deployment file (JSON).')
parser.add_argument('--esc_fads', type=str, default='',
                    help='The ESC FADs file (JSON) separated by a comma.')
parser.add_argument('--per_sensor', action='store_true',
                    help='If set, no ESC aggregation.')
parser.add_argument('--output_csv', action='store_true',
                    help='If set, output CSV per sensor of all impacted CBSD.')
# NOTE: `action='store_true'` already defaults to False, so the previous
# explicit `parser.set_defaults(sectorized_catb=False)` call was redundant
# and has been removed.
parser.add_argument('--sectorized_catb', action='store_true',
                    help='If set, modelize CatB as multi sector.')
parser.add_argument('--sensors', type=str, default='',
                    help='Sensors to analyse (prefix).')
options = parser.parse_args()
#--------------------------------------------------
# The simulation
def esc_impact_sim(cbsd_reg_grant, esc_fads,
                   sensor_filters=None,
                   per_sensor_mode=False,
                   do_csv_output=False,
                   force_catb_omni=True):
  """ESC impact simulation.

  Performs simulation on given input (CBSD and ESC), creates resulting plots
  and optionally an output CSV file per sensor.

  Args:
    cbsd_reg_grant: The CBSD file (JSON) in 'reg_grant' format. If empty, the
      default NTIA nationwide deployment model is used instead.
    esc_fads: A list of ESC FAD files (JSON).
    sensor_filters: List of strings for filtering sensors (ex: W13, google, ..).
      A sensor is simulated if its ID contains any of the strings.
    per_sensor_mode: If set, computes impact stats for each sensor
      independently. No plots or CSV output are produced.
    do_csv_output: If set, outputs one CSV file per sensor listing every CBSD
      within that sensor's neighborhood, with requested and obtained power.
    force_catb_omni: If set, CatB CBSDs are modeled with omni antennas when
      using the default deployment model.
  """
  # Read the grants:
  if cbsd_reg_grant:
    # .. from 'reg_grant' file
    grants, _ = sim_utils.ReadTestHarnessConfigFile(cbsd_reg_grant)
  else:
    # .. from the default nation wide deployment model
    print('Using NTIA NationWide deployment model')
    cbsds = deploy.ReadNationWideDeploymentModel(force_omni=force_catb_omni)
    grants = entities.ConvertToCbsdGrantInfo(cbsds, 3550, 3560)

  # Reads the ESC sensors from all FADs. A context manager guarantees the
  # file handles are closed (the previous `json.load(open(fad))` leaked them).
  sensors = []
  for fad in esc_fads:
    with open(fad) as fd:
      sensors.extend(json.load(fd)['recordData'])

  # Filter the ESCs to simulate: keep a sensor when any filter token is a
  # substring of its ID.
  if sensor_filters:
    sensors = [sensor for sensor in sensors
               if any(token in sensor['id'] for token in sensor_filters)]
    if not sensors:
      print('Simulation cancelled - No sensor name containing one of %s'
            % sensor_filters)
      return
  print('ESCs included in the simulation:')
  print([sensor['id'] for sensor in sensors])

  # Run IAP simulation. performIapForEsc() updates each grant's `iap_eirp`
  # set in place; its return value (allowed interference) is not needed here,
  # so the previously unused `esc_allowed_interference` variable is gone.
  if not per_sensor_mode:
    for sensor in sensors:
      iap.performIapForEsc(sensor, grants, [])
    impacted_grants = [grant for grant in grants
                       if max(grant.iap_eirp) - min(grant.iap_eirp) > 0]
    print('Number of impacted CBSDs: %d' % len(impacted_grants))
  else:
    # Special mode for getting # impacted grants per sensor independently.
    for sensor in sensors:
      # Clear iap_eirp before simulation of each ESC.
      for grant in grants:
        grant.iap_eirp.clear()
        grant.iap_eirp.add(grant.max_eirp)
      iap.performIapForEsc(sensor, grants, [])
      impacted_grants = [grant for grant in grants
                         if max(grant.iap_eirp) - min(grant.iap_eirp) > 0]
      print('Number of CBSDs impacted by %s: %d'
            % (sensor['id'], len(impacted_grants)))
    return

  # Output the CSV: one file per sensor, listing all CBSDs within the
  # neighborhood (40 km for CatA, 80 km for CatB).
  if do_csv_output:
    for sensor in sensors:
      sensor_loc = (sensor['installationParam']['latitude'],
                    sensor['installationParam']['longitude'])
      neighbor_grants = []
      # assumes sensor IDs look like `<prefix>/<network>/<sensor>` -
      # TODO confirm against the FAD records.
      sensor_name = sensor['id'].split('/')
      for idx, grant in enumerate(grants):
        dist_km, _, _ = vincenty.GeodesicDistanceBearing(
            grant.latitude, grant.longitude, sensor_loc[0], sensor_loc[1])
        if ((grant.cbsd_category == 'A' and dist_km <= 40) or
            (grant.cbsd_category == 'B' and dist_km <= 80)):
          # NOTE(review): the +10 presumably converts a per-MHz EIRP into
          # dBm/10MHz (grants span 3550-3560 MHz) - confirm with iap units.
          neighbor_grants.append(
              [sensor_name[1], sensor_name[2], idx,
               grant.cbsd_category, grant.indoor_deployment, grant.height_agl,
               dist_km, grant.antenna_gain,
               grant.max_eirp + 10,
               min(grant.iap_eirp) + 10])
      file_name = sensor_name[2] + '_neighbors.csv'
      # NOTE(review): on Python 3, csv files should be opened with
      # newline='' to avoid blank rows on Windows - left as-is to keep
      # Python 2 compatibility of this script.
      with open(file_name, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(
            ['ESC Network', 'ESC Sensor', 'CBSD ID', 'CBSD Category',
             'Indoor CBSD', 'CBSD AGL', 'Distance to ESC (km)',
             'CBSD Antenna Gain (dBi)', 'Max EIRP (dBm/10MHz)',
             'Actual EIRP (dBm/10MHz)'])
        writer.writerows(neighbor_grants)

  # Create figure with simple projection.
  fig = plt.figure(figsize=(10, 10))
  ax = fig.add_subplot(111, projection=ccrs.PlateCarree())

  # Finds the bounding box (all CBSDs), with ~10 km of margin.
  box_margin = 0.1  # in degrees, about 10km
  box = sgeo.box(*sgeo.MultiPoint(
      [(grant.longitude, grant.latitude) for grant in grants]).bounds)
  box = box.buffer(box_margin)

  # Plot geometries.
  ax.axis([box.bounds[0], box.bounds[2], box.bounds[1], box.bounds[3]])
  ax.coastlines()
  ax.stock_img()

  # class1: no power reduction
  class1_grants = [grant for grant in grants
                   if max(grant.iap_eirp) == min(grant.iap_eirp)]
  class1_locations = ([grant.longitude for grant in class1_grants],
                      [grant.latitude for grant in class1_grants])
  ax.scatter(*class1_locations, c='g', marker='1', s=50,
             label='0 dB power reduction: %d' % len(class1_grants))
  # class2: less than 10 dB power reduction
  class2_grants = [grant for grant in grants
                   if (max(grant.iap_eirp) > min(grant.iap_eirp)
                       and max(grant.iap_eirp) - min(grant.iap_eirp) < 10)]
  class2_locations = ([grant.longitude for grant in class2_grants],
                      [grant.latitude for grant in class2_grants])
  ax.scatter(*class2_locations, c='b', marker='1', s=50,
             label='<10 dB power reduction: %d' % len(class2_grants))
  # class3: 10 dB or more power reduction
  class3_grants = [grant for grant in grants
                   if max(grant.iap_eirp) - min(grant.iap_eirp) >= 10]
  class3_locations = ([grant.longitude for grant in class3_grants],
                      [grant.latitude for grant in class3_grants])
  ax.scatter(*class3_locations, c='r', marker='1', s=50,
             label='>=10 dB power reduction: %d' % len(class3_grants))
  ax.legend(loc=0)
  ax.set_title('ESC Protection')

  # Histogram of power reduction. (The earlier `delta_eirp` list duplicated
  # this computation without ever being used; it has been removed.)
  power_reduction = [max(grant.iap_eirp) - min(grant.iap_eirp)
                     for grant in grants]
  plt.figure()
  plt.hist(power_reduction, bins=np.arange(0.1, 50, 1))
  plt.xlabel('CBSD power reduction')
  plt.ylabel('# of CBSDs')
  plt.grid()
#----------------------------------------------------------------
# Script main runner
if __name__ == '__main__':
  esc_fads = options.esc_fads.split(',')
  # An empty --sensors flag means "no filtering". Avoid passing [''] to the
  # simulator: the empty string is a substring of every sensor ID, so it
  # matched all sensors only by accident. None makes the intent explicit.
  sensor_filters = options.sensors.split(',') if options.sensors else None
  esc_impact_sim(options.cbsd_file, esc_fads, sensor_filters,
                 options.per_sensor, options.output_csv,
                 force_catb_omni=not options.sectorized_catb)
  plt.show(block=True)
from datetime import date, datetime, timedelta
import random
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import MergeError, merge
# Default sizes for the random test data built by get_test_data():
# N rows spread over NGROUPS distinct group labels.
N = 50
NGROUPS = 8
def get_test_data(ngroups=NGROUPS, n=N):
    """Return ``n`` group labels drawn from ``range(ngroups)``, shuffled.

    Labels are repeated as evenly as possible; when ``n`` is not a multiple
    of ``ngroups`` the first labels are used once more to pad the result.
    """
    labels = list(range(ngroups))
    data = list(np.tile(labels, n // ngroups))
    if len(data) < n:
        # Pad with the leading labels to reach exactly n elements.
        data = data + labels[: n - len(data)]
    shuffled = np.asarray(data)
    random.shuffle(shuffled)
    return shuffled
def get_series():
    """Return one-element Series covering a spread of dtypes."""
    specs = [
        ([1], "int64"),
        ([1], "Int64"),
        ([1.23], None),
        (["foo"], None),
        ([True], None),
        ([pd.Timestamp("2018-01-01")], None),
        ([pd.Timestamp("2018-01-01", tz="US/Eastern")], None),
    ]
    # dtype=None lets pandas infer, identical to a bare Series(data) call.
    return [Series(data, dtype=dtype) for data, dtype in specs]
def get_series_na():
    """Return one-element all-NA Series of several dtypes."""
    na_series = [
        Series([np.nan], dtype=dtype) for dtype in ("Int64", "float", "object")
    ]
    na_series.append(Series([pd.NaT]))
    return na_series
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype(request):
    """
    Parametrized fixture: one one-element Series per dtype produced by
    get_series().
    """
    return request.param
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype2(request):
    """
    Duplicate of the series_of_dtype fixture, so a single test can request
    two independently-parametrized Series.
    """
    return request.param
@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
def series_of_dtype_all_na(request):
    """
    Parametrized fixture: one all-NA one-element Series per dtype produced
    by get_series_na().
    """
    return request.param
class TestMerge:
    def setup_method(self, method):
        """Build the random frames (df, df2, left, right) shared by these tests."""
        # aggregate multiple columns
        self.df = DataFrame(
            {
                "key1": get_test_data(),
                "key2": get_test_data(),
                "data1": np.random.randn(N),
                "data2": np.random.randn(N),
            }
        )
        # exclude a couple keys for fun
        self.df = self.df[self.df["key2"] > 1]
        self.df2 = DataFrame(
            {
                "key1": get_test_data(n=N // 5),
                "key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
                "value": np.random.randn(N // 5),
            }
        )
        # small frames for key-vs-index merge tests
        self.left = DataFrame(
            {"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
        )
        self.right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
    def test_merge_inner_join_empty(self):
        """Index merge with an empty frame yields an empty frame, keeping int64."""
        # GH 15328
        df_empty = DataFrame()
        df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
        result = pd.merge(df_empty, df_a, left_index=True, right_index=True)
        expected = DataFrame({"a": []}, index=[], dtype="int64")
        tm.assert_frame_equal(result, expected)
    def test_merge_common(self):
        """merge() without ``on`` defaults to the common columns (key1, key2)."""
        joined = merge(self.df, self.df2)
        exp = merge(self.df, self.df2, on=["key1", "key2"])
        tm.assert_frame_equal(joined, exp)
    def test_merge_non_string_columns(self):
        """merge() runs when column labels are ints rather than strings."""
        # https://github.com/pandas-dev/pandas/issues/17962
        # Checks that method runs for non string column names
        left = DataFrame(
            {0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]}
        )
        right = left.astype(float)
        expected = left
        result = pd.merge(left, right)
        tm.assert_frame_equal(expected, result)
    def test_merge_index_as_on_arg(self):
        """``on`` may name the index; result matches the column-based merge."""
        # GH14355
        left = self.df.set_index("key1")
        right = self.df2.set_index("key1")
        result = merge(left, right, on="key1")
        expected = merge(self.df, self.df2, on="key1").set_index("key1")
        tm.assert_frame_equal(result, expected)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=False
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=False
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=True
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=True
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
# inner join
result = merge(left, right, left_on="key", right_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected)
result = merge(right, left, right_on="key", left_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected.loc[:, result.columns])
    def test_merge_misspecified(self):
        """Inconsistent key specifications raise MergeError / ValueError."""
        msg = "Must pass right_on or right_index=True"
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(self.left, self.right, left_index=True)
        msg = "Must pass left_on or left_index=True"
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(self.left, self.right, right_index=True)
        msg = (
            'Can only pass argument "on" OR "left_on" and "right_on", not '
            "a combination of both"
        )
        with pytest.raises(pd.errors.MergeError, match=msg):
            merge(self.left, self.left, left_on="key", on="key")
        msg = r"len\(right_on\) must equal len\(left_on\)"
        with pytest.raises(ValueError, match=msg):
            merge(self.df, self.df2, left_on=["key1"], right_on=["key1", "key2"])
    def test_index_and_on_parameters_confusion(self):
        """Passing lists for left_index/right_index raises a clear ValueError."""
        msg = "right_index parameter must be of type bool, not <class 'list'>"
        with pytest.raises(ValueError, match=msg):
            merge(
                self.df,
                self.df2,
                how="left",
                left_index=False,
                right_index=["key1", "key2"],
            )
        msg = "left_index parameter must be of type bool, not <class 'list'>"
        with pytest.raises(ValueError, match=msg):
            merge(
                self.df,
                self.df2,
                how="left",
                left_index=["key1", "key2"],
                right_index=False,
            )
        with pytest.raises(ValueError, match=msg):
            merge(
                self.df,
                self.df2,
                how="left",
                left_index=["key1", "key2"],
                right_index=["key1", "key2"],
            )
    def test_merge_overlap(self):
        """Self-merge suffixes overlapping value columns with _x/_y."""
        merged = merge(self.left, self.left, on="key")
        # each key with count c contributes c*c rows to the self-merge
        exp_len = (self.left["key"].value_counts() ** 2).sum()
        assert len(merged) == exp_len
        assert "v1_x" in merged
        assert "v1_y" in merged
    def test_merge_different_column_key_names(self):
        """Outer merge on differently-named keys keeps both key columns."""
        left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
        right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]})
        merged = left.merge(
            right, left_on="lkey", right_on="rkey", how="outer", sort=True
        )
        # unmatched rows get NaN in the other side's key and value columns
        exp = Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
        tm.assert_series_equal(merged["lkey"], exp)
        exp = Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
        tm.assert_series_equal(merged["rkey"], exp)
        exp = Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
        tm.assert_series_equal(merged["value_x"], exp)
        exp = Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
        tm.assert_series_equal(merged["value_y"], exp)
def test_merge_copy(self):
left = DataFrame({"a": 0, "b": 1}, index=range(10))
right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
merged = merge(left, right, left_index=True, right_index=True, copy=True)
merged["a"] = 6
assert (left["a"] == 0).all()
merged["d"] = "peekaboo"
assert (right["d"] == "bar").all()
    def test_merge_nocopy(self):
        """With copy=False, mutating the merge result writes through to the inputs."""
        left = DataFrame({"a": 0, "b": 1}, index=range(10))
        right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
        merged = merge(left, right, left_index=True, right_index=True, copy=False)
        merged["a"] = 6
        assert (left["a"] == 6).all()
        merged["d"] = "peekaboo"
        assert (right["d"] == "peekaboo").all()
    def test_intelligently_handle_join_key(self):
        """Outer merge returns a consolidated frame; missing values become NaN."""
        # #733, be a bit more 1337 about not returning unconsolidated DataFrame
        left = DataFrame(
            {"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"]
        )
        right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))})
        joined = merge(left, right, on="key", how="outer")
        expected = DataFrame(
            {
                "key": [1, 1, 1, 1, 2, 2, 3, 4, 5],
                "value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]),
                "rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5],
            },
            columns=["value", "key", "rvalue"],
        )
        tm.assert_frame_equal(joined, expected)
    def test_merge_join_key_dtype_cast(self):
        """Key dtype after outer merge: int stays int64, bool becomes object."""
        # #8596
        df1 = DataFrame({"key": [1], "v1": [10]})
        df2 = DataFrame({"key": [2], "v1": [20]})
        df = merge(df1, df2, how="outer")
        assert df["key"].dtype == "int64"
        df1 = DataFrame({"key": [True], "v1": [1]})
        df2 = DataFrame({"key": [False], "v1": [0]})
        df = merge(df1, df2, how="outer")
        # GH13169
        # this really should be bool
        assert df["key"].dtype == "object"
        # keys given as raw arrays produce a generated "key_0" column
        df1 = DataFrame({"val": [1]})
        df2 = DataFrame({"val": [2]})
        lkey = np.array([1])
        rkey = np.array([2])
        df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer")
        assert df["key_0"].dtype == "int64"
    def test_handle_join_key_pass_array(self):
        """left_on/right_on accept raw arrays; generated key column is key_0."""
        left = DataFrame(
            {"key": [1, 1, 2, 2, 3], "value": np.arange(5)}, columns=["value", "key"]
        )
        right = DataFrame({"rvalue": np.arange(6)})
        key = np.array([1, 1, 2, 3, 4, 5])
        merged = merge(left, right, left_on="key", right_on=key, how="outer")
        merged2 = merge(right, left, left_on=key, right_on="key", how="outer")
        # the named "key" column is filled from either side, never NaN
        tm.assert_series_equal(merged["key"], merged2["key"])
        assert merged["key"].notna().all()
        assert merged2["key"].notna().all()
        left = DataFrame({"value": np.arange(5)}, columns=["value"])
        right = DataFrame({"rvalue": np.arange(6)})
        lkey = np.array([1, 1, 2, 2, 3])
        rkey = np.array([1, 1, 2, 3, 4, 5])
        merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer")
        tm.assert_series_equal(
            merged["key_0"], Series([1, 1, 1, 1, 2, 2, 3, 4, 5], name="key_0")
        )
        left = DataFrame({"value": np.arange(3)})
        right = DataFrame({"rvalue": np.arange(6)})
        key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
        merged = merge(left, right, left_index=True, right_on=key, how="outer")
        tm.assert_series_equal(merged["key_0"], Series(key, name="key_0"))
    def test_no_overlap_more_informative_error(self):
        """merge() with no common columns raises MergeError listing the options."""
        dt = datetime.now()
        df1 = DataFrame({"x": ["a"]}, index=[dt])
        df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
        msg = (
            "No common columns to perform merge on. "
            f"Merge options: left_on={None}, right_on={None}, "
            f"left_index={False}, right_index={False}"
        )
        with pytest.raises(MergeError, match=msg):
            merge(df1, df2)
    def test_merge_non_unique_indexes(self):
        """Index merges with duplicate datetime labels (checked via _check_merge)."""
        dt = datetime(2012, 5, 1)
        dt2 = datetime(2012, 5, 2)
        dt3 = datetime(2012, 5, 3)
        dt4 = datetime(2012, 5, 4)
        df1 = DataFrame({"x": ["a"]}, index=[dt])
        df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
        _check_merge(df1, df2)
        # Not monotonic
        df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4])
        df2 = DataFrame(
            {"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt]
        )
        _check_merge(df1, df2)
        df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt])
        df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt])
        _check_merge(df1, df2)
    def test_merge_non_unique_index_many_to_many(self):
        """Many-to-many index merge with duplicates on both sides (_check_merge)."""
        dt = datetime(2012, 5, 1)
        dt2 = datetime(2012, 5, 2)
        dt3 = datetime(2012, 5, 3)
        df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt])
        df2 = DataFrame(
            {"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt]
        )
        _check_merge(df1, df2)
    def test_left_merge_empty_dataframe(self):
        """Left (or mirrored right) merge with an empty frame returns the non-empty side."""
        left = DataFrame({"key": [1], "value": [2]})
        right = DataFrame({"key": []})
        result = merge(left, right, on="key", how="left")
        tm.assert_frame_equal(result, left)
        result = merge(right, left, on="key", how="right")
        tm.assert_frame_equal(result, left)
    @pytest.mark.parametrize(
        "kwarg",
        [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
            {"left_on": "a", "right_index": True},
            {"left_on": "a", "right_on": "x"},
        ],
    )
    def test_merge_left_empty_right_empty(self, join_type, kwarg):
        """Merging two empty frames yields an empty frame with combined columns."""
        # GH 10824
        left = DataFrame(columns=["a", "b", "c"])
        right = DataFrame(columns=["x", "y", "z"])
        exp_in = DataFrame(
            columns=["a", "b", "c", "x", "y", "z"],
            index=pd.Index([], dtype=object),
            dtype=object,
        )
        result = pd.merge(left, right, how=join_type, **kwarg)
        tm.assert_frame_equal(result, exp_in)
    def test_merge_left_empty_right_notempty(self):
        """Empty left: inner/left merges are empty, right/outer keep right's rows."""
        # GH 10824
        left = DataFrame(columns=["a", "b", "c"])
        right = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"])
        exp_out = DataFrame(
            {
                "a": np.array([np.nan] * 3, dtype=object),
                "b": np.array([np.nan] * 3, dtype=object),
                "c": np.array([np.nan] * 3, dtype=object),
                "x": [1, 4, 7],
                "y": [2, 5, 8],
                "z": [3, 6, 9],
            },
            columns=["a", "b", "c", "x", "y", "z"],
        )
        exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype
        # result will have object dtype
        exp_in.index = exp_in.index.astype(object)

        def check1(exp, kwarg):
            # inner and left joins of an empty left frame produce the empty result
            result = pd.merge(left, right, how="inner", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = pd.merge(left, right, how="left", **kwarg)
            tm.assert_frame_equal(result, exp)

        def check2(exp, kwarg):
            # right and outer joins keep the right frame's rows
            result = pd.merge(left, right, how="right", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = pd.merge(left, right, how="outer", **kwarg)
            tm.assert_frame_equal(result, exp)

        for kwarg in [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
        ]:
            check1(exp_in, kwarg)
            check2(exp_out, kwarg)

        kwarg = {"left_on": "a", "right_index": True}
        check1(exp_in, kwarg)
        # joining on the right index aligns "a" with the index values
        exp_out["a"] = [0, 1, 2]
        check2(exp_out, kwarg)

        kwarg = {"left_on": "a", "right_on": "x"}
        check1(exp_in, kwarg)
        exp_out["a"] = np.array([np.nan] * 3, dtype=object)
        check2(exp_out, kwarg)
    def test_merge_left_notempty_right_empty(self):
        """Empty right: inner/right merges are empty, left/outer keep left's rows."""
        # GH 10824
        left = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
        right = DataFrame(columns=["x", "y", "z"])
        exp_out = DataFrame(
            {
                "a": [1, 4, 7],
                "b": [2, 5, 8],
                "c": [3, 6, 9],
                "x": np.array([np.nan] * 3, dtype=object),
                "y": np.array([np.nan] * 3, dtype=object),
                "z": np.array([np.nan] * 3, dtype=object),
            },
            columns=["a", "b", "c", "x", "y", "z"],
        )
        exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype
        # result will have object dtype
        exp_in.index = exp_in.index.astype(object)

        def check1(exp, kwarg):
            # inner and right joins of an empty right frame produce the empty result
            result = pd.merge(left, right, how="inner", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = pd.merge(left, right, how="right", **kwarg)
            tm.assert_frame_equal(result, exp)

        def check2(exp, kwarg):
            # left and outer joins keep the left frame's rows
            result = pd.merge(left, right, how="left", **kwarg)
            tm.assert_frame_equal(result, exp)
            result = pd.merge(left, right, how="outer", **kwarg)
            tm.assert_frame_equal(result, exp)

        for kwarg in [
            {"left_index": True, "right_index": True},
            {"left_index": True, "right_on": "x"},
            {"left_on": "a", "right_index": True},
            {"left_on": "a", "right_on": "x"},
        ]:
            check1(exp_in, kwarg)
            check2(exp_out, kwarg)
    def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
        """Merging an empty slice preserves each column's dtype."""
        # GH 25183
        df = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype2},
            columns=["key", "value"],
        )
        df_empty = df[:0]
        expected = DataFrame(
            {
                "value_x": Series(dtype=df.dtypes["value"]),
                "key": Series(dtype=df.dtypes["key"]),
                "value_y": Series(dtype=df.dtypes["value"]),
            },
            columns=["value_x", "key", "value_y"],
        )
        actual = df_empty.merge(df, on="key")
        tm.assert_frame_equal(actual, expected)
    def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na):
        """All-NA value columns keep their dtype through a merge."""
        # GH 25183
        df_left = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype_all_na},
            columns=["key", "value"],
        )
        df_right = DataFrame(
            {"key": series_of_dtype, "value": series_of_dtype_all_na},
            columns=["key", "value"],
        )
        expected = DataFrame(
            {
                "key": series_of_dtype,
                "value_x": series_of_dtype_all_na,
                "value_y": series_of_dtype_all_na,
            },
            columns=["key", "value_x", "value_y"],
        )
        actual = df_left.merge(df_right, on="key")
        tm.assert_frame_equal(actual, expected)
    def test_merge_nosort(self):
        """sort=False keeps the left frame's key order in the result."""
        # GH#2098, TODO: anything to do?
        d = {
            "var1": np.random.randint(0, 10, size=10),
            "var2": np.random.randint(0, 10, size=10),
            "var3": [
                datetime(2012, 1, 12),
                datetime(2011, 2, 4),
                datetime(2010, 2, 3),
                datetime(2012, 1, 12),
                datetime(2011, 2, 4),
                datetime(2012, 4, 3),
                datetime(2012, 3, 4),
                datetime(2008, 5, 1),
                datetime(2010, 2, 3),
                datetime(2012, 2, 3),
            ],
        }
        df = DataFrame.from_dict(d)
        # var3 has exactly 7 unique datetimes, matching var8's length below
        var3 = df.var3.unique()
        var3.sort()
        new = DataFrame.from_dict({"var3": var3, "var8": np.random.random(7)})
        result = df.merge(new, on="var3", sort=False)
        exp = merge(df, new, on="var3", sort=False)
        tm.assert_frame_equal(result, exp)
        assert (df.var3.unique() == result.var3.unique()).all()
    @pytest.mark.parametrize(
        ("sort", "values"), [(False, [1, 1, 0, 1, 1]), (True, [0, 1, 1, 1, 1])]
    )
    @pytest.mark.parametrize("how", ["left", "right"])
    def test_merge_same_order_left_right(self, sort, values, how):
        """Self-merge row order for left/right joins matches regardless of side."""
        # GH#35382
        df = DataFrame({"a": [1, 0, 1]})
        result = df.merge(df, on="a", how=how, sort=sort)
        expected = DataFrame(values, columns=["a"])
        tm.assert_frame_equal(result, expected)
    def test_merge_nan_right(self):
        """join() on a key column fills unmatched right rows with NaN, upcasting."""
        df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
        df2 = DataFrame({"i1": [0], "i3": [0]})
        result = df1.join(df2, on="i1", rsuffix="_")
        # the None-keyed column reproduces the positional index before reset
        expected = (
            DataFrame(
                {
                    "i1": {0: 0.0, 1: 1},
                    "i2": {0: 0, 1: 1},
                    "i1_": {0: 0, 1: np.nan},
                    "i3": {0: 0.0, 1: np.nan},
                    None: {0: 0, 1: 0},
                }
            )
            .set_index(None)
            .reset_index()[["i1", "i2", "i1_", "i3"]]
        )
        tm.assert_frame_equal(result, expected, check_dtype=False)
        df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
        df2 = DataFrame({"i1": [0], "i3": [0.7]})
        result = df1.join(df2, rsuffix="_", on="i1")
        expected = DataFrame(
            {
                "i1": {0: 0, 1: 1},
                "i1_": {0: 0.0, 1: np.nan},
                "i2": {0: 0.5, 1: 1.5},
                "i3": {0: 0.69999999999999996, 1: np.nan},
            }
        )[["i1", "i2", "i1_", "i3"]]
        tm.assert_frame_equal(result, expected)
    def test_merge_type(self):
        """merge() preserves a DataFrame subclass via its _constructor."""
        class NotADataFrame(DataFrame):
            @property
            def _constructor(self):
                return NotADataFrame

        nad = NotADataFrame(self.df)
        result = nad.merge(self.df2, on="key1")
        assert isinstance(result, NotADataFrame)
    def test_join_append_timedeltas(self):
        """append/join handle datetime64 and timedelta64 columns."""
        # timedelta64 issues with join/merge
        # GH 5695
        d = {"d": datetime(2013, 11, 5, 5, 56), "t": timedelta(0, 22500)}
        df = DataFrame(columns=list("dt"))
        df = df.append(d, ignore_index=True)
        result = df.append(d, ignore_index=True)
        expected = DataFrame(
            {
                "d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
                "t": [timedelta(0, 22500), timedelta(0, 22500)],
            }
        )
        tm.assert_frame_equal(result, expected)
        # unmatched left-join rows become NaT in the suffixed column
        td = np.timedelta64(300000000)
        lhs = DataFrame(Series([td, td], index=["A", "B"]))
        rhs = DataFrame(Series([td], index=["A"]))
        result = lhs.join(rhs, rsuffix="r", how="left")
        expected = DataFrame(
            {
                "0": Series([td, td], index=list("AB")),
                "0r": Series([td, pd.NaT], index=list("AB")),
            }
        )
        tm.assert_frame_equal(result, expected)
    def test_other_datetime_unit(self):
        """Non-ns datetime64 units coerce to datetime64[ns] and survive a merge."""
        # GH 13389
        df1 = DataFrame({"entity_id": [101, 102]})
        s = Series([None, None], index=[101, 102], name="days")
        for dtype in [
            "datetime64[D]",
            "datetime64[h]",
            "datetime64[m]",
            "datetime64[s]",
            "datetime64[ms]",
            "datetime64[us]",
            "datetime64[ns]",
        ]:
            df2 = s.astype(dtype).to_frame("days")
            # coerces to datetime64[ns], thus should not be affected
            assert df2["days"].dtype == "datetime64[ns]"
            result = df1.merge(df2, left_on="entity_id", right_index=True)
            exp = DataFrame(
                {
                    "entity_id": [101, 102],
                    "days": np.array(["nat", "nat"], dtype="datetime64[ns]"),
                },
                columns=["entity_id", "days"],
            )
            tm.assert_frame_equal(result, exp)
    @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
    def test_other_timedelta_unit(self, unit):
        """Non-ns timedelta64 units coerce to m8[ns] and survive a merge."""
        # GH 13389
        df1 = DataFrame({"entity_id": [101, 102]})
        s = Series([None, None], index=[101, 102], name="days")
        dtype = f"m8[{unit}]"
        df2 = s.astype(dtype).to_frame("days")
        assert df2["days"].dtype == "m8[ns]"
        result = df1.merge(df2, left_on="entity_id", right_index=True)
        exp = DataFrame(
            {"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)},
            columns=["entity_id", "days"],
        )
        tm.assert_frame_equal(result, exp)
    def test_overlapping_columns_error_message(self):
        """Duplicate non-key labels merge off 'key'; duplicated common columns raise."""
        df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
        df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
        df.columns = ["key", "foo", "foo"]
        df2.columns = ["key", "bar", "bar"]
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "v1": [4, 5, 6],
                "v2": [7, 8, 9],
                "v3": [4, 5, 6],
                "v4": [7, 8, 9],
            }
        )
        expected.columns = ["key", "foo", "foo", "bar", "bar"]
        tm.assert_frame_equal(merge(df, df2), expected)
        # #2649, #10639: "foo" would be a duplicated implicit merge key
        df2.columns = ["key1", "foo", "foo"]
        msg = r"Data columns not unique: Index\(\['foo'\], dtype='object'\)"
        with pytest.raises(MergeError, match=msg):
            merge(df, df2)
    def test_merge_on_datetime64tz(self):
        """tz-aware datetimes as merge keys and values survive an outer merge."""
        # GH11405
        left = DataFrame(
            {
                "key": pd.date_range("20151010", periods=2, tz="US/Eastern"),
                "value": [1, 2],
            }
        )
        right = DataFrame(
            {
                "key": pd.date_range("20151011", periods=3, tz="US/Eastern"),
                "value": [1, 2, 3],
            }
        )
        expected = DataFrame(
            {
                "key": pd.date_range("20151010", periods=4, tz="US/Eastern"),
                "value_x": [1, 2, np.nan, np.nan],
                "value_y": [np.nan, 1, 2, 3],
            }
        )
        result = pd.merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        # tz-aware datetimes as *values*: unmatched rows become NaT
        left = DataFrame(
            {
                "key": [1, 2],
                "value": pd.date_range("20151010", periods=2, tz="US/Eastern"),
            }
        )
        right = DataFrame(
            {
                "key": [2, 3],
                "value": pd.date_range("20151011", periods=2, tz="US/Eastern"),
            }
        )
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern"))
                + [pd.NaT],
                "value_y": [pd.NaT]
                + list(pd.date_range("20151011", periods=2, tz="US/Eastern")),
            }
        )
        result = pd.merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        assert result["value_x"].dtype == "datetime64[ns, US/Eastern]"
        assert result["value_y"].dtype == "datetime64[ns, US/Eastern]"
    def test_merge_on_datetime64tz_empty(self):
        """Merging empty tz-aware frames preserves the DatetimeTZDtype."""
        # https://github.com/pandas-dev/pandas/issues/25014
        dtz = pd.DatetimeTZDtype(tz="UTC")
        right = DataFrame(
            {
                "date": [pd.Timestamp("2018", tz=dtz.tz)],
                "value": [4.0],
                "date2": [pd.Timestamp("2019", tz=dtz.tz)],
            },
            columns=["date", "value", "date2"],
        )
        left = right[:0]
        result = left.merge(right, on="date")
        expected = DataFrame(
            {
                "value_x": Series(dtype=float),
                "date2_x": Series(dtype=dtz),
                "date": Series(dtype=dtz),
                "value_y": Series(dtype=float),
                "date2_y": Series(dtype=dtz),
            },
            columns=["value_x", "date2_x", "date", "value_y", "date2_y"],
        )
        tm.assert_frame_equal(result, expected)
    def test_merge_datetime64tz_with_dst_transition(self):
        """Outer merge on tz-aware keys spanning a DST transition keeps order/tz."""
        # GH 18885
        df1 = DataFrame(
            pd.date_range("2017-10-29 01:00", periods=4, freq="H", tz="Europe/Madrid"),
            columns=["date"],
        )
        df1["value"] = 1
        df2 = DataFrame(
            {
                "date": pd.to_datetime(
                    [
                        "2017-10-29 03:00:00",
                        "2017-10-29 04:00:00",
                        "2017-10-29 05:00:00",
                    ]
                ),
                "value": 2,
            }
        )
        # localize via UTC so the ambiguous DST hour is handled deterministically
        df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid")
        result = pd.merge(df1, df2, how="outer", on="date")
        expected = DataFrame(
            {
                "date": pd.date_range(
                    "2017-10-29 01:00", periods=7, freq="H", tz="Europe/Madrid"
                ),
                "value_x": [1] * 4 + [np.nan] * 3,
                "value_y": [np.nan] * 4 + [2] * 3,
            }
        )
        tm.assert_frame_equal(result, expected)
    def test_merge_non_unique_period_index(self):
        """Self-merge on a duplicated PeriodIndex repeats each row correctly."""
        # GH #16871
        index = pd.period_range("2016-01-01", periods=16, freq="M")
        df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
        df2 = concat([df, df])
        result = df.merge(df2, left_index=True, right_index=True, how="inner")
        expected = DataFrame(
            np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
            columns=["pnum_x", "pnum_y"],
            index=df2.sort_index().index,
        )
        tm.assert_frame_equal(result, expected)
    def test_merge_on_periods(self):
        """Period columns as merge keys and values keep Period[D] dtype."""
        left = DataFrame(
            {"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]}
        )
        right = DataFrame(
            {
                "key": pd.period_range("20151011", periods=3, freq="D"),
                "value": [1, 2, 3],
            }
        )
        expected = DataFrame(
            {
                "key": pd.period_range("20151010", periods=4, freq="D"),
                "value_x": [1, 2, np.nan, np.nan],
                "value_y": [np.nan, 1, 2, 3],
            }
        )
        result = pd.merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        # Periods as *values*: unmatched rows become NaT
        left = DataFrame(
            {"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")}
        )
        right = DataFrame(
            {"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")}
        )
        exp_x = pd.period_range("20151010", periods=2, freq="D")
        exp_y = pd.period_range("20151011", periods=2, freq="D")
        expected = DataFrame(
            {
                "key": [1, 2, 3],
                "value_x": list(exp_x) + [pd.NaT],
                "value_y": [pd.NaT] + list(exp_y),
            }
        )
        result = pd.merge(left, right, on="key", how="outer")
        tm.assert_frame_equal(result, expected)
        assert result["value_x"].dtype == "Period[D]"
        assert result["value_y"].dtype == "Period[D]"
    def test_indicator(self):
        """End-to-end checks for the ``indicator`` option of merge:
        column contents, custom naming, argument validation, and
        name-collision errors."""
        # PR #10054. xref #7412 and closes #8790.
        df1 = DataFrame(
            {"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]}
        )
        df1_copy = df1.copy()

        df2 = DataFrame(
            {
                "col1": [1, 2, 3, 4, 5],
                "col_conflict": [1, 2, 3, 4, 5],
                "col_right": [2, 2, 2, 2, 2],
            }
        )
        df2_copy = df2.copy()

        # Hand-built expected outer-merge result, including the categorical
        # "_merge" indicator column appended last.
        df_result = DataFrame(
            {
                "col1": [0, 1, 2, 3, 4, 5],
                "col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan],
                "col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan],
                "col_conflict_y": [np.nan, 1, 2, 3, 4, 5],
                "col_right": [np.nan, 2, 2, 2, 2, 2],
            }
        )
        df_result["_merge"] = Categorical(
            [
                "left_only",
                "both",
                "right_only",
                "right_only",
                "right_only",
                "right_only",
            ],
            categories=["left_only", "right_only", "both"],
        )
        df_result = df_result[
            [
                "col1",
                "col_conflict_x",
                "col_left",
                "col_conflict_y",
                "col_right",
                "_merge",
            ]
        ]

        # Both the top-level function and the method form must agree.
        test = merge(df1, df2, on="col1", how="outer", indicator=True)
        tm.assert_frame_equal(test, df_result)
        test = df1.merge(df2, on="col1", how="outer", indicator=True)
        tm.assert_frame_equal(test, df_result)

        # No side effects
        tm.assert_frame_equal(df1, df1_copy)
        tm.assert_frame_equal(df2, df2_copy)

        # Check with custom name
        df_result_custom_name = df_result
        df_result_custom_name = df_result_custom_name.rename(
            columns={"_merge": "custom_name"}
        )

        test_custom_name = merge(
            df1, df2, on="col1", how="outer", indicator="custom_name"
        )
        tm.assert_frame_equal(test_custom_name, df_result_custom_name)
        test_custom_name = df1.merge(
            df2, on="col1", how="outer", indicator="custom_name"
        )
        tm.assert_frame_equal(test_custom_name, df_result_custom_name)

        # Check only accepts strings and booleans
        msg = "indicator option can only accept boolean or string arguments"
        with pytest.raises(ValueError, match=msg):
            merge(df1, df2, on="col1", how="outer", indicator=5)
        with pytest.raises(ValueError, match=msg):
            df1.merge(df2, on="col1", how="outer", indicator=5)

        # Check result integrity: indicator values must be consistent with
        # the join direction for each `how`.
        test2 = merge(df1, df2, on="col1", how="left", indicator=True)
        assert (test2._merge != "right_only").all()
        test2 = df1.merge(df2, on="col1", how="left", indicator=True)
        assert (test2._merge != "right_only").all()

        test3 = merge(df1, df2, on="col1", how="right", indicator=True)
        assert (test3._merge != "left_only").all()
        test3 = df1.merge(df2, on="col1", how="right", indicator=True)
        assert (test3._merge != "left_only").all()

        test4 = merge(df1, df2, on="col1", how="inner", indicator=True)
        assert (test4._merge == "both").all()
        test4 = df1.merge(df2, on="col1", how="inner", indicator=True)
        assert (test4._merge == "both").all()

        # Check if working name in df: reserved internal column names and
        # "_merge" itself must be rejected when indicator=True.
        for i in ["_right_indicator", "_left_indicator", "_merge"]:
            df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})

            msg = (
                "Cannot use `indicator=True` option when data contains a "
                f"column named {i}|"
                "Cannot use name of an existing column for indicator column"
            )
            with pytest.raises(ValueError, match=msg):
                merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
            with pytest.raises(ValueError, match=msg):
                df1.merge(df_badcolumn, on="col1", how="outer", indicator=True)

        # Check for name conflict with custom name
        df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]})

        msg = "Cannot use name of an existing column for indicator column"
        with pytest.raises(ValueError, match=msg):
            merge(
                df1,
                df_badcolumn,
                on="col1",
                how="outer",
                indicator="custom_column_name",
            )
        with pytest.raises(ValueError, match=msg):
            df1.merge(
                df_badcolumn, on="col1", how="outer", indicator="custom_column_name"
            )

        # Merge on multiple columns
        df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]})

        df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]})

        hand_coded_result = DataFrame(
            {"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]}
        )
        hand_coded_result["_merge"] = Categorical(
            ["left_only", "both", "right_only", "right_only"],
            categories=["left_only", "right_only", "both"],
        )

        test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True)
        tm.assert_frame_equal(test5, hand_coded_result)
        test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True)
        tm.assert_frame_equal(test5, hand_coded_result)
    def test_validation(self):
        """Exercise the ``validate`` argument of merge: the 1:1 / 1:m / m:1 /
        m:m checks, their long-form spellings, MergeError messages for
        duplicated keys, and rejection of invalid values."""
        left = DataFrame(
            {"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]},
            index=range(4),
        )

        right = DataFrame(
            {
                "a": ["a", "b", "c", "d", "e"],
                "c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"],
            },
            index=range(5),
        )

        # Make sure no side effects.
        left_copy = left.copy()
        right_copy = right.copy()

        result = merge(left, right, left_index=True, right_index=True, validate="1:1")
        tm.assert_frame_equal(left, left_copy)
        tm.assert_frame_equal(right, right_copy)

        # make sure merge still correct
        expected = DataFrame(
            {
                "a_x": ["a", "b", "c", "d"],
                "b": ["cat", "dog", "weasel", "horse"],
                "a_y": ["a", "b", "c", "d"],
                "c": ["meow", "bark", "um... weasel noise?", "nay"],
            },
            index=range(4),
            columns=["a_x", "b", "a_y", "c"],
        )

        # "one_to_one" is the long-form spelling of "1:1".
        result = merge(
            left, right, left_index=True, right_index=True, validate="one_to_one"
        )
        tm.assert_frame_equal(result, expected)

        expected_2 = DataFrame(
            {
                "a": ["a", "b", "c", "d"],
                "b": ["cat", "dog", "weasel", "horse"],
                "c": ["meow", "bark", "um... weasel noise?", "nay"],
            },
            index=range(4),
        )

        result = merge(left, right, on="a", validate="1:1")
        tm.assert_frame_equal(left, left_copy)
        tm.assert_frame_equal(right, right_copy)
        tm.assert_frame_equal(result, expected_2)

        result = merge(left, right, on="a", validate="one_to_one")
        tm.assert_frame_equal(result, expected_2)

        # One index, one column
        expected_3 = DataFrame(
            {
                "b": ["cat", "dog", "weasel", "horse"],
                "a": ["a", "b", "c", "d"],
                "c": ["meow", "bark", "um... weasel noise?", "nay"],
            },
            columns=["b", "a", "c"],
            index=range(4),
        )

        left_index_reset = left.set_index("a")
        result = merge(
            left_index_reset,
            right,
            left_index=True,
            right_on="a",
            validate="one_to_one",
        )
        tm.assert_frame_equal(result, expected_3)

        # Dups on right
        # NOTE(review): DataFrame.append is deprecated (removed in pandas
        # 2.0); these should eventually migrate to pd.concat.
        right_w_dups = right.append(DataFrame({"a": ["e"], "c": ["moo"]}, index=[4]))
        merge(
            left,
            right_w_dups,
            left_index=True,
            right_index=True,
            validate="one_to_many",
        )

        msg = "Merge keys are not unique in right dataset; not a one-to-one merge"
        with pytest.raises(MergeError, match=msg):
            merge(
                left,
                right_w_dups,
                left_index=True,
                right_index=True,
                validate="one_to_one",
            )

        with pytest.raises(MergeError, match=msg):
            merge(left, right_w_dups, on="a", validate="one_to_one")

        # Dups on left
        left_w_dups = left.append(
            DataFrame({"a": ["a"], "c": ["cow"]}, index=[3]), sort=True
        )
        merge(
            left_w_dups,
            right,
            left_index=True,
            right_index=True,
            validate="many_to_one",
        )

        msg = "Merge keys are not unique in left dataset; not a one-to-one merge"
        with pytest.raises(MergeError, match=msg):
            merge(
                left_w_dups,
                right,
                left_index=True,
                right_index=True,
                validate="one_to_one",
            )

        with pytest.raises(MergeError, match=msg):
            merge(left_w_dups, right, on="a", validate="one_to_one")

        # Dups on both
        merge(left_w_dups, right_w_dups, on="a", validate="many_to_many")

        msg = "Merge keys are not unique in right dataset; not a many-to-one merge"
        with pytest.raises(MergeError, match=msg):
            merge(
                left_w_dups,
                right_w_dups,
                left_index=True,
                right_index=True,
                validate="many_to_one",
            )

        msg = "Merge keys are not unique in left dataset; not a one-to-many merge"
        with pytest.raises(MergeError, match=msg):
            merge(left_w_dups, right_w_dups, on="a", validate="one_to_many")

        # Check invalid arguments
        msg = "Not a valid argument for validate"
        with pytest.raises(ValueError, match=msg):
            merge(left, right, on="a", validate="jibberish")

        # Two column merge, dups in both, but jointly no dups.
        left = DataFrame(
            {
                "a": ["a", "a", "b", "b"],
                "b": [0, 1, 0, 1],
                "c": ["cat", "dog", "weasel", "horse"],
            },
            index=range(4),
        )

        right = DataFrame(
            {
                "a": ["a", "a", "b"],
                "b": [0, 1, 0],
                "d": ["meow", "bark", "um... weasel noise?"],
            },
            index=range(3),
        )

        expected_multi = DataFrame(
            {
                "a": ["a", "a", "b"],
                "b": [0, 1, 0],
                "c": ["cat", "dog", "weasel"],
                "d": ["meow", "bark", "um... weasel noise?"],
            },
            index=range(3),
        )

        msg = (
            "Merge keys are not unique in either left or right dataset; "
            "not a one-to-one merge"
        )
        with pytest.raises(MergeError, match=msg):
            merge(left, right, on="a", validate="1:1")

        # Validation must consider the composite key, not each column alone.
        result = merge(left, right, on=["a", "b"], validate="1:1")
        tm.assert_frame_equal(result, expected_multi)
def test_merge_two_empty_df_no_division_error(self):
# GH17776, PR #17846
a = DataFrame({"a": [], "b": [], "c": []})
with np.errstate(divide="raise"):
merge(a, a, on=("a", "b"))
    @pytest.mark.parametrize("how", ["right", "outer"])
    @pytest.mark.parametrize(
        "index,expected_index",
        [
            (
                CategoricalIndex([1, 2, 4]),
                CategoricalIndex([1, 2, 4, None, None, None]),
            ),
            (
                DatetimeIndex(["2001-01-01", "2002-02-02", "2003-03-03"]),
                DatetimeIndex(
                    ["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT]
                ),
            ),
            (Float64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
            # Int64 input widens to Float64 to accommodate the NaNs.
            (Int64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
            (
                IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
                IntervalIndex.from_tuples(
                    [(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan]
                ),
            ),
            (
                PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"),
                PeriodIndex(
                    ["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT],
                    freq="D",
                ),
            ),
            (
                TimedeltaIndex(["1d", "2d", "3d"]),
                TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]),
            ),
        ],
    )
    def test_merge_on_index_with_more_values(self, how, index, expected_index):
        """Right/outer merges where the right frame has more rows must pad
        the left side's index with missing values, for every index type."""
        # GH 24212
        # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
        # -1 is interpreted as a missing value instead of the last element
        df1 = DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index)
        df2 = DataFrame({"b": [0, 1, 2, 3, 4, 5]})
        result = df1.merge(df2, left_on="key", right_index=True, how=how)
        expected = DataFrame(
            [
                [0, 0, 0],
                [1, 1, 1],
                [2, 2, 2],
                [np.nan, 3, 3],
                [np.nan, 4, 4],
                [np.nan, 5, 5],
            ],
            columns=["a", "key", "b"],
        )
        expected.set_index(expected_index, inplace=True)
        tm.assert_frame_equal(result, expected)
    def test_merge_right_index_right(self):
        """Right merge on a left column against the right frame's index."""
        # Note: the expected output here is probably incorrect.
        # See https://github.com/pandas-dev/pandas/issues/17257 for more.
        # We include this as a regression test for GH-24897.
        left = DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]})
        right = DataFrame({"b": [1, 2, 3]})

        # The unmatched right row (key 2) gets a NaN positional index label.
        expected = DataFrame(
            {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]},
            columns=["a", "key", "b"],
            index=[0, 1, 2, np.nan],
        )
        result = left.merge(right, left_on="key", right_index=True, how="right")
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("how", ["left", "right"])
def test_merge_preserves_row_order(self, how):
# GH 27453
left_df = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
right_df = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
result = left_df.merge(right_df, on=["animal", "max_speed"], how=how)
if how == "right":
expected = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
else:
expected = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
tm.assert_frame_equal(result, expected)
    def test_merge_take_missing_values_from_index_of_other_dtype(self):
        """Right merge of a categorical key column against a CategoricalIndex:
        categories present only on the right must appear in the result."""
        # GH 24212
        left = DataFrame(
            {
                "a": [1, 2, 3],
                "key": Categorical(["a", "a", "b"], categories=list("abc")),
            }
        )
        right = DataFrame({"b": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"]))
        result = left.merge(right, left_on="key", right_index=True, how="right")
        # The unmatched right row ("c") gets a NaN positional index label.
        expected = DataFrame(
            {
                "a": [1, 2, 3, None],
                "key": Categorical(["a", "a", "b", "c"]),
                "b": [1, 1, 2, 3],
            },
            index=[0, 1, 2, np.nan],
        )
        expected = expected.reindex(columns=["a", "key", "b"])
        tm.assert_frame_equal(result, expected)
def test_merge_readonly(self):
# https://github.com/pandas-dev/pandas/issues/27943
data1 = DataFrame(
np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"]
)
data2 = DataFrame(
np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"]
)
data1._mgr.blocks[0].values.flags.writeable = False
data1.merge(data2) # no error
def _check_merge(x, y):
for how in ["inner", "left", "outer"]:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how, sort=True)
expected = expected.set_index("index")
# TODO check_names on merge?
tm.assert_frame_equal(result, expected, check_names=False)
class TestMergeDtypes:
    """Dtype preservation and coercion rules when merging on mixed-dtype keys."""

    @pytest.mark.parametrize(
        "right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")]
    )
    def test_different(self, right_vals):
        left = DataFrame(
            {
                "A": ["foo", "bar"],
                "B": Series(["foo", "bar"]).astype("category"),
                "C": [1, 2],
                "D": [1.0, 2.0],
                "E": Series([1, 2], dtype="uint64"),
                "F": Series([1, 2], dtype="int32"),
            }
        )
        right = DataFrame({"A": right_vals})

        # GH 9780
        # We allow merging on object and categorical cols and cast
        # categorical cols to object
        result = pd.merge(left, right, on="A")
        assert is_object_dtype(result.A.dtype)

    @pytest.mark.parametrize("d1", [np.int64, np.int32, np.int16, np.int8, np.uint8])
    @pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16])
    def test_join_multi_dtypes(self, d1, d2):
        # Join on a two-level key across every int/float dtype combination.
        dtype1 = np.dtype(d1)
        dtype2 = np.dtype(d2)

        left = DataFrame(
            {
                "k1": np.array([0, 1, 2] * 8, dtype=dtype1),
                "k2": ["foo", "bar"] * 12,
                "v": np.array(np.arange(24), dtype=np.int64),
            }
        )

        index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
        right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index)

        result = left.join(right, on=["k1", "k2"])

        expected = left.copy()

        # Integer v2 widens to float64 because unmatched rows become NaN.
        if dtype2.kind == "i":
            dtype2 = np.dtype("float64")
        expected["v2"] = np.array(np.nan, dtype=dtype2)
        expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
        expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7

        tm.assert_frame_equal(result, expected)

        result = left.join(right, on=["k1", "k2"], sort=True)
        expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "int_vals, float_vals, exp_vals",
        [
            ([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}),
            ([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}),
            ([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}),
        ],
    )
    def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):
        # GH 16572
        # Check that float column is not cast to object if
        # merging on float and int columns
        A = DataFrame({"X": int_vals})
        B = DataFrame({"Y": float_vals})
        expected = DataFrame(exp_vals)

        result = A.merge(B, left_on="X", right_on="Y")
        tm.assert_frame_equal(result, expected)

        result = B.merge(A, left_on="Y", right_on="X")
        tm.assert_frame_equal(result, expected[["Y", "X"]])

    def test_merge_key_dtype_cast(self):
        # GH 17044
        # An int key column merged against a float key stays float.
        df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"])
        df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"])
        result = df1.merge(df2, on="key", how="left")
        expected = DataFrame(
            {"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]},
            columns=["key", "v1", "v2"],
        )
        tm.assert_frame_equal(result, expected)

    def test_merge_on_ints_floats_warning(self):
        # GH 16572
        # merge will produce a warning when merging on int and
        # float columns where the float values are not exactly
        # equal to their int representation
        A = DataFrame({"X": [1, 2, 3]})
        B = DataFrame({"Y": [1.1, 2.5, 3.0]})
        expected = DataFrame({"X": [3], "Y": [3.0]})

        with tm.assert_produces_warning(UserWarning):
            result = A.merge(B, left_on="X", right_on="Y")
            tm.assert_frame_equal(result, expected)

        with tm.assert_produces_warning(UserWarning):
            result = B.merge(A, left_on="Y", right_on="X")
            tm.assert_frame_equal(result, expected[["Y", "X"]])

        # test no warning if float has NaNs
        B = DataFrame({"Y": [np.nan, np.nan, 3.0]})

        with tm.assert_produces_warning(None):
            result = B.merge(A, left_on="Y", right_on="X")
            tm.assert_frame_equal(result, expected[["Y", "X"]])

    def test_merge_incompat_infer_boolean_object(self):
        # GH21119: bool + object bool merge OK
        df1 = DataFrame({"key": Series([True, False], dtype=object)})
        df2 = DataFrame({"key": [True, False]})

        expected = DataFrame({"key": [True, False]}, dtype=object)
        result = pd.merge(df1, df2, on="key")
        tm.assert_frame_equal(result, expected)
        result = pd.merge(df2, df1, on="key")
        tm.assert_frame_equal(result, expected)

        # with missing value
        df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)})
        df2 = DataFrame({"key": [True, False]})

        expected = DataFrame({"key": [True, False]}, dtype=object)
        result = pd.merge(df1, df2, on="key")
        tm.assert_frame_equal(result, expected)
        result = pd.merge(df2, df1, on="key")
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "df1_vals, df2_vals",
        [
            # merge on category coerces to object
            ([0, 1, 2], Series(["a", "b", "a"]).astype("category")),
            ([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")),
            # no not infer
            ([0, 1], Series([False, True], dtype=object)),
            ([0, 1], Series([False, True], dtype=bool)),
        ],
    )
    def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals):
        # these are explicitly allowed incompat merges, that pass thru
        # the result type is dependent on if the values on the rhs are
        # inferred, otherwise these will be coerced to object

        df1 = DataFrame({"A": df1_vals})
        df2 = DataFrame({"A": df2_vals})

        result = pd.merge(df1, df2, on=["A"])
        assert is_object_dtype(result.A.dtype)
        result = pd.merge(df2, df1, on=["A"])
        assert is_object_dtype(result.A.dtype)

    @pytest.mark.parametrize(
        "df1_vals, df2_vals",
        [
            # do not infer to numeric
            (Series([1, 2], dtype="uint64"), ["a", "b", "c"]),
            (Series([1, 2], dtype="int32"), ["a", "b", "c"]),
            ([0, 1, 2], ["0", "1", "2"]),
            ([0.0, 1.0, 2.0], ["0", "1", "2"]),
            ([0, 1, 2], ["0", "1", "2"]),
            (
                pd.date_range("1/1/2011", periods=2, freq="D"),
                ["2011-01-01", "2011-01-02"],
            ),
            (pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]),
            (pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]),
            (
                pd.date_range("20130101", periods=3),
                pd.date_range("20130101", periods=3, tz="US/Eastern"),
            ),
        ],
    )
    def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
        # GH 9780, GH 15800
        # Raise a ValueError when a user tries to merge on
        # dtypes that are incompatible (e.g., obj and int/float)

        df1 = DataFrame({"A": df1_vals})
        df2 = DataFrame({"A": df2_vals})

        msg = (
            f"You are trying to merge on {df1['A'].dtype} and "
            f"{df2['A'].dtype} columns. If you wish to proceed "
            "you should use pd.concat"
        )
        msg = re.escape(msg)
        with pytest.raises(ValueError, match=msg):
            pd.merge(df1, df2, on=["A"])

        # Check that error still raised when swapping order of dataframes
        msg = (
            f"You are trying to merge on {df2['A'].dtype} and "
            f"{df1['A'].dtype} columns. If you wish to proceed "
            "you should use pd.concat"
        )
        msg = re.escape(msg)
        with pytest.raises(ValueError, match=msg):
            pd.merge(df2, df1, on=["A"])
@pytest.fixture
def left():
    # 10-row frame with a categorical merge key "X" and an object column "Y".
    # Seeded so the random draws are reproducible across test runs.
    np.random.seed(1234)
    return DataFrame(
        {
            "X": Series(np.random.choice(["foo", "bar"], size=(10,))).astype(
                CDT(["foo", "bar"])
            ),
            "Y": np.random.choice(["one", "two", "three"], size=(10,)),
        }
    )
@pytest.fixture
def right():
    # Two-row lookup table keyed by the same categorical "X" as the `left`
    # fixture, with an integer payload column "Z".
    np.random.seed(1234)
    return DataFrame(
        {"X": Series(["foo", "bar"]).astype(CDT(["foo", "bar"])), "Z": [1, 2]}
    )
class TestMergeCategorical:
    """Merging behaviour when one or both key/value columns are Categorical."""

    def test_identical(self, left):
        # merging on the same, should preserve dtypes
        merged = pd.merge(left, left, on="X")
        result = merged.dtypes.sort_index()
        expected = Series(
            [CategoricalDtype(categories=["foo", "bar"]), np.dtype("O"), np.dtype("O")],
            index=["X", "Y_x", "Y_y"],
        )
        tm.assert_series_equal(result, expected)

    def test_basic(self, left, right):
        # we have matching Categorical dtypes in X
        # so should preserve the merged column
        merged = pd.merge(left, right, on="X")
        result = merged.dtypes.sort_index()
        expected = Series(
            [
                CategoricalDtype(categories=["foo", "bar"]),
                np.dtype("O"),
                np.dtype("int64"),
            ],
            index=["X", "Y", "Z"],
        )
        tm.assert_series_equal(result, expected)

    def test_merge_categorical(self):
        # GH 9426
        # Cover all four combinations of object/categorical on each side.
        right = DataFrame(
            {
                "c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"},
                "d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"},
            }
        )
        left = DataFrame(
            {
                "a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"},
                "b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"},
            }
        )
        df = pd.merge(left, right, how="left", left_on="b", right_on="c")

        # object-object
        expected = df.copy()

        # object-cat
        # note that we propagate the category
        # because we don't have any matching rows
        cright = right.copy()
        cright["d"] = cright["d"].astype("category")
        result = pd.merge(left, cright, how="left", left_on="b", right_on="c")
        expected["d"] = expected["d"].astype(CategoricalDtype(["null"]))
        tm.assert_frame_equal(result, expected)

        # cat-object
        cleft = left.copy()
        cleft["b"] = cleft["b"].astype("category")
        result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
        tm.assert_frame_equal(result, expected)

        # cat-cat
        cright = right.copy()
        cright["d"] = cright["d"].astype("category")
        cleft = left.copy()
        cleft["b"] = cleft["b"].astype("category")
        result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
        tm.assert_frame_equal(result, expected)

    def tests_merge_categorical_unordered_equal(self):
        # GH-19551
        # Unordered categoricals with the same categories in different order
        # are merge-compatible.
        df1 = DataFrame(
            {
                "Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]),
                "Left": ["A0", "B0", "C0"],
            }
        )

        df2 = DataFrame(
            {
                "Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]),
                "Right": ["C1", "B1", "A1"],
            }
        )
        result = pd.merge(df1, df2, on=["Foo"])
        expected = DataFrame(
            {
                "Foo": Categorical(["A", "B", "C"]),
                "Left": ["A0", "B0", "C0"],
                "Right": ["A1", "B1", "C1"],
            }
        )
        tm.assert_frame_equal(result, expected)

    def test_other_columns(self, left, right):
        # non-merge columns should preserve if possible
        right = right.assign(Z=right.Z.astype("category"))

        merged = pd.merge(left, right, on="X")
        result = merged.dtypes.sort_index()
        expected = Series(
            [
                CategoricalDtype(categories=["foo", "bar"]),
                np.dtype("O"),
                CategoricalDtype(categories=[1, 2]),
            ],
            index=["X", "Y", "Z"],
        )
        tm.assert_series_equal(result, expected)

        # categories are preserved
        assert left.X.values._categories_match_up_to_permutation(merged.X.values)
        assert right.Z.values._categories_match_up_to_permutation(merged.Z.values)

    @pytest.mark.parametrize(
        "change",
        [
            lambda x: x,
            lambda x: x.astype(CDT(["foo", "bar", "bah"])),
            lambda x: x.astype(CDT(ordered=True)),
        ],
    )
    def test_dtype_on_merged_different(self, change, join_type, left, right):
        # our merging columns, X now has 2 different dtypes
        # so we must be object as a result
        X = change(right.X.astype("object"))
        right = right.assign(X=X)
        assert is_categorical_dtype(left.X.values.dtype)
        # assert not left.X.values._categories_match_up_to_permutation(right.X.values)

        merged = pd.merge(left, right, on="X", how=join_type)

        result = merged.dtypes.sort_index()
        expected = Series(
            [np.dtype("O"), np.dtype("O"), np.dtype("int64")], index=["X", "Y", "Z"]
        )
        tm.assert_series_equal(result, expected)

    def test_self_join_multiple_categories(self):
        # GH 16767
        # non-duplicates should work with multiple categories
        m = 5
        df = DataFrame(
            {
                "a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m,
                "b": ["t", "w", "x", "y", "z"] * 2 * m,
                "c": [
                    letter
                    for each in ["m", "n", "u", "p", "o"]
                    for letter in [each] * 2 * m
                ],
                "d": [
                    letter
                    for each in [
                        "aa",
                        "bb",
                        "cc",
                        "dd",
                        "ee",
                        "ff",
                        "gg",
                        "hh",
                        "ii",
                        "jj",
                    ]
                    for letter in [each] * m
                ],
            }
        )

        # change them all to categorical variables
        df = df.apply(lambda x: x.astype("category"))

        # self-join should equal ourselves
        result = pd.merge(df, df, on=list(df.columns))

        tm.assert_frame_equal(result, df)

    def test_dtype_on_categorical_dates(self):
        # GH 16900
        # dates should not be coerced to ints
        df = DataFrame(
            [[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"]
        )
        df["date"] = df["date"].astype("category")

        df2 = DataFrame(
            [[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"]
        )
        df2["date"] = df2["date"].astype("category")

        expected_outer = DataFrame(
            [
                [pd.Timestamp("2001-01-01").date(), 1.1, 1.3],
                [pd.Timestamp("2001-01-02").date(), 1.3, np.nan],
                [pd.Timestamp("2001-01-03").date(), np.nan, 1.4],
            ],
            columns=["date", "num2", "num4"],
        )
        result_outer = pd.merge(df, df2, how="outer", on=["date"])
        tm.assert_frame_equal(result_outer, expected_outer)

        expected_inner = DataFrame(
            [[pd.Timestamp("2001-01-01").date(), 1.1, 1.3]],
            columns=["date", "num2", "num4"],
        )
        result_inner = pd.merge(df, df2, how="inner", on=["date"])
        tm.assert_frame_equal(result_inner, expected_inner)

    @pytest.mark.parametrize("ordered", [True, False])
    @pytest.mark.parametrize(
        "category_column,categories,expected_categories",
        [
            ([False, True, True, False], [True, False], [True, False]),
            ([2, 1, 1, 2], [1, 2], [1, 2]),
            (["False", "True", "True", "False"], ["True", "False"], ["True", "False"]),
        ],
    )
    def test_merging_with_bool_or_int_cateorical_column(
        self, category_column, categories, expected_categories, ordered
    ):
        # GH 17187
        # merging with a boolean/int categorical column
        df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column})
        df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered))
        df2 = DataFrame({"id": [2, 4], "num": [1, 9]})
        result = df1.merge(df2)
        expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]})
        expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered))
        tm.assert_frame_equal(expected, result)

    def test_merge_on_int_array(self):
        # GH 23020
        # Nullable Int64 keys (including NA) must merge and keep their dtype.
        df = DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1})
        result = pd.merge(df, df, on="A")
        expected = DataFrame(
            {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
        )
        tm.assert_frame_equal(result, expected)
@pytest.fixture
def left_df():
    # Left operand for TestMergeOnIndexes: descending-ish integer index.
    return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right_df():
    # Right operand for TestMergeOnIndexes: overlaps left on index 1 and 2.
    return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
class TestMergeOnIndexes:
    """pd.merge with left_index/right_index across all join types and sort flags."""

    @pytest.mark.parametrize(
        "how, sort, expected",
        [
            ("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
            ("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
            (
                "left",
                False,
                DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
            ),
            (
                "left",
                True,
                DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
            ),
            (
                "right",
                False,
                DataFrame(
                    {"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]
                ),
            ),
            (
                "right",
                True,
                DataFrame(
                    {"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]
                ),
            ),
            (
                "outer",
                False,
                DataFrame(
                    {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
                    index=[0, 1, 2, 3],
                ),
            ),
            (
                "outer",
                True,
                DataFrame(
                    {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
                    index=[0, 1, 2, 3],
                ),
            ),
        ],
    )
    def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
        # Each (how, sort) combination must match its hand-built frame.
        result = pd.merge(
            left_df, right_df, left_index=True, right_index=True, how=how, sort=sort
        )
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "index",
    [
        CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"),
        Float64Index([1.0, 2.0], name="index_col"),
        Int64Index([1, 2], name="index_col"),
        UInt64Index([1, 2], name="index_col"),
        RangeIndex(start=0, stop=2, name="index_col"),
        DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"),
    ],
    ids=lambda x: type(x).__name__,
)
def test_merge_index_types(index):
    """Merging on a named index via ``on=`` works for every index type."""
    # gh-20777
    # assert key access is consistent across index types
    left = DataFrame({"left_data": [1, 2]}, index=index)
    right = DataFrame({"right_data": [1.0, 2.0]}, index=index)

    result = left.merge(right, on=["index_col"])

    expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "on,left_on,right_on,left_index,right_index,nm",
    [
        (["outer", "inner"], None, None, False, False, "B"),
        (None, None, None, True, True, "B"),
        (None, ["outer", "inner"], None, False, True, "B"),
        (None, None, ["outer", "inner"], True, False, "B"),
        # nm=None variants: the Series is unnamed and must be rejected.
        (["outer", "inner"], None, None, False, False, None),
        (None, None, None, True, True, None),
        (None, ["outer", "inner"], None, False, True, None),
        (None, None, ["outer", "inner"], True, False, None),
    ],
)
def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
    """Merging a DataFrame with a Series on MultiIndex levels: a named
    Series contributes a column; an unnamed one raises ValueError."""
    # GH 21220
    a = DataFrame(
        {"A": [1, 2, 3, 4]},
        index=pd.MultiIndex.from_product(
            [["a", "b"], [0, 1]], names=["outer", "inner"]
        ),
    )
    b = Series(
        [1, 2, 3, 4],
        index=pd.MultiIndex.from_product(
            [["a", "b"], [1, 2]], names=["outer", "inner"]
        ),
        name=nm,
    )
    expected = DataFrame(
        {"A": [2, 4], "B": [1, 3]},
        index=pd.MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
    )
    if nm is not None:
        result = pd.merge(
            a,
            b,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
        )
        tm.assert_frame_equal(result, expected)
    else:
        msg = "Cannot merge a Series without a name"
        with pytest.raises(ValueError, match=msg):
            result = pd.merge(
                a,
                b,
                on=on,
                left_on=left_on,
                right_on=right_on,
                left_index=left_index,
                right_index=right_index,
            )
@pytest.mark.parametrize(
    "col1, col2, kwargs, expected_cols",
    [
        (0, 0, {"suffixes": ("", "_dup")}, ["0", "0_dup"]),
        (0, 0, {"suffixes": (None, "_dup")}, [0, "0_dup"]),
        (0, 0, {"suffixes": ("_x", "_y")}, ["0_x", "0_y"]),
        (0, 0, {"suffixes": ["_x", "_y"]}, ["0_x", "0_y"]),
        ("a", 0, {"suffixes": (None, "_y")}, ["a", 0]),
        (0.0, 0.0, {"suffixes": ("_x", None)}, ["0.0_x", 0.0]),
        ("b", "b", {"suffixes": (None, "_y")}, ["b", "b_y"]),
        ("a", "a", {"suffixes": ("_x", None)}, ["a_x", "a"]),
        ("a", "b", {"suffixes": ("_x", None)}, ["a", "b"]),
        ("a", "a", {"suffixes": (None, "_x")}, ["a", "a_x"]),
        (0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]),
        ("a", "a", {}, ["a_x", "a_y"]),
        (0, 0, {}, ["0_x", "0_y"]),
    ],
)
def test_merge_suffix(col1, col2, kwargs, expected_cols):
    # issue: 24782
    # Resulting column labels for every suffix spelling, including a None
    # suffix (which leaves the label unchanged). Function and method forms
    # of merge must agree.
    lhs = DataFrame({col1: [1, 2, 3]})
    rhs = DataFrame({col2: [4, 5, 6]})
    expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols)
    for merged in (
        lhs.merge(rhs, left_index=True, right_index=True, **kwargs),
        pd.merge(lhs, rhs, left_index=True, right_index=True, **kwargs),
    ):
        tm.assert_frame_equal(merged, expected)
@pytest.mark.parametrize(
    "how,expected",
    [
        (
            "right",
            DataFrame(
                {"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]}
            ),
        ),
        (
            "outer",
            DataFrame(
                {
                    "A": [100, 200, 1, 300],
                    "B1": [60, 70, 80, np.nan],
                    "B2": [600, 700, np.nan, 800],
                }
            ),
        ),
    ],
)
def test_merge_duplicate_suffix(how, expected):
    # Identical suffixes are allowed and produce duplicate column labels.
    lhs = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]})
    rhs = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]})
    result = merge(lhs, rhs, on="A", how=how, suffixes=("_x", "_x"))
    # Rename the parametrized frame's B1/B2 to the duplicated labels.
    expected.columns = ["A", "B_x", "B_x"]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "col1, col2, suffixes",
    [("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))],
)
def test_merge_suffix_error(col1, col2, suffixes):
    # issue: 24782
    # A None/empty suffix on both sides would leave overlapping columns
    # un-renamed, which must raise rather than yield duplicate labels.
    lhs = DataFrame({col1: [1, 2, 3]})
    rhs = DataFrame({col2: [3, 4, 5]})
    # TODO: might reconsider current raise behaviour, see issue 24782
    with pytest.raises(ValueError, match="columns overlap but no suffix specified"):
        pd.merge(lhs, rhs, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}])
def test_merge_suffix_warns(suffixes):
a = DataFrame({"a": [1, 2, 3]})
b = DataFrame({"b": [3, 4, 5]})
with tm.assert_produces_warning(FutureWarning):
pd.merge(a, b, left_index=True, right_index=True, suffixes={"left", "right"})
@pytest.mark.parametrize(
    "col1, col2, suffixes, msg",
    [
        ("a", "a", ("a", "b", "c"), r"too many values to unpack \(expected 2\)"),
        ("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"),
    ],
)
def test_merge_suffix_length_error(col1, col2, suffixes, msg):
    # A suffixes value that is not exactly length 2 fails the internal
    # two-way unpacking with the standard Python unpacking error.
    lhs = DataFrame({col1: [1, 2, 3]})
    rhs = DataFrame({col2: [3, 4, 5]})
    with pytest.raises(ValueError, match=msg):
        pd.merge(lhs, rhs, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("cat_dtype", ["one", "two"])
@pytest.mark.parametrize("reverse", [True, False])
def test_merge_equal_cat_dtypes(cat_dtype, reverse):
    # see gh-22501
    # Index-merging frames whose CategoricalIndexes carry equal (separately
    # constructed) dtypes must keep the categorical dtype, regardless of
    # the right frame's row order.
    cat_dtypes = {
        "one": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
        "two": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
    }

    df1 = DataFrame(
        {"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]}
    ).set_index("foo")

    data_foo = ["a", "b", "c"]
    data_right = [1, 2, 3]
    if reverse:
        data_foo = data_foo[::-1]
        data_right = data_right[::-1]

    df2 = DataFrame(
        {"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right}
    ).set_index("foo")

    result = df1.merge(df2, left_index=True, right_index=True)

    expected = DataFrame(
        {
            "left": [1, 2, 3],
            "right": [1, 2, 3],
            "foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]),
        }
    ).set_index("foo")
    tm.assert_frame_equal(result, expected)
def test_merge_equal_cat_dtypes2():
    # see gh-22501
    cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False)

    # The left index covers only a subset of the categories.
    left = DataFrame(
        {"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]}
    ).set_index("foo")
    right = DataFrame(
        {"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]}
    ).set_index("foo")

    result = left.merge(right, left_index=True, right_index=True)

    # The default inner join keeps only the shared keys and the categorical
    # dtype of the index is preserved.
    expected = DataFrame(
        {"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)}
    ).set_index("foo")

    tm.assert_frame_equal(result, expected)
def test_merge_on_cat_and_ext_array():
    # GH 28668: merging a categorical column against the underlying
    # extension-array (interval) column should round-trip cleanly.
    intervals = Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
    right = DataFrame({"a": intervals})

    left = right.copy()
    left["a"] = left["a"].astype("category")

    result = pd.merge(left, right, how="inner", on="a")
    expected = right.copy()

    tm.assert_frame_equal(result, expected)
def test_merge_multiindex_columns():
    # Issue #28518
    # Verify that merging two dataframes give the expected labels
    # The original cause of this issue come from a bug lexsort_depth and is tested in
    # test_lexsort_depth

    # Both frames share identical MultiIndex columns plus an "id" merge key.
    letters = ["a", "b", "c", "d"]
    numbers = ["1", "2", "3"]
    index = pd.MultiIndex.from_product((letters, numbers), names=["outer", "inner"])

    frame_x = DataFrame(columns=index)
    frame_x["id"] = ""
    frame_y = DataFrame(columns=index)
    frame_y["id"] = ""

    l_suf = "_x"
    r_suf = "_y"
    result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf)))

    # Constructing the expected results
    # Suffixes are appended to the *outer* level labels only; the inner
    # level ("numbers") is unchanged.
    expected_labels = [letter + l_suf for letter in letters] + [
        letter + r_suf for letter in letters
    ]
    expected_index = pd.MultiIndex.from_product(
        [expected_labels, numbers], names=["outer", "inner"]
    )
    expected = DataFrame(columns=expected_index)
    expected["id"] = ""

    tm.assert_frame_equal(result, expected)
def test_merge_datetime_upcast_dtype():
    # https://github.com/pandas-dev/pandas/issues/31208
    # A left join with unmatched keys must upcast the datetime column so
    # the missing entry becomes NaT rather than NaN.
    left = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
    right = DataFrame(
        {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
    )

    merged = pd.merge(left, right, how="left", on="y")

    expected = DataFrame(
        {
            "x": ["a", "b", "c"],
            "y": ["1", "2", "4"],
            "z": pd.to_datetime(["2000", "2001", "NaT"]),
        }
    )
    tm.assert_frame_equal(merged, expected)
@pytest.mark.parametrize("n_categories", [5, 128])
def test_categorical_non_unique_monotonic(n_categories):
    # GH 28189
    # With n_categories as 5, we test the int8 case is hit in libjoin,
    # with n_categories as 128 we test the int16 case.
    left_index = CategoricalIndex([0] + list(range(n_categories)))
    left = DataFrame(range(n_categories + 1), columns=["value"], index=left_index)
    right = DataFrame(
        [[6]],
        columns=["value"],
        index=CategoricalIndex([0], categories=np.arange(n_categories)),
    )

    result = merge(left, right, how="left", left_index=True, right_index=True)

    # Only the two rows with category 0 (positions 0 and 1) match the right
    # frame; everything else gets NaN.
    expected = DataFrame(
        [[i, 6.0 if i < 2 else np.nan] for i in range(n_categories + 1)],
        columns=["value_x", "value_y"],
        index=left_index,
    )
    tm.assert_frame_equal(expected, result)
def test_merge_join_categorical_multiindex():
    # From issue 16627: joining on a categorical MultiIndex must give the
    # same result as the equivalent column-wise merge.  The original test
    # duplicated the whole body for the ordered-categorical variant; the
    # duplication is factored into a single helper.

    def _assert_join_matches_merge(left, right):
        # Reference result computed with ``merge`` on the reset index.
        expected = merge(
            left,
            right.reset_index(),
            left_on=["Cat1", "Int1"],
            right_on=["Cat", "Int"],
            how="left",
        )
        expected = expected.drop(["Cat", "Int"], axis=1)
        result = left.join(right, on=["Cat1", "Int1"])
        tm.assert_frame_equal(expected, result)

    for ordered in (False, True):
        # The ordered variant uses a deliberately non-sorted category order.
        categories = ["b", "a", "c"] if ordered else ["a", "b", "c"]
        left = DataFrame(
            {
                "Cat1": Categorical(
                    ["a", "b", "a", "c", "a", "b"], categories, ordered=ordered
                ),
                "Int1": [0, 1, 0, 1, 0, 0],
            }
        )
        right = DataFrame(
            {
                "Cat": Categorical(
                    ["a", "b", "c", "a", "b", "c"], categories, ordered=ordered
                ),
                "Int": [0, 0, 0, 1, 1, 1],
                "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
            }
        ).set_index(["Cat", "Int"])["Factor"]

        _assert_join_matches_merge(left, right)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
    ("kwargs", "err_msg"),
    [
        ({"left_on": "a", "left_index": True}, ["left_on", "left_index"]),
        ({"right_on": "a", "right_index": True}, ["right_on", "right_index"]),
    ],
)
def test_merge_join_cols_error_reporting_duplicates(func, kwargs, err_msg):
    # GH: 16228
    lhs = DataFrame({"a": [1, 2], "b": [3, 4]})
    rhs = DataFrame({"a": [1, 1], "c": [5, 6]})

    first, second = err_msg
    msg = rf'Can only pass argument "{first}" OR "{second}" not both\.'
    with pytest.raises(MergeError, match=msg):
        getattr(pd, func)(lhs, rhs, **kwargs)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
    ("kwargs", "err_msg"),
    [
        ({"left_on": "a"}, ["right_on", "right_index"]),
        ({"right_on": "a"}, ["left_on", "left_index"]),
    ],
)
def test_merge_join_cols_error_reporting_missing(func, kwargs, err_msg):
    # GH: 16228
    lhs = DataFrame({"a": [1, 2], "b": [3, 4]})
    rhs = DataFrame({"a": [1, 1], "c": [5, 6]})

    first, second = err_msg
    msg = rf'Must pass "{first}" OR "{second}"\.'
    with pytest.raises(MergeError, match=msg):
        getattr(pd, func)(lhs, rhs, **kwargs)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
    "kwargs",
    [
        {"right_index": True},
        {"left_index": True},
    ],
)
def test_merge_join_cols_error_reporting_on_and_index(func, kwargs):
    # GH: 16228
    lhs = DataFrame({"a": [1, 2], "b": [3, 4]})
    rhs = DataFrame({"a": [1, 1], "c": [5, 6]})

    msg = (
        r'Can only pass argument "on" OR "left_index" '
        r'and "right_index", not a combination of both\.'
    )
    with pytest.raises(MergeError, match=msg):
        getattr(pd, func)(lhs, rhs, on="a", **kwargs)
def test_merge_right_left_index():
    # GH#38616: a right merge mixing left_index with right_on.
    left = DataFrame({"x": [1, 1], "z": ["foo", "foo"]})
    right = left.copy()

    merged = pd.merge(left, right, how="right", left_index=True, right_on="x")

    expected = DataFrame(
        {
            "x": [1, 1],
            "x_x": [1, 1],
            "z_x": ["foo", "foo"],
            "x_y": [1, 1],
            "z_y": ["foo", "foo"],
        }
    )
    tm.assert_frame_equal(merged, expected)
def test_merge_result_empty_index_and_on():
    # GH#33814: merging index level against column with no matches must
    # produce an empty frame regardless of which side supplies the index.
    df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"])
    df2 = DataFrame({"b": [1]}).set_index(["b"])
    expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"])

    for frames, kwargs in (
        ((df1, df2), {"left_on": ["b"], "right_index": True}),
        ((df2, df1), {"left_index": True, "right_on": ["b"]}),
    ):
        result = merge(*frames, **kwargs)
        tm.assert_frame_equal(result, expected)
| bsd-3-clause |
carrillo/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: a noisy sine sampled at 80 sorted points in [0, 5)
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))  # perturb every fifth target

# Fit one shallow and one deep regression tree (fit returns the estimator)
regr_1 = DecisionTreeRegressor(max_depth=2).fit(X, y)
regr_2 = DecisionTreeRegressor(max_depth=5).fit(X, y)

# Predict on a dense grid so the piecewise-constant fit is visible
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
ambikeshwar1991/gnuradio | gr-input/python/plzr_plot.py | 2 | 6475 | #!/usr/bin/env python
#
# Copyright 2015 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
import time
import numpy
from gnuradio import gr
import sciscipy
import scipy.signal as signal
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.pyplot import axvline, axhline
class plzr_plot(gr.sync_block):
"""
docstring for block add_python
"""
def __init__(self,order1,order2,order3,num_tf,itype):
a = []
for i in range(0,2*num_tf):
a.append(numpy.float32)
print "This is a", a
self.i = 0
self.max_2 = [0,0,0,0]
self.z = []
self.z1 = [0,0,0,0]
self.z2 = []
self.z3= [0,0,0,0]
self.num_tf = num_tf
self.itype = itype
self.order1=int(order1)+1
self.order2=int(order2)+1
self.order3=int(order3)+1
self.b = self.order1*[0]
self.b11 = self.order1*[0]
self.c = self.order1*[0]
self.c11 = self.order1*[0]
self.b1 = self.order2*[0]
self.b12 = self.order2*[0]
self.c1 = self.order2*[0]
self.c12 = self.order2*[0]
self.b2 = self.order3*[0]
self.b21 = self.order3*[0]
self.c2 = self.order3*[0]
self.c21 = self.order3*[0]
gr.sync_block.__init__(self,
name="plzr_plot",
in_sig=[numpy.float32,numpy.float32,numpy.float32,numpy.float32],
out_sig=None)
def plot_cont(self,b,c):
self.z, self.p, self.k = signal.tf2zpk(b, c)
plt.plot(numpy.real(self.z), numpy.imag(self.z), 'or', label='Zeros of TF 1')
plt.plot(numpy.real(self.p), numpy.imag(self.p), 'xb', label='Poles of TF 1')
plt.legend(loc=1,numpoints=1)
def plot_cont1(self,b1,c1):
self.z1, self.p1, self.k1 = signal.tf2zpk(b1, c1)
plt.plot(numpy.real(self.z1), numpy.imag(self.z1), 'og', label='Zeros of TF2')
plt.plot(numpy.real(self.p1), numpy.imag(self.p1), 'xm', label='Poles of TF2')
plt.legend(loc=1,numpoints=1)
def plot_cont2(self,b2,c2):
self.z2, self.p2, self.k2 = signal.tf2zpk(b2, c2)
plt.plot(numpy.real(self.z2), numpy.imag(self.z2), 'oy', label='Zeros of TF3')
plt.plot(numpy.real(self.p2), numpy.imag(self.p2), 'xk', label='Poles of TF3')
plt.legend(loc=1,numpoints=1)
def common_plot_cont(self):
ax = plt.subplot(1, 1, 0)
axvline(0, color='0.7')
axhline(0, color='0.7')
plt.title('Pole / Zero Plot')
plt.ylabel('Real')
plt.xlabel('Imaginary')
plt.grid()
def common_plot_dist(self):
ax = plt.subplot(1, 1, 0)
unit_circle = patches.Circle((0,0), radius=1, fill=False,
color='black', ls='solid', alpha=0.1)
ax.add_patch(unit_circle)
axvline(0, color='0.7')
axhline(0, color='0.7')
plt.title('Pole / Zero Plot')
plt.ylabel('Real')
plt.xlabel('Imaginary')
plt.grid()
def work(self, input_items, output_items):
k1 = self.order1
k2 = self.order2
k3 = self.order3
try:
for i in range(0,k1):
self.b[i] = input_items[0][i]
v1 = 0
for i2 in reversed(self.b):
self.b11[v1]=i2
v1 = v1 + 1
print "I am z1\n",self.b11
except IndexError:
pass
try:
for j in range(0,k1):
self.c[j] = input_items[1][j]
v = 0
for i3 in reversed(self.c):
self.c11[v] = i3
v = v + 1
print "I am z3\n", self.c11
except IndexError:
pass
try:
for i in range(0,k2):
self.b1[i] = input_items[2][i]
v1 = 0
for i2 in reversed(self.b1):
self.b12[v1]=i2
except IndexError:
pass
try:
for j in range(0,k2):
self.c1[j] = input_items[3][j]
v = 0
for i3 in reversed(self.c1):
self.c12[v] = i3
v = v + 1
except IndexError:
pass
try:
for i in range(0,k3):
self.b2[i] = input_items[4][i]
v1 = 0
for i2 in reversed(self.b2):
self.b21[v1]=i2
v1 = v1 + 1
except IndexError:
pass
try:
for j in range(0,k3):
self.c2[j] = input_items[5][j]
v = 0
for i3 in reversed(self.c2):
self.c21[v] = i3
v = v + 1
except IndexError:
pass
if (self.itype == 12):
self.common_plot_cont()
else:
self.common_plot_dist()
if self.num_tf == 1:
self.plot_cont(self.b11,self.c11)
self.z11 = [abs(k11) for k11 in self.z]
self.z22 = max(self.z11)
self.p11 = [abs(k22) for k22 in self.p]
self.p22 = max(self.p11)
if(z22 > p22):
limit = self.z22 + 1
else:
limit = self.p22 + 1
plt.ylim([-limit,limit])
plt.xlim([-limit,limit])
plt.ion()
plt.draw()
elif self.num_tf == 2:
self.plot_cont(self.b11,self.c11)
self.plot_cont1(self.b12,self.c12)
self.z11 = [abs(k11) for k11 in self.z]
try:
self.z22 = max(self.z11)
except ValueError:
self.z22 = 0
self.p11 = [abs(k22) for k22 in self.p]
self.p22 = max(self.p11)
self.z21 = [abs(k21) for k21 in self.z1]
try:
self.z32 = max(self.z21)
except ValueError:
self.z32 = 0
self.p21 = [abs(k32) for k32 in self.p1]
self.p32 = max(self.p21)
self.max_2[0] = self.z22
self.max_2[1] = self.p22
self.max_2[2] = self.z32
self.max_2[3] = self.p32
limit = max(self.max_2)
plt.ylim([-limit,limit])
plt.xlim([-limit,limit])
plt.ion()
plt.draw()
else:
self.plot_cont(self.b11,self.c11)
self.plot_cont1(self.b12,self.c12)
self.plot_cont2(self.b21,self.c21)
plt.ion()
plt.draw()
plt.clf()
in0 = input_items[0]
# <+signal processing here+>
return len(input_items[0])
| gpl-3.0 |
dkoes/qsar-tools | trainclassifier.py | 1 | 5406 | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import argparse, sys, pickle
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import *
from sklearn.model_selection import KFold
from sklearn.metrics import *
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
def scoremodel(model, x, y):
    '''Return fitness of model. We'll use AUC.'''
    predictions = model.predict(x).squeeze()
    return roc_auc_score(y, predictions)
def trainmodels(m, x, y):
    '''For the model type m, train a classifier on x->y using built-in CV to
    parameterize. Return both this model and an unfit model that can be used for CV.
    Note for PLS we cheat a little bit since there isn't a built-in CV trainer.

    m is one of 'knn', 'svm', 'logistic' or 'rf'; x is the feature matrix and
    y the binary labels.  Returns (fit_model, unfit_model) where fit_model is
    trained on all of (x, y) and unfit_model is a fresh estimator with the
    same hyperparameters, suitable for external cross-validation.
    '''
    if m == 'knn':
        #have to manually cross-validate to choose number of components
        kf = KFold(n_splits=3)
        bestscore = -10000
        besti = 0
        # Evaluate k = 1..9 and keep the k with the best mean CV AUC.
        for i in range(1,10):
            #try larger number of components until average CV perf decreases
            knn = KNeighborsClassifier(i)
            scores = []
            #TODO: parallelize below
            for train,test in kf.split(x):
                xtrain = x[train]
                ytrain = y[train]
                xtest = x[test]
                ytest = y[test]
                knn.fit(xtrain,ytrain)
                score = scoremodel(knn,xtest,ytest)
                scores.append(score)
            ave = np.mean(scores)
            if ave > bestscore:
                bestscore = ave
                besti = i
        # Refit with the chosen k on the full data set.
        model = KNeighborsClassifier(besti)
        model.fit(x,y)
        print("Best k = %d"%besti)
        unfit = KNeighborsClassifier(besti) #choose number of components using full data - iffy
    elif m == 'svm':
        # Grid-search C and gamma for an RBF-kernel SVM.
        C_range = np.logspace(-2, 3, 6)
        gamma_range = np.logspace(-9, 3, 7)
        param_grid = dict(gamma=gamma_range, C=C_range)
        grid = GridSearchCV(SVC(), param_grid=param_grid,n_jobs=-1)
        grid.fit(x,y)
        print("svm params",grid.best_params_)
        model = grid.best_estimator_
        unfit = SVC(**grid.best_params_)
    elif m == 'logistic':
        # LogisticRegressionCV selects its regularization strength internally.
        model = LogisticRegressionCV(n_jobs=-1)
        model.fit(x,y)
        unfit = LogisticRegressionCV(n_jobs=-1)
    elif m == 'rf':
        #evalute different max depths
        # Depths range from 2 up to log2 of the feature count.
        parameters = {'max_depth': range(2,int(np.log2(len(x[0])))+1)}
        clf = GridSearchCV(RandomForestClassifier(), parameters, 'roc_auc',n_jobs=-1)
        clf.fit(x,y)
        model = clf.best_estimator_
        print("max_depth =",clf.best_params_['max_depth'])
        unfit = RandomForestClassifier(**clf.best_params_)

    return (model,unfit)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Train linear model from fingerprint file')
    parser.add_argument('input',help='Fingerprints input file')
    parser.add_argument('-o','--outfile', type=argparse.FileType('wb'), help="Output file for model (trained on full data)")
    parser.add_argument('-k','--kfolds',type=int,default=3,help="Number of folds for cross-validation")
    parser.add_argument('-y','--labels',help="Labels (y-values). Will override any specified in fingerprints file")
    models = parser.add_mutually_exclusive_group()
    models.add_argument('--svm',action='store_const',dest='model',const='svm',help="Use support vector machine (rbf kernel)")
    models.add_argument('--knn',action='store_const',dest='model',const='knn',help="Use k-nearest neighbors")
    models.add_argument('--rf',action='store_const',dest='model',const='rf',help="Use random forest")
    models.add_argument('--logistic',action='store_const',dest='model',const='logistic',help="Use logistic regression")
    parser.set_defaults(model='knn')
    args = parser.parse_args()

    comp = 'gzip' if args.input.endswith('.gz') else None
    data = pd.read_csv(args.input,compression=comp,header=None,delim_whitespace=True)

    if args.labels: #override what is in fingerprint file
        # np.float was a deprecated alias for the builtin float (removed in
        # NumPy 1.24); builtin float is behaviorally identical here.
        y = np.genfromtxt(args.labels,float)
        if len(y) != len(data):
            # Bug fix: this previously used len(x), but x is not defined
            # until later, so the error report itself raised a NameError.
            print("Mismatched length between affinities and fingerprints (%d vs %d)" % (len(y),len(data)))
            sys.exit(-1)
        data.iloc[:,1] = y

    np.random.seed(0) #I like reproducible results, so fix a seed
    data = data.iloc[np.random.permutation(len(data))] #shuffle order of data

    smi = np.array(data.iloc[:,0])
    y = np.array(data.iloc[:,1],dtype=float)
    x = np.array(data.iloc[:,2:],dtype=float)
    del data #dispose of pandas copy

    (fit,unfit) = trainmodels(args.model, x, y)

    fitscore = scoremodel(fit,x,y)
    print("Full Regression: AUC=%.4f" % fitscore)

    # Bug fix: honor the -k/--kfolds option; the fold count was hard-coded
    # to 3 regardless of what the user requested.
    kf = KFold(n_splits=args.kfolds)
    scores = []
    for train,test in kf.split(x):
        xtrain = x[train]
        ytrain = y[train]
        xtest = x[test]
        ytest = y[test]
        unfit.fit(xtrain,ytrain)
        scores.append(scoremodel(unfit, xtest, ytest))
    print("CV: AUC=%.4f (std %.4f)" % (np.mean(scores), np.std(scores)))
    print("Gap: %.4f" % (fitscore-np.mean(scores)))

    if args.outfile:
        pickle.dump(fit, args.outfile, pickle.HIGHEST_PROTOCOL)
| apache-2.0 |
EttusResearch/gnuradio | gr-digital/examples/example_fll.py | 49 | 5715 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fll(gr.top_block):
    """Flowgraph: random BPSK symbols -> RRC interpolator -> channel model
    -> band-edge FLL, with sinks capturing every signal of interest."""

    def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
        gr.top_block.__init__(self)

        # Pulse-shaping filter taps.
        taps = filter.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        # Random +/-1 symbols, rotated by the requested phase offset.
        symbols = 2.0*scipy.random.randint(0, 2, N) - 1.0
        symbols = scipy.exp(1j*poffset) * symbols

        # Processing blocks.
        self.src = blocks.vector_source_c(symbols.tolist(), False)
        self.rrc = filter.interp_fir_filter_ccf(sps, taps)
        self.chn = channels.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        # Sinks: shaped source plus the FLL's four outputs.
        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_fll = blocks.vector_sink_c()
        self.vsnk_frq = blocks.vector_sink_f()
        self.vsnk_phs = blocks.vector_sink_f()
        self.vsnk_err = blocks.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll,1), self.vsnk_frq)
        self.connect((self.fll,2), self.vsnk_phs)
        self.connect((self.fll,3), self.vsnk_err)
def main():
    """Parse command-line options, run the FLL example flowgraph, and plot
    the LO frequency, loop error, IQ constellation, and time-domain symbols."""
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=2000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("-S", "--sps", type="int", default=4,
                      help="Set the samples per symbol [default=%default]")
    parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
                      help="Set the rolloff factor [default=%default]")
    parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
                      help="Set the loop bandwidth [default=%default]")
    parser.add_option("-n", "--ntaps", type="int", default=45,
                      help="Set the number of taps in the filters [default=%default]")
    parser.add_option("", "--noise", type="eng_float", default=0.0,
                      help="Set the simulation noise voltage [default=%default]")
    parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
                      help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
    parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
                      help="Set the simulation's timing offset [default=%default]")
    parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
                      help="Set the simulation's phase offset [default=%default]")
    (options, args) = parser.parse_args ()

    # Adjust N for the interpolation by sps
    options.nsamples = options.nsamples // options.sps

    # Set up the program-under-test
    put = example_fll(options.nsamples, options.sps, options.rolloff,
                      options.ntaps, options.bandwidth, options.noise,
                      options.foffset, options.toffset, options.poffset)
    put.run()

    # Collect the sink data after the flowgraph finishes.
    data_src = scipy.array(put.vsnk_src.data())
    data_err = scipy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(2,2,1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2,2,2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols
    s3 = f1.add_subplot(2,2,3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2,2,4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C exit without printing a traceback.
        pass
| gpl-3.0 |
zohannn/motion_manager | scripts/old/task_2_reaching/var_0/predicting_task_2_var_0_10k.py | 1 | 138204 | #!/usr/bin/env python3
import sys
import pandas as pd
from sklearn import decomposition
from sklearn import metrics
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
from random import randint
# HUPL
from HUPL.learner import preprocess_features
from HUPL.learner import preprocess_targets
from HUPL.learner import normalize_linear_scale
from HUPL.learner import denormalize_linear_scale
from HUPL.learner import my_input_fn
from HUPL.learner import construct_feature_columns
if len(sys.argv) <= 3:
sys.exit("Not enough args")
data_file = str(sys.argv[1])
models_dir = str(sys.argv[2])
pred_file_path = str(sys.argv[3])
data_pred = sys.argv[4].split(',')
data_pred_mod = np.array(data_pred)
# Target info
#target_x = float(data_pred[0])
#target_y = float(data_pred[1])
#target_z = float(data_pred[2])
#target_roll = float(data_pred[3])
#target_pitch = float(data_pred[4])
#target_yaw = float(data_pred[5])
# Obstacle 1 info
#obstacle_1_x = float(data_pred[6])
#obstacle_1_y = float(data_pred[7])
#obstacle_1_z = float(data_pred[8])
#obstacle_1_roll = float(data_pred[9])
#obstacle_1_pitch = float(data_pred[10])
#obstacle_1_yaw = float(data_pred[11])
# Settings
pd.set_option('display.max_columns', 10)
print_en = False
print_en_xf_plan = False
predict_xf_plan = True
dir_path_xf_plan = models_dir+"/xf_plan"
xf_plan_prediction = pd.DataFrame()
print_en_zf_L_plan = False
predict_zf_L_plan = True
dir_path_zf_L_plan = models_dir+"/zf_L_plan"
zf_L_plan_prediction = pd.DataFrame()
print_en_zf_U_plan = False
predict_zf_U_plan = True
dir_path_zf_U_plan = models_dir+"/zf_U_plan"
zf_U_plan_prediction = pd.DataFrame()
print_en_dual_f_plan = False
predict_dual_f_plan = True
dir_path_dual_f_plan = models_dir+"/dual_f_plan"
dual_f_plan_prediction = pd.DataFrame()
print_en_x_bounce = False
predict_x_bounce = True
dir_path_x_bounce = models_dir+"/x_bounce"
x_bounce_prediction = pd.DataFrame()
print_en_zb_L= False
predict_zb_L = True
dir_path_zb_L = models_dir+"/zb_L"
zb_L_prediction = pd.DataFrame()
print_en_zb_U = False
predict_zb_U = True
dir_path_zb_U = models_dir+"/zb_U"
zb_U_prediction = pd.DataFrame()
print_en_dual_bounce = False
predict_dual_bounce = True
dir_path_dual_bounce = models_dir+"/dual_bounce"
dual_bounce_prediction = pd.DataFrame()
learning_rate=0.009
learning_rate_class=0.009
n_pca_comps_xf_plan = 7
n_clusters_xf_plan = 6
min_cluster_size_xf_plan = 10
th_xf_plan = 0.001
periods_xf_plan = 20
steps_xf_plan = 1000
batch_size_xf_plan = 100
units_xf_plan = [10,10]
units_xf_plan_class = [10,10,10]
n_clusters_zf_L_plan = 2
min_cluster_size_zf_L_plan = 10
th_zf_L_plan = 0.001
periods_zf_L_plan = 15
steps_zf_L_plan = 500
batch_size_zf_L_plan = 100
units_zf_L_plan = [10,10]
units_zf_L_plan_class = [10,10,10]
n_clusters_zf_U_plan = 2
min_cluster_size_zf_U_plan = 10
th_zf_U_plan = 0.001
periods_zf_U_plan = 10
steps_zf_U_plan = 1000
batch_size_zf_U_plan = 100
units_zf_U_plan = [10,10]
units_zf_U_plan_class = [10,10,10]
n_pca_comps_dual_f_plan = 10
n_clusters_dual_f_plan = 4
min_cluster_size_dual_f_plan = 10
th_dual_f_plan = 0.0001
periods_dual_f_plan = 10
steps_dual_f_plan = 1000
batch_size_dual_f_plan = 100
units_dual_f_plan = [10,10]
units_dual_f_plan_class = [10,10,10]
n_pca_comps_x_bounce = 9
n_clusters_x_bounce = 6
min_cluster_size_x_bounce = 10
th_x_bounce = 0.001
periods_x_bounce = 20
steps_x_bounce = 1000
batch_size_x_bounce = 100
units_x_bounce = [10,10]
units_x_bounce_class = [10,10,10]
n_clusters_zb_L = 2
min_cluster_size_zb_L = 10
th_zb_L = 0.001
periods_zb_L = 10
steps_zb_L = 500
batch_size_zb_L = 100
units_zb_L = [10,10]
units_zb_L_class = [10,10,10]
n_clusters_zb_U = 2
min_cluster_size_zb_U = 10
th_zb_U = 0.001
periods_zb_U = 10
steps_zb_U = 500
batch_size_zb_U = 100
units_zb_U = [10,10]
units_zb_U_class = [10,10,10]
n_pca_comps_dual_bounce = 10
n_clusters_dual_bounce = 6
min_cluster_size_dual_bounce = 10
th_dual_bounce = 0.001
periods_dual_bounce = 20
steps_dual_bounce = 1000
batch_size_dual_bounce = 100
units_dual_bounce = [10,10]
units_dual_bounce_class = [10,10,10]
task_1_dataframe = pd.read_csv(data_file,sep=",")
task_1_dataframe = task_1_dataframe.reindex(np.random.permutation(task_1_dataframe.index))
(inputs_dataframe,inputs_cols,null_in_cols,id_null_cols) = preprocess_features(task_1_dataframe)
#print("Input columns:")
#print(inputs_cols)
#print("Null columns:")
#print(null_in_cols)
#print("ID of null columns:")
#print(id_null_cols)
data_pred_mod_new = np.delete(data_pred_mod,id_null_cols)
#print("Modified data:")
#print(data_pred_mod_new)
r = randint(0,len(task_1_dataframe.index))
task_1_sample = task_1_dataframe.iloc[[r]]
cols_xf_plan_tot = [col for col in task_1_dataframe if col.startswith('xf_plan')]
cols_zf_L_plan_tot = [col for col in task_1_dataframe if col.startswith('zf_L_plan')]
cols_zf_U_plan_tot = [col for col in task_1_dataframe if col.startswith('zf_U_plan')]
cols_dual_f_plan_tot = [col for col in task_1_dataframe if col.startswith('dual_f_plan')]
cols_x_bounce_tot = [col for col in task_1_dataframe if col.startswith('x_bounce')]
cols_zb_L_tot = [col for col in task_1_dataframe if col.startswith('zb_L')]
cols_zb_U_tot = [col for col in task_1_dataframe if col.startswith('zb_U')]
cols_dual_bounce_tot = [col for col in task_1_dataframe if col.startswith('dual_bounce')]
normalized_inputs,normalized_inputs_max,normalized_inputs_min = normalize_linear_scale(inputs_dataframe)
(outputs_dataframe, null_outputs) = preprocess_targets(task_1_dataframe)
inputs_test_df= pd.DataFrame([data_pred_mod_new],columns=inputs_cols)
norm_inputs_test_df = pd.DataFrame([data_pred_mod_new],columns=inputs_cols)
#print(inputs_test_df)
for col in inputs_cols:
min_val = normalized_inputs_min[col]
max_val = normalized_inputs_max[col]
scale = (max_val - min_val) / 2.0
norm_inputs_test_df[col] = (((float(inputs_test_df[col]) - min_val) / scale) - 1.0)
#print(norm_inputs_test_df)
# Split the preprocessed output dataframe into one dataframe per target family.
# plan final posture columns
cols_x_f_plan = [col for col in outputs_dataframe if col.startswith('xf_plan')]
cols_zf_L_plan = [col for col in outputs_dataframe if col.startswith('zf_L_plan')]
cols_zf_U_plan = [col for col in outputs_dataframe if col.startswith('zf_U_plan')]
cols_dual_f_plan = [col for col in outputs_dataframe if col.startswith('dual_f_plan')]
# bounce posture columns
cols_x_bounce = [col for col in outputs_dataframe if col.startswith('x_bounce')]
cols_zb_L = [col for col in outputs_dataframe if col.startswith('zb_L')]
cols_zb_U = [col for col in outputs_dataframe if col.startswith('zb_U')]
cols_dual_bounce = [col for col in outputs_dataframe if col.startswith('dual_bounce')]
outputs_xf_plan_df = outputs_dataframe[cols_x_f_plan]
outputs_zf_L_plan_df = outputs_dataframe[cols_zf_L_plan]
outputs_zf_U_plan_df = outputs_dataframe[cols_zf_U_plan]
outputs_dual_f_plan_df = outputs_dataframe[cols_dual_f_plan]
outputs_x_bounce_df = outputs_dataframe[cols_x_bounce]
outputs_zb_L_df = outputs_dataframe[cols_zb_L]
outputs_zb_U_df = outputs_dataframe[cols_zb_U]
outputs_dual_bounce_df = outputs_dataframe[cols_dual_bounce]
# Clamp the bounce dual variables away from zero and to an upper bound.
outputs_dual_bounce_df = outputs_dual_bounce_df.clip(lower=0.0001,upper=50)
# Debug dump of the per-family output dataframes.
if(print_en):
    print("X_f_plan:")
    print(outputs_xf_plan_df.head())
    print("zf_L_plan:")
    print(outputs_zf_L_plan_df.head())
    print("zf_U_plan:")
    print(outputs_zf_U_plan_df.head())
    print("dual_f_plan:")
    print(outputs_dual_f_plan_df.head())
    print("X_bounce:")
    print(outputs_x_bounce_df.head())
    print("zb_L:")
    print(outputs_zb_L_df.head())
    print("zb_U:")
    print(outputs_zb_U_df.head())
    print("dual_bounce:")
    print(outputs_dual_bounce_df.head())
if predict_xf_plan:
    # ----- FINAL POSTURE SELECTION: FINAL POSTURE --------------------------------------------- #
    # Predicts the planned final posture (xf_plan) for the normalized test input with
    # three alternative models (NN, SVM, KNN) plus a "Random" baseline (the xf_plan
    # columns of a random training sample).  Common scheme for each model:
    #   1. classify the input into a cluster,
    #   2. PCA-project that cluster's outputs,
    #   3. predict near-constant principal components (IQR <= th_xf_plan) with the
    #      cluster mean and the remaining components with a regressor,
    #   4. inverse-transform the PCA and denormalize back to the full column set.
    if not outputs_xf_plan_df.empty:
        # Per-column training min/max, needed to denormalize the predictions.
        # NOTE(review): pd.Series.from_csv was removed in pandas 1.0 — this script
        # requires an old pandas version.
        outputs_xf_plan_df_max = pd.Series.from_csv(dir_path_xf_plan+"/xf_plan_max.csv",sep=',')
        outputs_xf_plan_df_min = pd.Series.from_csv(dir_path_xf_plan + "/xf_plan_min.csv",sep=',')
        # ------------------------- Random ---------------------------------------- #
        xf_plan_rdm_prediction = task_1_sample[cols_xf_plan_tot]
        if (print_en_xf_plan):
            print("Random xf_plan: ")
            print(xf_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Restore the trained DNN classifier from model_dir and classify the input.
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_xf_plan,
            hidden_units=units_xf_plan_class,
            model_dir=dir_path_xf_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: prediction only
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        # Load the selected cluster's data and PCA-project its outputs.
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim                          # components left for the regressor
        test_predictions_1 = np.array([])   # mean-predicted (near-constant) components
        test_predictions_2 = []             # regressor-predicted components
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        # Components whose inter-quartile range is <= th_xf_plan are treated as
        # constants and predicted by the cluster mean (sqrt(pow(x,2)) == |IQR|).
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        # NOTE(review): the loop variable shadows the builtin `str` for the rest of
        # the module (this pattern repeats throughout the script).
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # Restore the per-cluster DNN regressor and predict the remaining components.
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_xf_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_xf_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            tar_zeros = np.zeros(shape=(1,len(col_names_1)))
            targets_df = pd.DataFrame(tar_zeros,columns=col_names_1)
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean- and regressor-predicted components back into PCA column order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # Back-project from PCA space and denormalize to the original output scale.
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        # Embed the predicted columns into the full xf_plan column set (zeros elsewhere).
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted NN xf_plan: ")
            print(denorm_test_predictions_tot_df)
        # sklearn classifiers below take a plain nested list as input.
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        # Same pipeline with pre-trained SVM classifier/regressor loaded via joblib.
        svm_classifier = joblib.load(dir_path_xf_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        # Near-constant components -> cluster mean (same rule as the NN branch).
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            svm_regressor = joblib.load(dir_path_xf_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): the INPUT features are sliced to ldim (the number of
            # remaining OUTPUT components) — presumably matching how the regressor
            # was trained; verify against the training script.
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted SVM xf_plan: ")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # Same pipeline again with pre-trained KNN classifier/regressor.
        knn_classifier = joblib.load(dir_path_xf_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            knn_regressor = joblib.load(dir_path_xf_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            # NOTE(review): same ldim-based input slicing as the SVM branch — verify.
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted KNN xf_plan: ")
            print(denorm_test_predictions_tot_df)
if predict_zf_L_plan:
    # ----- FINAL POSTURE SELECTION: LOWER BOUNDS --------------------------------------------- #
    # Predicts the plan lower bounds (zf_L_plan) with NN, SVM and KNN models, plus a
    # "Random" baseline.  Same cluster-then-predict scheme as the xf_plan section,
    # but WITHOUT the PCA step: near-constant output columns (IQR <= th_zf_L_plan)
    # are predicted by the cluster mean, the rest by a regressor, and the merged
    # result is denormalized directly.  If there are no zf_L outputs at all, every
    # prediction falls back to an all-zeros dataframe (else branch at the bottom).
    if not outputs_zf_L_plan_df.empty:
        # NOTE(review): pd.Series.from_csv was removed in pandas 1.0.
        outputs_zf_L_plan_df_max = pd.Series.from_csv(dir_path_zf_L_plan + "/zf_L_plan_max.csv", sep=',')
        outputs_zf_L_plan_df_min = pd.Series.from_csv(dir_path_zf_L_plan + "/zf_L_plan_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        zf_L_plan_rdm_prediction = task_1_sample[cols_zf_L_plan_tot]
        if (print_en_zf_L_plan):
            print("Random zf_L_plan: ")
            print(zf_L_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Restore the trained DNN classifier and pick a cluster for the input.
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_zf_L_plan,
            hidden_units=units_zf_L_plan_class,
            model_dir=dir_path_zf_L_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: prediction only
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim                          # columns left for the regressor
        test_predictions_1 = np.array([])   # mean-predicted (near-constant) columns
        test_predictions_2 = []             # regressor-predicted columns
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        # Columns whose inter-quartile range is <= th_zf_L_plan -> cluster mean.
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        # NOTE(review): loop variable shadows the builtin `str` (pattern repeats below).
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # Per-cluster DNN regressor for the remaining columns.
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_zf_L_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_zf_L_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean- and regressor-predicted columns back into original order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # Denormalize and embed into the full zf_L_plan column set (zeros elsewhere).
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted NN zf_L_plan:")
            print(denorm_test_predictions_tot_df)
        # sklearn classifiers below take a plain nested list as input.
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        svm_classifier = joblib.load(dir_path_zf_L_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_zf_L_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): INPUT features sliced to ldim (a count of OUTPUT columns)
            # — presumably matching the training script; verify.
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted SVM zf_L_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        knn_classifier = joblib.load(dir_path_zf_L_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_zf_L_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            # NOTE(review): same ldim-based input slicing as the SVM branch — verify.
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted KNN zf_L_plan:")
            print(denorm_test_predictions_tot_df)
    else:
        # No zf_L outputs in the dataset: every model "predicts" an all-zeros
        # dataframe over the zf_L null-output columns.
        col_names = [col for col in null_outputs if col.startswith('zf_L')]
        zeros = np.zeros(shape=(1,len(col_names)))
        test_pred_df = pd.DataFrame(zeros,columns=col_names)
        zf_L_plan_rdm_prediction = test_pred_df.copy()
        zf_L_plan_nn_prediction = test_pred_df.copy()
        zf_L_plan_svm_prediction = test_pred_df.copy()
        zf_L_plan_knn_prediction = test_pred_df.copy()
        if(print_en_zf_L_plan):
            print("Random zf_L:")
            print(test_pred_df)
            print("Predicted NN zf_L:")
            print(test_pred_df)
            print("Predicted SVM zf_L:")
            print(test_pred_df)
            print("Predicted KNN zf_L:")
            print(test_pred_df)
if predict_zf_U_plan:
# ----- FINAL POSTURE SELECTION: UPPER BOUNDS --------------------------------------------- #
if not outputs_zf_U_plan_df.empty:
outputs_zf_U_plan_df_max = pd.Series.from_csv(dir_path_zf_U_plan + "/zf_U_plan_max.csv", sep=',')
outputs_zf_U_plan_df_min = pd.Series.from_csv(dir_path_zf_U_plan + "/zf_U_plan_min.csv", sep=',')
# ------------------------- Random ---------------------------------------- #
zf_U_plan_rdm_prediction = task_1_sample[cols_zf_U_plan_tot]
if (print_en_zf_U_plan):
print("Random zf_U_plan: ")
print(zf_U_plan_rdm_prediction)
# ------------------------- Neural Network ---------------------------------------- #
nn_classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(norm_inputs_test_df),
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
n_classes=n_clusters_zf_U_plan,
hidden_units=units_zf_U_plan_class,
model_dir=dir_path_zf_U_plan+"/classification/nn"
)
targets_df = pd.DataFrame([[0.0]])
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df,
num_epochs=1,
shuffle=False)
test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
n_cluster = test_pred[0] # the input belongs to this cluster
selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
dim = len(selected_cl_out_zf_U_plan_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1,len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
nn_regressor = tf.estimator.DNNRegressor(
feature_columns=construct_feature_columns(norm_inputs_test_df),
hidden_units=units_zf_U_plan,
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
label_dimension=ldim,
model_dir=dir_path_zf_U_plan + "/cluster" + repr(n_cluster)+"/nn"
)
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df[col_names_1],
num_epochs=1,
shuffle=False)
test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
for str in cols_zf_U_plan_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zf_U_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zf_U_plan):
print("Predicted NN zf_U_plan:")
print(denorm_test_predictions_tot_df)
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
svm_classifier = joblib.load(dir_path_zf_U_plan + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # --- zf_U_plan / SVM: predict the outputs of the selected cluster ---
        col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
        dim = len(selected_cl_out_zf_U_plan_df.columns.values)
        ldim = dim  # columns left for the regressor once near-constant ones are removed
        test_predictions_1 = np.array([])  # mean-based predictions (near-constant columns)
        test_predictions_2 = []            # regressor predictions (remaining columns)
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []  # names of the columns predicted by their mean
        col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)
        for j in range(0, dim):
            # sqrt(pow(a-b, 2)) == |q25 - q75|: a column whose inter-quartile spread
            # is within the threshold is treated as constant and predicted by its mean.
            if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
        # NOTE(review): the loop variable shadows the builtin 'str' (idiom used throughout this file)
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_zf_U_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): the input frame is sliced with ldim, which counts OUTPUT columns - confirm intent
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # stitch mean-based and regressed columns back together in the original order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
        # expand to the full zf_U_plan column set; columns never predicted stay zero
        zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
        for str in cols_zf_U_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_U_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_U_plan):
            print("Predicted SVM zf_U_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # classify the test input into a cluster, then predict with that cluster's KNN models
        knn_classifier = joblib.load(dir_path_zf_U_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
        dim = len(selected_cl_out_zf_U_plan_df.columns.values)
        ldim = dim  # columns left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: column is near-constant, predict its mean
            if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_zf_U_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge the two partial predictions, preserving the original column order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
        # expand to the full zf_U_plan column set; columns never predicted stay zero
        zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
        for str in cols_zf_U_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_U_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_U_plan):
            print("Predicted KNN zf_U_plan:")
            print(denorm_test_predictions_tot_df)
    else:
        # No zf_U_plan outputs in the training data: every predictor falls back
        # to an all-zero frame over the zf_U columns.
        col_names = [col for col in null_outputs if col.startswith('zf_U')]
        zeros = np.zeros(shape=(1,len(col_names)))
        test_pred_df = pd.DataFrame(zeros,columns=col_names)
        zf_U_plan_rdm_prediction = test_pred_df.copy()
        zf_U_plan_nn_prediction = test_pred_df.copy()
        zf_U_plan_svm_prediction = test_pred_df.copy()
        zf_U_plan_knn_prediction = test_pred_df.copy()
        if(print_en_zf_U_plan):
            print("Random zf_U:")
            print(test_pred_df)
            print("Predicted NN zf_U:")
            print(test_pred_df)
            print("Predicted SVM zf_U:")
            print(test_pred_df)
            print("Predicted KNN zf_U:")
            print(test_pred_df)
if predict_dual_f_plan:
    # ----- FINAL POSTURE SELECTION: DUAL VARIABLES --------------------------------------------- #
    if not outputs_dual_f_plan_df.empty:
        # de-normalization bounds saved at training time
        # NOTE(review): pd.Series.from_csv is deprecated in modern pandas (use pd.read_csv)
        outputs_dual_f_plan_df_max = pd.Series.from_csv(dir_path_dual_f_plan + "/dual_f_plan_max.csv", sep=',')
        outputs_dual_f_plan_df_min = pd.Series.from_csv(dir_path_dual_f_plan + "/dual_f_plan_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        dual_f_plan_rdm_prediction = task_1_sample[cols_dual_f_plan_tot]
        if (print_en_dual_f_plan):
            print("Random dual_f_plan: ")
            print(dual_f_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # NN classifier selects the cluster the test input belongs to
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_dual_f_plan,
            hidden_units=units_dual_f_plan_class,
            model_dir=dir_path_dual_f_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: only predict() is called
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df  # PCA disabled here: work on raw outputs
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # columns left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: column is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # NN regressor predicts the remaining (non-constant) columns
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_dual_f_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_dual_f_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge mean-based and regressed columns, restoring the original order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        # expand to the full dual_f column set; columns never predicted stay zero
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted NN dual_f_plan:")
            print(denorm_test_predictions_tot_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()  # list form for the sklearn models
        # ------------------------- Support Vector Machines ---------------------------------------- #
        # classify the test input into a cluster, then predict with that cluster's SVM models
        svm_classifier = joblib.load(dir_path_dual_f_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df  # PCA disabled here: work on raw outputs
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # columns left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: column is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_dual_f_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge the two partial predictions, preserving the original column order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        # expand to the full dual_f column set; columns never predicted stay zero
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted SVM dual_f_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # classify the test input into a cluster, then predict with that cluster's KNN models
        knn_classifier = joblib.load(dir_path_dual_f_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df  # PCA disabled here: work on raw outputs
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # columns left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: column is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_dual_f_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge the two partial predictions, preserving the original column order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        # expand to the full dual_f column set; columns never predicted stay zero
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted KNN dual_f_plan:")
            print(denorm_test_predictions_tot_df)
if predict_x_bounce:
    # ----- BOUNCE POSTURE SELECTION: BOUNCE POSTURE --------------------------------------------- #
    if not outputs_x_bounce_df.empty:
        # de-normalization bounds saved at training time
        # NOTE(review): pd.Series.from_csv is deprecated in modern pandas (use pd.read_csv)
        outputs_x_bounce_df_max = pd.Series.from_csv(dir_path_x_bounce+"/x_bounce_max.csv",sep=',')
        outputs_x_bounce_df_min = pd.Series.from_csv(dir_path_x_bounce + "/x_bounce_min.csv",sep=',')
        # ------------------------- Random ---------------------------------------- #
        x_bounce_rdm_prediction = task_1_sample[cols_x_bounce_tot]
        if (print_en_x_bounce):
            print("Random x_bounce: ")
            print(x_bounce_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # NN classifier selects the cluster the test input belongs to
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_x_bounce,
            hidden_units=units_x_bounce_class,
            model_dir=dir_path_x_bounce+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: only predict() is called
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #print("Cluster:")
        #print(n_cluster)
        # TO DO
        #n_comps = n_pca_comps_x_bounce
        #if (n_cluster==2 or n_cluster==5):
        #    n_comps = n_pca_comps_x_bounce - 3
        #elif(n_cluster==0 or n_cluster==3 or n_cluster==4):
        #    n_comps = n_pca_comps_x_bounce - 2
        # reduce the cluster outputs with PCA; regression happens in PC space
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # principal components left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: component is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # NN regressor predicts the remaining (non-constant) components
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_x_bounce,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_x_bounce + "/cluster" + repr(n_cluster)+"/nn"
            )
            tar_zeros = np.zeros(shape=(1,len(col_names_1)))
            targets_df = pd.DataFrame(tar_zeros,columns=col_names_1)
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge mean-based and regressed components, restoring the original order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # map the principal-component predictions back to the full posture space
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): unlike the other sections, the prediction keeps
        # denorm_test_predictions_df, so the tot_df built just above is unused here - confirm
        x_bounce_nn_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted NN x_bounce:")
            print(denorm_test_predictions_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()  # list form for the sklearn models
        # ------------------------- Support Vector Machines ---------------------------------------- #
        # classify the test input into a cluster, then predict with that cluster's SVM models
        svm_classifier = joblib.load(dir_path_x_bounce + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # reduce the cluster outputs with PCA; regression happens in PC space
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # principal components left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: component is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_x_bounce + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge the two partial predictions, preserving the original column order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # map the principal-component predictions back to the full posture space
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): prediction keeps denorm_test_predictions_df; tot_df above is unused here - confirm
        x_bounce_svm_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted SVM x_bounce:")
            print(denorm_test_predictions_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # classify the test input into a cluster, then predict with that cluster's KNN models
        knn_classifier = joblib.load(dir_path_x_bounce + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # reduce the cluster outputs with PCA; regression happens in PC space
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # principal components left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: component is near-constant, predict its mean
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_x_bounce + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge the two partial predictions, preserving the original column order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # map the principal-component predictions back to the full posture space
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): prediction keeps denorm_test_predictions_df; tot_df above is unused here - confirm
        x_bounce_knn_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted KNN x_bounce:")
            print(denorm_test_predictions_df)
if predict_zb_L:
    # ---------------- BOUNCE POSTURE SELECTION: LOWER BOUNDS --------------------------------------------- #
    if not outputs_zb_L_df.empty:
        # de-normalization bounds saved at training time
        # NOTE(review): pd.Series.from_csv is deprecated in modern pandas (use pd.read_csv)
        outputs_zb_L_df_max = pd.Series.from_csv(dir_path_zb_L + "/zb_L_max.csv", sep=',')
        outputs_zb_L_df_min = pd.Series.from_csv(dir_path_zb_L + "/zb_L_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        zb_L_rdm_prediction = task_1_sample[cols_zb_L_tot]
        if (print_en_zb_L):
            print("Random zb_L: ")
            print(zb_L_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # NN classifier selects the cluster the test input belongs to
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_zb_L,
            hidden_units=units_zb_L_class,
            model_dir=dir_path_zb_L+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: only predict() is called
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zb_L_df.columns.values)
        dim = len(selected_cl_out_zb_L_df.columns.values)
        ldim = dim  # columns left for the regressor
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
        for j in range(0, dim):
            # |q25 - q75| below threshold: column is near-constant, predict its mean
            if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # NN regressor predicts the remaining (non-constant) columns
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_zb_L,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_zb_L + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # merge mean-based and regressed columns, restoring the original order
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
        # expand to the full zb_L column set; columns never predicted stay zero
        zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
        for str in cols_zb_L_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zb_L_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zb_L):
            print("Predicted NN zb_L:")
            print(denorm_test_predictions_tot_df)
# SVM pipeline needs a plain nested list rather than a DataFrame.
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
# Classify the test input into a cluster, then load that cluster's training data
# and apply the same mean-vs-regressor split used by the NN branch above.
svm_classifier = joblib.load(dir_path_zb_L + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_L_df.columns.values)
dim = len(selected_cl_out_zb_L_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
for j in range(0, dim):
    # abs(IQR) <= th_zb_L: near-constant column, use the cluster mean.
    if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
        if (test_predictions_1.size == 0):
            test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
        else:
            test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
        ldim = ldim - 1
        test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
for str in test_pred_col_names_1:
    col_names_1.remove(str)
if (test_predictions_1.size != 0):
    test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                         index=norm_inputs_test_df.index,
                                         columns=test_pred_col_names_1)
if (ldim != 0):
    svm_regressor = joblib.load(dir_path_zb_L + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
    # NOTE(review): this slices the first ldim *input* columns, but ldim counts the
    # remaining *output* dimensions — verify the regressor was trained on this exact
    # input slice, otherwise this is passing the wrong feature set.
    test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
    # NOTE(review): `[0:, 0:]` assumes predict() returns a 2-D array; sklearn returns
    # a 1-D array for single-output regressors — confirm ldim > 1 is guaranteed here.
    test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                         index=norm_inputs_test_df.index,
                                         columns=col_names_1)
# Merge mean-based and regressed columns back into the original order.
if (test_predictions_df_1.empty):
    test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
    test_predictions_df = test_predictions_df_1
else:
    for str in col_names:
        if str in test_predictions_df_1:
            test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
        elif str in test_predictions_df_2:
            test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# De-normalize and expand to the full zb_L column set (unpredicted columns stay 0).
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
for str in cols_zb_L_tot:
    if str in denorm_test_predictions_df:
        denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_L_svm_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_L):
    print("Predicted SVM zb_L:")
    print(denorm_test_predictions_tot_df)
# ------------------------- K-Nearest Neighbors ---------------------------------------- #
# Same cluster-then-predict scheme as the SVM branch, using the persisted KNN models.
knn_classifier = joblib.load(dir_path_zb_L + "/classification/knn/knn_clf.joblib")
test_pred = knn_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_L_df.columns.values)
dim = len(selected_cl_out_zb_L_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
for j in range(0, dim):
    # abs(IQR) <= th_zb_L: near-constant column, use the cluster mean.
    if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
        if (test_predictions_1.size == 0):
            test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
        else:
            test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
        ldim = ldim - 1
        test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
for str in test_pred_col_names_1:
    col_names_1.remove(str)
if (test_predictions_1.size != 0):
    test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                         index=norm_inputs_test_df.index,
                                         columns=test_pred_col_names_1)
if (ldim != 0):
    knn_regressor = joblib.load(dir_path_zb_L + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
    # NOTE(review): input sliced by ldim (an *output* count) — same concern as the
    # SVM branch; verify against the training code.
    test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
    test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                         index=norm_inputs_test_df.index,
                                         columns=col_names_1)
# Merge mean-based and regressed columns back into the original order.
if (test_predictions_df_1.empty):
    test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
    test_predictions_df = test_predictions_df_1
else:
    for str in col_names:
        if str in test_predictions_df_1:
            test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
        elif str in test_predictions_df_2:
            test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# De-normalize and expand to the full zb_L column set (unpredicted columns stay 0).
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
for str in cols_zb_L_tot:
    if str in denorm_test_predictions_df:
        denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_L_knn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_L):
    print("Predicted KNN zb_L:")
    print(denorm_test_predictions_tot_df)
# Predict the bounce-posture UPPER bounds (zb_U) with four warm-start strategies
# (Random / NN / SVM / KNN); mirrors the zb_L section above.
if predict_zb_U:
    # ----- BOUNCE POSTURE SELECTION: UPPER BOUNDS --------------------------------------------- #
    if not outputs_zb_U_df.empty:
        # NOTE(review): pd.Series.from_csv was deprecated in pandas 0.21 and removed
        # in 1.0 — migrate to pd.read_csv(...).squeeze(); confirm the pinned pandas version.
        outputs_zb_U_df_max = pd.Series.from_csv(dir_path_zb_U + "/zb_U_max.csv", sep=',')
        outputs_zb_U_df_min = pd.Series.from_csv(dir_path_zb_U + "/zb_U_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        # Baseline: take the zb_U columns straight from the sampled task.
        zb_U_rdm_prediction = task_1_sample[cols_zb_U_tot]
        if (print_en_zb_U):
            print("Random zb_U: ")
            print(zb_U_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Restore the cluster classifier from its model_dir and pick the cluster
        # this test input belongs to.
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_zb_U,
            hidden_units=units_zb_U_class,
            model_dir=dir_path_zb_U+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: prediction only
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0]  # the input belongs to this cluster
        selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zb_U_df.columns.values)
        dim = len(selected_cl_out_zb_U_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
        for j in range(0, dim):
            # abs(IQR) <= th_zb_U: near-constant output column, predict its cluster mean.
            if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # Per-cluster NN regressor for the remaining (variable) output columns.
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_zb_U,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_zb_U + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean-based and regressed columns in the original column order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # De-normalize and expand into the full zb_U column set (missing columns stay 0).
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
        zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
        for str in cols_zb_U_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zb_U_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zb_U):
            print("Predicted NN zb_U:")
            print(denorm_test_predictions_tot_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        svm_classifier = joblib.load(dir_path_zb_U + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zb_U_df.columns.values)
        dim = len(selected_cl_out_zb_U_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_zb_U + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): slices the first ldim *input* columns with an *output*
            # dimension count, and `[0:, 0:]` assumes a 2-D predict() result — verify
            # against the training code (same concern as the zb_L section).
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
        zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
        for str in cols_zb_U_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zb_U_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zb_U):
            print("Predicted SVM zb_U:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        knn_classifier = joblib.load(dir_path_zb_U + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zb_U_df.columns.values)
        dim = len(selected_cl_out_zb_U_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_zb_U + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
        zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
        for str in cols_zb_U_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zb_U_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zb_U):
            print("Predicted KNN zb_U:")
            print(denorm_test_predictions_tot_df)
    else:
        # No zb_U training outputs: fall back to all-zero predictions for every strategy.
        col_names = [col for col in null_outputs if col.startswith('zb_U')]
        zeros = np.zeros(shape=(1,len(col_names)))
        test_pred_df = pd.DataFrame(zeros,columns=col_names)
        zb_U_rdm_prediction = test_pred_df.copy()
        zb_U_nn_prediction = test_pred_df.copy()
        zb_U_svm_prediction = test_pred_df.copy()
        zb_U_knn_prediction = test_pred_df.copy()
        if(print_en_zb_U):
            print("Random zb_U:")
            print(test_pred_df)
            print("Predicted NN zb_U:")
            print(test_pred_df)
            print("Predicted SVM zb_U:")
            print(test_pred_df)
            print("Predicted KNN zb_U:")
            print(test_pred_df)
# Predict the dual variables of the bounce-posture problem. Unlike zb_L/zb_U, the
# outputs are first compressed with PCA; predictions are made in PCA space and
# projected back with inverse_transform before de-normalization.
if predict_dual_bounce:
    # ----- BOUNCE POSTURE SELECTION: DUAL VARIABLES --------------------------------------------- #
    if not outputs_dual_bounce_df.empty:
        # NOTE(review): pd.Series.from_csv was removed in pandas 1.0 — migrate to
        # pd.read_csv(...).squeeze(); confirm the pinned pandas version.
        outputs_dual_bounce_df_max = pd.Series.from_csv(dir_path_dual_bounce+"/dual_bounce_max.csv",sep=',')
        outputs_dual_bounce_df_min = pd.Series.from_csv(dir_path_dual_bounce + "/dual_bounce_min.csv",sep=',')
        # ------------------------- Random ---------------------------------------- #
        dual_bounce_rdm_prediction = task_1_sample[cols_dual_bounce_tot]
        if (print_en_dual_bounce):
            print("Random dual_bounce: ")
            print(dual_bounce_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Restore the cluster classifier and select the cluster for this input.
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_dual_bounce,
            hidden_units=units_dual_bounce_class,
            model_dir=dir_path_dual_bounce+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: prediction only
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0]  # the input belongs to this cluster
        selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #print("Cluster number:")
        #print(n_cluster)
        #n_comps = n_pca_comps_dual_bounce
        #if(n_cluster==0):
        #    n_comps = n_pca_comps_dual_bounce - 5
        #elif (n_cluster==2 or n_cluster==3):
        #    n_comps = n_pca_comps_dual_bounce - 7
        #elif(n_cluster==4):
        #    n_comps = n_pca_comps_dual_bounce - 6
        #elif(n_cluster==5):
        #    n_comps = n_pca_comps_dual_bounce - 4
        # Re-fit PCA on this cluster's outputs; predictions below live in PCA space.
        # NOTE(review): the PCA is refit at prediction time rather than loaded from
        # training artifacts — it must match the components used when the regressor
        # was trained, which holds only if fit on identical data; verify.
        Dual_bounce = selected_cl_out_dual_bounce_df.values
        pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
        pc = pca_dual_bounce.fit_transform(Dual_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # abs(IQR) <= th_dual_bounce: near-constant PCA component, use its mean.
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            nn_regressor = tf.estimator.DNNRegressor(feature_columns=construct_feature_columns(norm_inputs_test_df),
                                                     hidden_units=units_dual_bounce,
                                                     optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                                                     label_dimension=ldim,
                                                     model_dir=dir_path_dual_bounce + "/cluster" + repr(n_cluster)+"/nn"
                                                     )
            # Unlike the zb_L/zb_U NN branches, targets_df is rebuilt here over the
            # remaining columns only (functionally equivalent to targets_df[col_names_1]).
            tar_zeros = np.zeros(shape=(1, len(col_names_1)))
            targets_df = pd.DataFrame(tar_zeros, columns=col_names_1)
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean-based and regressed PCA components in the original order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # Project back from PCA space to the full dual-variable space, de-normalize,
        # then scatter into the complete dual_bounce column set (missing columns stay 0).
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
        zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
        for str in cols_dual_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_bounce_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_bounce):
            print("Predicted NN dual_bounce:")
            print(denorm_test_predictions_tot_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        svm_classifier = joblib.load(dir_path_dual_bounce + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # Refit PCA on the (possibly different) cluster chosen by the SVM classifier.
        Dual_bounce = selected_cl_out_dual_bounce_df.values
        pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
        pc = pca_dual_bounce.fit_transform(Dual_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            svm_regressor = joblib.load(dir_path_dual_bounce + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): input sliced by ldim (an *output*/PCA-component count), and
            # `[0:, 0:]` assumes predict() returns 2-D — verify against training code.
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
        zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
        for str in cols_dual_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_bounce_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_bounce):
            print("Predicted SVM dual_bounce:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        knn_classifier = joblib.load(dir_path_dual_bounce + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        Dual_bounce = selected_cl_out_dual_bounce_df.values
        pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
        pc = pca_dual_bounce.fit_transform(Dual_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            knn_regressor = joblib.load(dir_path_dual_bounce + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:],  # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
        zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
        for str in cols_dual_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_bounce_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_bounce):
            print("Predicted KNN dual_bounce:")
            print(denorm_test_predictions_tot_df)
# ------------------- Write down the prediction of the results ----------------------------------- #
# Serialize every warm-start prediction to a key=value file; values within one key
# are '|'-separated, 6-decimal floats. Each section repeats the same pattern:
# write key, write columns of the 1-row prediction frame, separator between values.
# NOTE(review): the file handle is opened here without a `with` block — confirm a
# matching close() exists past this excerpt.
pred_file = open(pred_file_path, "w")
pred_file.write("#### Dual variables and solutions of the optimization problems ####\n")
# ----------------- Random -------------------------- #
pred_file.write("### Warm start with Random ###\n")
pred_file.write("## Plan target posture selection data ##\n")
pred_file.write("X_rdm_plan=")
xf_plan_size = len(xf_plan_rdm_prediction.columns)
for i in range(0,xf_plan_size):
    pred_file.write("%.6f" % xf_plan_rdm_prediction.iloc[0,i])
    if not (i == xf_plan_size -1):  # '|' between values, none after the last
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_rdm_plan=")
zf_L_plan_size = len(zf_L_plan_rdm_prediction.columns)
for i in range(0,zf_L_plan_size):
    pred_file.write("%.6f" % zf_L_plan_rdm_prediction.iloc[0,i])
    if not (i == zf_L_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_rdm_plan=")
zf_U_plan_size = len(zf_U_plan_rdm_prediction.columns)
for i in range(0,zf_U_plan_size):
    pred_file.write("%.6f" % zf_U_plan_rdm_prediction.iloc[0,i])
    if not (i == zf_U_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_rdm_plan=")
dual_f_plan_size = len(dual_f_plan_rdm_prediction.columns)
for i in range(0,dual_f_plan_size):
    pred_file.write("%.6f" % dual_f_plan_rdm_prediction.iloc[0,i])
    if not (i == dual_f_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("## Bounce posture selection data ##\n")
pred_file.write("X_rdm_bounce=")
x_bounce_size = len(x_bounce_rdm_prediction.columns)
for i in range(0,x_bounce_size):
    pred_file.write("%.6f" % x_bounce_rdm_prediction.iloc[0,i])
    if not (i == x_bounce_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_rdm_bounce=")
zb_L_size = len(zb_L_rdm_prediction.columns)
for i in range(0,zb_L_size):
    pred_file.write("%.6f" % zb_L_rdm_prediction.iloc[0,i])
    if not (i == zb_L_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_rdm_bounce=")
zb_U_size = len(zb_U_rdm_prediction.columns)
for i in range(0,zb_U_size):
    pred_file.write("%.6f" % zb_U_rdm_prediction.iloc[0,i])
    if not (i == zb_U_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_rdm_bounce=")
dual_bounce_size = len(dual_bounce_rdm_prediction.columns)
for i in range(0,dual_bounce_size):
    pred_file.write("%.6f" % dual_bounce_rdm_prediction.iloc[0,i])
    if not (i == dual_bounce_size -1):
        pred_file.write("|")
pred_file.write("\n")
# ----------------- Neural Network -------------------------- #
pred_file.write("### Warm start with Neural Network ###\n")
pred_file.write("## Plan target posture selection data ##\n")
pred_file.write("X_nn_plan=")
xf_plan_size = len(xf_plan_nn_prediction.columns)
for i in range(0,xf_plan_size):
    pred_file.write("%.6f" % xf_plan_nn_prediction.iloc[0,i])
    if not (i == xf_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_nn_plan=")
zf_L_plan_size = len(zf_L_plan_nn_prediction.columns)
for i in range(0,zf_L_plan_size):
    pred_file.write("%.6f" % zf_L_plan_nn_prediction.iloc[0,i])
    if not (i == zf_L_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_nn_plan=")
zf_U_plan_size = len(zf_U_plan_nn_prediction.columns)
for i in range(0,zf_U_plan_size):
    pred_file.write("%.6f" % zf_U_plan_nn_prediction.iloc[0,i])
    if not (i == zf_U_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_nn_plan=")
dual_f_plan_size = len(dual_f_plan_nn_prediction.columns)
for i in range(0,dual_f_plan_size):
    pred_file.write("%.6f" % dual_f_plan_nn_prediction.iloc[0,i])
    if not (i == dual_f_plan_size -1):
        pred_file.write("|")
pred_file.write("\n")
pred_file.write("## Bounce posture selection data ##\n")
pred_file.write("X_nn_bounce=")
x_bounce_size = len(x_bounce_nn_prediction.columns)
for i in range(0,x_bounce_size):
pred_file.write("%.6f" % x_bounce_nn_prediction.iloc[0,i])
if not (i == x_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_nn_bounce=")
zb_L_size = len(zb_L_nn_prediction.columns)
for i in range(0,zb_L_size):
pred_file.write("%.6f" % zb_L_nn_prediction.iloc[0,i])
if not (i == zb_L_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_nn_bounce=")
zb_U_size = len(zb_U_nn_prediction.columns)
for i in range(0,zb_U_size):
pred_file.write("%.6f" % zb_U_nn_prediction.iloc[0,i])
if not (i == zb_U_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_nn_bounce=")
dual_bounce_size = len(dual_bounce_nn_prediction.columns)
for i in range(0,dual_bounce_size):
pred_file.write("%.6f" % dual_bounce_nn_prediction.iloc[0,i])
if not (i == dual_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
# ----------------- Support Vector Machines -------------------------- #
pred_file.write("### Warm start with Support Vector Machines ###\n")
pred_file.write("## Plan target posture selection data ##\n")
pred_file.write("X_svm_plan=")
xf_plan_size = len(xf_plan_svm_prediction.columns)
for i in range(0,xf_plan_size):
pred_file.write("%.6f" % xf_plan_svm_prediction.iloc[0,i])
if not (i == xf_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_svm_plan=")
zf_L_plan_size = len(zf_L_plan_svm_prediction.columns)
for i in range(0,zf_L_plan_size):
pred_file.write("%.6f" % zf_L_plan_svm_prediction.iloc[0,i])
if not (i == zf_L_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_svm_plan=")
zf_U_plan_size = len(zf_U_plan_svm_prediction.columns)
for i in range(0,zf_U_plan_size):
pred_file.write("%.6f" % zf_U_plan_svm_prediction.iloc[0,i])
if not (i == zf_U_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_svm_plan=")
dual_f_plan_size = len(dual_f_plan_svm_prediction.columns)
for i in range(0,dual_f_plan_size):
pred_file.write("%.6f" % dual_f_plan_svm_prediction.iloc[0,i])
if not (i == dual_f_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("## Bounce posture selection data ##\n")
pred_file.write("X_svm_bounce=")
x_bounce_size = len(x_bounce_svm_prediction.columns)
for i in range(0,x_bounce_size):
pred_file.write("%.6f" % x_bounce_svm_prediction.iloc[0,i])
if not (i == x_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_svm_bounce=")
zb_L_size = len(zb_L_svm_prediction.columns)
for i in range(0,zb_L_size):
pred_file.write("%.6f" % zb_L_svm_prediction.iloc[0,i])
if not (i == zb_L_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_svm_bounce=")
zb_U_size = len(zb_U_svm_prediction.columns)
for i in range(0,zb_U_size):
pred_file.write("%.6f" % zb_U_svm_prediction.iloc[0,i])
if not (i == zb_U_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_svm_bounce=")
dual_bounce_size = len(dual_bounce_svm_prediction.columns)
for i in range(0,dual_bounce_size):
pred_file.write("%.6f" % dual_bounce_svm_prediction.iloc[0,i])
if not (i == dual_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
# ----------------- K-Nearest Neighbors -------------------------- #
pred_file.write("### Warm start with K-Nearest Neighbors ###\n")
pred_file.write("## Plan target posture selection data ##\n")
pred_file.write("X_knn_plan=")
xf_plan_size = len(xf_plan_knn_prediction.columns)
for i in range(0,xf_plan_size):
pred_file.write("%.6f" % xf_plan_knn_prediction.iloc[0,i])
if not (i == xf_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_knn_plan=")
zf_L_plan_size = len(zf_L_plan_knn_prediction.columns)
for i in range(0,zf_L_plan_size):
pred_file.write("%.6f" % zf_L_plan_knn_prediction.iloc[0,i])
if not (i == zf_L_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_knn_plan=")
zf_U_plan_size = len(zf_U_plan_knn_prediction.columns)
for i in range(0,zf_U_plan_size):
pred_file.write("%.6f" % zf_U_plan_knn_prediction.iloc[0,i])
if not (i == zf_U_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_knn_plan=")
dual_f_plan_size = len(dual_f_plan_knn_prediction.columns)
for i in range(0,dual_f_plan_size):
pred_file.write("%.6f" % dual_f_plan_knn_prediction.iloc[0,i])
if not (i == dual_f_plan_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("## Bounce posture selection data ##\n")
pred_file.write("X_knn_bounce=")
x_bounce_size = len(x_bounce_knn_prediction.columns)
for i in range(0,x_bounce_size):
pred_file.write("%.6f" % x_bounce_knn_prediction.iloc[0,i])
if not (i == x_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZL_knn_bounce=")
zb_L_size = len(zb_L_knn_prediction.columns)
for i in range(0,zb_L_size):
pred_file.write("%.6f" % zb_L_knn_prediction.iloc[0,i])
if not (i == zb_L_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("ZU_knn_bounce=")
zb_U_size = len(zb_U_knn_prediction.columns)
for i in range(0,zb_U_size):
pred_file.write("%.6f" % zb_U_knn_prediction.iloc[0,i])
if not (i == zb_U_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.write("Dual_knn_bounce=")
dual_bounce_size = len(dual_bounce_knn_prediction.columns)
for i in range(0,dual_bounce_size):
pred_file.write("%.6f" % dual_bounce_knn_prediction.iloc[0,i])
if not (i == dual_bounce_size -1):
pred_file.write("|")
pred_file.write("\n")
pred_file.close()
| mit |
anilmuthineni/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
    """Fit a DNN classifier on Iris with a custom RunConfig, print accuracy."""
    # Hold out 20% of the Iris data for evaluation.
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)

    # A RunConfig handed to the estimator controls session configuration,
    # e.g. the number of cores and the GPU memory fraction.
    run_config = tf.contrib.learn.estimators.RunConfig(num_cores=3,
                                                       gpu_memory_fraction=0.6)

    # Three hidden layers of 10, 20 and 10 units; 3 output classes.
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        x_train)
    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns, hidden_units=[10, 20, 10],
        n_classes=3, config=run_config)

    # Train, then score on the held-out split.
    classifier.fit(x_train, y_train, steps=200)
    predictions = list(classifier.predict(x_test, as_iterable=True))
    accuracy = metrics.accuracy_score(y_test, predictions)
    print('Accuracy: {0:f}'.format(accuracy))
# Script entry point: delegate to tf.app.run(), which calls main().
if __name__ == '__main__':
    tf.app.run()
| apache-2.0 |
xyguo/scikit-learn | sklearn/tests/test_base.py | 45 | 7049 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    """Minimal estimator with one regular ('l1') and one 'empty' parameter."""

    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    """Leaf estimator with two plain parameters; used nested inside T."""

    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    """Container estimator whose parameters may themselves be estimators."""

    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    """Estimator whose 'b' parameter has been renamed to 'a'."""

    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # NOTE(review): this only instantiates a DeprecationWarning; it is
            # never raised or passed to warnings.warn, so nothing is emitted.
            DeprecationWarning("b is deprecated and renamed 'a'")
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        # NOTE(review): self._b is never assigned anywhere in this class, so
        # reading .b raises AttributeError (after the deprecation warning) --
        # presumably intentional, since tests only check get_params() output.
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "

    def __init__(self, a=None):
        # Deliberately ignores 'a', violating the scikit-learn contract that
        # __init__ stores arguments unchanged; clone() should reject this.
        self.a = 1
class NoEstimator(object):
    """Object with fit/predict but no BaseEstimator API (no get_params)."""

    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        # Return self, mimicking the scikit-learn fit() convention.
        return self

    def predict(self, X=None):
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""

    def __init__(self, *vargs):
        # *args in __init__ breaks parameter introspection; used to verify
        # that clone() raises for such estimators.
        pass
#############################################################################
# The tests
def test_clone():
    # clone() must return a distinct object carrying identical parameters.
    from sklearn.feature_selection import SelectFpr, f_classif

    original = SelectFpr(f_classif, alpha=0.1)
    cloned = clone(original)
    assert_true(cloned is not original)
    assert_equal(cloned.get_params(), original.get_params())

    # Also works when a parameter is an ndarray (params not comparable
    # with ==), so only distinctness is checked.
    original = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    assert_true(clone(original) is not original)
def test_clone_2():
    # Attributes attached outside __init__ must not survive cloning:
    # clone copies constructor parameters only.
    from sklearn.feature_selection import SelectFpr, f_classif

    estimator = SelectFpr(f_classif, alpha=0.1)
    estimator.own_attribute = "test"
    assert_false(hasattr(clone(estimator), "own_attribute"))
def test_clone_buggy():
    # clone() must reject objects that break the estimator contract.

    # __init__ does not store its argument -> RuntimeError.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)

    # No get_params method at all -> TypeError.
    assert_raises(TypeError, clone, NoEstimator())

    # *args in __init__ -> RuntimeError.
    assert_raises(RuntimeError, clone, VargEstimator())
def test_clone_empty_array():
    # Regression test: estimators holding empty / sparse array parameters
    # must be clonable, with array contents preserved.
    dense = MyEstimator(empty=np.array([]))
    assert_array_equal(dense.empty, clone(dense).empty)

    sparse = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    assert_array_equal(sparse.empty.data, clone(sparse).empty.data)
def test_clone_nan():
    # Regression test: an np.nan default parameter must be carried over
    # by identity rather than copied.
    estimator = MyEstimator(empty=np.nan)
    assert_true(clone(estimator).empty is estimator.empty)
def test_repr():
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    test = T(K(), K())
    # Nested estimators render recursively with their parameters.
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )

    # The rendered length of a very long parameter list is pinned here to
    # catch repr formatting regressions.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    # Smoke test the str of the base estimator
    my_estimator = MyEstimator()
    # Only checks that __str__ runs without raising.
    str(my_estimator)
def test_get_params():
    # Nested sub-estimator parameters are addressed as '<name>__<param>'.
    test = T(K(), K())

    # deep=True exposes nested params; deep=False hides them.
    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))

    test.set_params(a__d=2)
    assert_true(test.a.d == 2)

    # Setting an unknown nested parameter must raise.
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    # The deprecated attribute 'b' must never be reported as a parameter.
    est = DeprecatedAttributeEstimator(a=1)
    for kwargs in ({}, {'deep': True}, {'deep': False}):
        params = est.get_params(**kwargs)
        assert_true('a' in params)
        assert_true('b' not in params)
def test_is_classifier():
    svc = SVC()
    grid = GridSearchCV(svc, {'C': [0.1, 1]})
    # A classifier stays a classifier when wrapped in a CV search and/or
    # a pipeline.
    for clf in (svc, grid, Pipeline([('svc', svc)]),
                Pipeline([('svc_cv', grid)])):
        assert_true(is_classifier(clf))
def test_set_params():
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])

    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)

    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)

    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    from sklearn import datasets
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.tree import DecisionTreeRegressor

    rng = np.random.RandomState(0)

    # Cover both the ClassifierMixin and RegressorMixin scoring paths.
    cases = zip([DecisionTreeClassifier(max_depth=2),
                 DecisionTreeRegressor(max_depth=2)],
                [datasets.load_iris(),
                 datasets.load_boston()])
    for est, ds in cases:
        est.fit(ds.data, ds.target)
        # Random integer weights should change the reported score.
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        unweighted = est.score(ds.data, ds.target)
        weighted = est.score(ds.data, ds.target,
                             sample_weight=sample_weight)
        assert_not_equal(unweighted, weighted,
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
| bsd-3-clause |
davidsamu/seal | seal/decoding/decode.py | 1 | 15548 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to perform decoding analyses and process results.
@author: David Samu
"""
import warnings
import numpy as np
import pandas as pd
from quantities import deg
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from seal.analysis import direction, stats
from seal.decoding import decutil
from seal.util import util, ua_query, constants
# For reproducable (deterministic) results.
seed = 8257 # just a random number
verbose = True
# %% Core decoding functions.
def fit_LRCV(LRCV, X, y):
    """
    Fit cross-validated logistic regression model on data and return results.

    Parameters
    ----------
    LRCV : sklearn.linear_model.LogisticRegressionCV
        Configured (unfitted) cross-validated logistic regression solver.
    X : array-like, shape (n_samples, n_features)
        Predictor matrix.
    y : array-like, shape (n_samples,)
        Target labels.

    Returns
    -------
    classes : list
        Class labels (for binary targets: the keys of LRCV.scores_).
    C : float
        Selected regularisation strength.
    score : ndarray
        Per-CV-fold prediction score at the selected C.
    """
    LRCV.fit(X, y)
    # Get results for best fit.
    classes = list(LRCV.scores_.keys()) if is_binary(y) else LRCV.classes_
    C = LRCV.C_[0]  # should be the same for all classes (as refit=True)
    # Prediction score of each CV fold.
    i_best = np.where(LRCV.Cs_ == C)[0][0]  # index of reg. giving best result
    # Scores should be the same across different classes (multinomial case).
    score = LRCV.scores_[list(LRCV.scores_.keys())[0]][:, i_best].squeeze()
    return classes, C, score
def run_logreg(X, y, n_perm=0, n_pshfl=0, cv_obj=None, ncv=5, Cs=None,
               multi_class=None, solver=None, class_weight='balanced'):
    """
    Run logistic regression with number of cross-validation folds (ncv) and
    internal regularization over a number of regularisation parameters (Cs).

    Parameters
    ----------
    X : array-like, shape (n_trials, n_features)
        Predictor matrix; rows containing any NaN are dropped.
    y : array-like, shape (n_trials,)
        Class labels; entries that are None are dropped.
    n_perm : int
        Number of label permutations for significance testing (0: skip).
    n_pshfl : int
        Number of within-class predictor shufflings (0: skip).
    cv_obj : cross-validation generator, optional
        Defaults to a shuffled, seeded StratifiedKFold with ncv splits.
    ncv : int
        Number of CV folds (used only when cv_obj is None).
    Cs : list, optional
        Regularisation strengths; defaults to [1] (no regularisation).
    multi_class, solver, class_weight
        Forwarded to LogisticRegressionCV; when None, defaults are derived
        from the data (binary vs multinomial, sample size).

    Returns
    -------
    res : pd.Series
        Fields 'score', 'class_names', 'coef', 'C', 'perm' and 'psdo';
        left NaN-filled when there are too few classes or trials.
    """
    # Remove missing values from data.
    idx = np.logical_and(np.all(~np.isnan(X), 1),
                         [yi is not None for yi in y])
    X, y = np.array(X[idx]), np.array(y[idx])
    # Init data params.
    classes, vcounts = np.unique(y, return_counts=True)
    ntrials, nfeatures = X.shape
    nclasses = len(classes)
    binary = is_binary(y)
    # Deal with binary case: a single parameter vector for the second class.
    class_names = [classes[1]] if binary else classes
    nclasspars = 1 if binary else nclasses
    # Init results (NaN-filled defaults, returned as-is on early exit).
    res = [('score', np.nan * np.zeros(ncv)), ('class_names', class_names),
           ('coef', np.nan * np.zeros((nclasspars, nfeatures))), ('C', np.nan),
           ('perm', pd.Series(np.nan, index=['mean', 'std', 'pval'])),
           ('psdo', pd.Series(np.nan, index=['mean', 'std', 'pval']))]
    res = util.series_from_tuple_list(res)
    # Check that there's at least two classes.
    if nclasses < 2:
        if verbose:
            warnings.warn('Number of different values in y is less then 2!')
        return res
    # Check that we have enough trials to split into folds during CV.
    if np.any(vcounts < ncv):
        if verbose:
            warnings.warn('Not enough trials to split into folds during CV')
        return res
    # Init LogRegCV parameters.
    if multi_class is None:
        multi_class = 'ovr' if binary else 'multinomial'
    if solver is None:
        # 'sag' converges faster on larger sample sizes.
        solver = 'lbfgs' if len(y) < 500 else 'sag'
    if cv_obj is None:
        cv_obj = StratifiedKFold(n_splits=ncv, shuffle=True,
                                 random_state=seed)
    if Cs is None:
        Cs = [1]  # no regularisation by default
    # Create LogRegress solver.
    LRCV = LogisticRegressionCV(solver=solver, Cs=Cs, cv=cv_obj,
                                multi_class=multi_class, refit=True,
                                class_weight=class_weight)
    # Fit logistic regression.
    class_names, C, score = fit_LRCV(LRCV, X, y)
    res['C'] = C
    res['score'] = score
    # Coefficients (weights) of features by predictors.
    coef = LRCV.coef_
    res['coef'] = coef
    # Run permutation testing (shuffled labels -> chance-level distribution).
    if n_perm > 0:
        r = permutation_test_score(LRCV, X, y, scoring='accuracy', cv=cv_obj,
                                   n_permutations=n_perm, random_state=seed)
        _, perm_scores, perm_p = r
        res['perm']['mean'] = perm_scores.mean()
        res['perm']['std'] = perm_scores.std()
        res['perm']['pval'] = perm_p
    # Run decoding on rate matrix with trials shuffled within units.
    if n_pshfl > 0:
        shfld_scores = np.array([fit_LRCV(LRCV, pop_shfl(X, y), y)[2]
                                 for i in range(n_pshfl)]).mean(1)
        res['psdo']['mean'] = shfld_scores.mean()
        res['psdo']['std'] = shfld_scores.std()
        res['psdo']['pval'] = stats.perm_pval(score.mean(), shfld_scores)
    return res
# %% Utility functions for model fitting.
def is_binary(y):
    """Return True when y holds exactly two distinct classes (binary task)."""
    return np.unique(y).size == 2
def pop_shfl(X, y):
    """Return X predictors shuffled within columns for each y level."""
    n_feat = X.shape[1]
    shuffled = X.copy()
    # For trials belonging to each class label in turn...
    for level in np.unique(y):
        rows = np.where(y == level)[0]
        # ...permute every predictor column independently within the class,
        # destroying across-unit (column) correlations but keeping each
        # column's within-class value distribution.
        for col in range(n_feat):
            shuffled[rows, col] = shuffled[np.random.permutation(rows), col]
    return shuffled
def zscore_by_cond(X, vzscore_by):
    """Z-score rate values by condition levels within each unit."""
    zscored = X.copy()
    # Index.groupby maps each condition level to the row labels at that level.
    for level_idx in X.index.groupby(vzscore_by).values():
        sub = zscored.loc[level_idx, :]
        # Population (ddof=0) standardisation within this condition level.
        zscored.loc[level_idx, :] = (sub - sub.mean()) / sub.std(ddof=0)
    return zscored
def separate_by_cond(X, vcond):
    """Separate rate values by condition levels."""
    # One sub-frame per condition level, in the order Index.groupby yields.
    return [X.loc[level_idx, :]
            for level_idx in X.index.groupby(vcond).values()]
# %% Wrappers to run decoding over time and different stimulus periods.
def run_logreg_across_time(rates, vfeat, vzscore_by=None, n_perm=0,
                           n_pshfl=0, corr_trs=None, ncv=5, Cs=None):
    """
    Run logistic regression analysis across trial time.

    Parameters
    ----------
    rates : pd.DataFrame
        Firing rates; columns are time points, and each column unstacks
        into a (trial x unit) matrix (see loop below).
    vfeat : pd.Series
        Per-trial value of the feature to decode (classification target).
    vzscore_by : pd.Series, optional
        Per-trial condition levels to z-score the rates by (None: no
        z-scoring).
    n_perm, n_pshfl : int
        Number of permutations / within-unit trial shufflings (0: skip),
        forwarded to run_logreg.
    corr_trs : pd.Series of bool, optional
        Mask of correct trials; None treats every trial as correct.
    ncv : int
        Number of cross-validation folds.
    Cs : list, optional
        Regularisation strengths forwarded to run_logreg.

    Returns
    -------
    pd.Series with fields 'Scores', 'Coefs', 'C', 'Perm' and 'Psdo', or
    None when any class has fewer than ncv correct trials.
    """
    # Correct and error trials and targets.
    if corr_trs is None:
        corr_trs = pd.Series(True, index=vfeat.index)
    err_trs = ~corr_trs
    corr_feat, err_feat = [vfeat[trs] for trs in [corr_trs, err_trs]]
    # Check that we have enough trials to split into folds during CV.
    vcounts = corr_feat.value_counts()
    if (vcounts < ncv).any():
        if verbose:
            warnings.warn('Not enough trials to do decoding with CV')
        return
    # Prepare data for running analysis in pool.
    LRparams = []
    t_uids = []
    for t, rt in rates.items():
        rtmat = rt.unstack().T  # get rates and format to (trial x unit) matrix
        if vzscore_by is not None:  # z-score by condition level
            rtmat = zscore_by_cond(rtmat, vzscore_by)
        corr_rates, err_rates = [rtmat.loc[trs]
                                 for trs in [corr_trs, err_trs]]
        # NOTE(review): only correct trials are decoded here; err_rates is
        # computed but unused.
        LRparams.append((corr_rates, corr_feat, n_perm,
                         n_pshfl, None, ncv, Cs))
        t_uids.append(rtmat.columns)
    # Run logistic regression at each time point (in a worker pool).
    res = zip(*util.run_in_pool(run_logreg, LRparams))
    lScores, lClasses, lCoefs, lC, lPerm, lPsdo = res
    # Put results into series and dataframes.
    tvec = rates.columns
    # Best regularisation parameter value.
    C = pd.Series(list(lC), index=tvec)
    # Prediction scores over time.
    Scores = pd.DataFrame.from_records(lScores, index=tvec).T
    # Coefficients (unit by value) over time.
    coef_ser = {t: pd.DataFrame(lCoefs[i], columns=t_uids[i],
                                index=lClasses[i]).unstack()
                for i, t in enumerate(tvec)}
    Coefs = pd.concat(coef_ser, axis=1)
    # Permutation and population shuffling results.
    Perm = pd.concat(lPerm, axis=1, keys=tvec)
    Psdo = pd.concat(lPsdo, axis=1, keys=tvec)
    # Collect results.
    res = [('Scores', Scores), ('Coefs', Coefs), ('C', C),
           ('Perm', Perm), ('Psdo', Psdo)]
    res = util.series_from_tuple_list(res)
    return res
def run_prd_pop_dec(UA, rec, task, stim, uids, trs, feat, zscore_by,
                    even_by, PDD_offset, PPDc, PADc, prd, ref_ev, nrate,
                    n_perm, n_pshfl, sep_err_trs, ncv, Cs, tstep):
    """Run logistic regression analysis on population for time period.

    Selects/filters trials (optionally evening out feature value counts and
    restricting to directions near PPDc/PADc), builds the target vector and
    rate matrix, then delegates to run_logreg_across_time.

    Returns a dict with keys 'dec_res', 'nunits', 'ntrs' and 'ncls', or
    None when decoding could not be run.
    """
    # Init.
    TrData = ua_query.get_trial_params(UA, rec, task)
    # Homogenize occurrence of values of a feature (same or different as the
    # one to decode), by dropping some trials.
    if not util.is_null(even_by):
        # ntrs: number of trials of least frequent feature value.
        vfeven = TrData[even_by][trs]
        ntrs = vfeven.value_counts().min()
        # Select ntrs number of trials from beginning of recording.
        ltrs = [grp.index[:ntrs] for v, grp in vfeven.groupby(vfeven)]
        trs = pd.Int64Index(pd.Series(np.concatenate(ltrs)).sort_values())
    # Select only trials where direction is PDD/PAD +- offset.
    if not util.is_null(PDD_offset):
        ppds = [d for d in [PPDc, PADc]]
        # NOTE(review): 'pd' below shadows pandas, but only inside the
        # comprehensions.
        dd = pd.Series({float(d): np.min([direction.deg_diff(d, pd)
                                          for pd in ppds])
                        for d in constants.all_dirs})
        dirs = dd.index[dd <= PDD_offset]
        pdd_trs = TrData.index[TrData[(stim, 'Dir')].isin(dirs)]
        trs = trs[trs.isin(pdd_trs)]
    # Get target vector.
    vfeat = TrData[feat][trs].squeeze()
    # Binarize direction target vector to decode if PDD/PAD is requested:
    # each direction is labelled by whichever of PPDc/PADc it is closer to.
    if ('Dir' in feat) and (not util.is_null(PDD_offset)):
        vbin = [np.argmin([direction.deg_diff(d*deg, pd) for pd in ppds])
                for d in vfeat]
        vfeat = pd.Series(vbin, vfeat.index)
    # Init levels of separation and z-scoring condition.
    vzscore_by = (None if zscore_by in (None, np.nan) else
                  TrData.loc[trs].copy()[zscore_by].squeeze())
    # Get FR matrix.
    rates = ua_query.get_rate_matrix(UA, rec, task, uids, prd,
                                     ref_ev, nrate, trs, tstep)
    # Separate correct trials from error trials, if requested.
    corr_trs = TrData.correct[vfeat.index] if sep_err_trs else None
    # Run decoding.
    dec_res = run_logreg_across_time(rates, vfeat, vzscore_by, n_perm,
                                     n_pshfl, corr_trs, ncv, Cs)
    if dec_res is None:
        return
    # Add # units, trials and classes to results.
    nunits = len(dec_res['Coefs'].index.get_level_values(0).unique())
    ncls = len(dec_res['Coefs'].index.get_level_values(1).unique())
    ntrs = (sum(corr_trs) if corr_trs is not None else
            len(rates.index.get_level_values(1).unique()))
    if ncls == 1:  # binary case
        ncls = 2
    res = {'dec_res': dec_res, 'nunits': nunits, 'ntrs': ntrs, 'ncls': ncls}
    return res
def run_pop_dec(UA, rec, task, uids, trs, prd_pars, nrate, n_perm, n_pshfl,
                sep_err_trs, ncv, Cs, tstep, PPDc, PADc):
    """Run population decoding on multiple periods across given trials.

    Decodes each stimulus period listed in prd_pars (via run_prd_pop_dec),
    then concatenates the per-period results along time.

    Returns a dict with the concatenated 'Scores', 'Coefs', 'C', 'Perm' and
    'Psdo' results plus per-stimulus 'nunits', 'ntrials' and 'nclasses', or
    None when no period could be decoded.
    """
    # Init per-result-type accumulators.
    r = {'Scores': [], 'Coefs': [], 'C': [], 'Perm': [], 'Psdo': []}
    nunits, ntrs, ncls = pd.Series(), pd.Series(), pd.Series()
    stims = prd_pars.index
    tshifts, truncate_prds = [], []
    for stim in stims:
        print(' ' + stim)
        # Get params.
        pars_names = ['prd', 'ref_ev', 'feat', 'sep_by',
                      'zscore_by', 'even_by', 'PDD_offset']
        pars = prd_pars.loc[stim, pars_names]
        prd, ref_ev, feat, sep_by, zscore_by, even_by, PDD_offset = pars
        # Run decoding.
        res = run_prd_pop_dec(UA, rec, task, stim, uids, trs, feat, zscore_by,
                              even_by, PDD_offset, PPDc, PADc, prd, ref_ev,
                              nrate, n_perm, n_pshfl, sep_err_trs, ncv, Cs,
                              tstep)
        if res is None:
            continue
        # Extract decoding results.
        r = {resname: rres + [res['dec_res'][resname]]
             for resname, rres in r.items()}
        # Collect parameters.
        nunits[stim] = res['nunits']
        ntrs[stim] = res['ntrs']
        ncls[stim] = res['ncls']
        # Update concatenation params.
        tshifts.append(prd_pars.stim_start[stim])
        truncate_prds.append(list(prd_pars.loc[stim, ['prd_start',
                                                      'prd_stop']]))
    # No successfully decoded stimulus period.
    if not len(r['Scores']):
        print('No stimulus period decoding finished successfully.')
        return
    # Concatenate stimulus-specific results.
    rem_all_nan_units, rem_any_nan_times = True, True
    res = {rn: util.concat_stim_prd_res(rr, tshifts, truncate_prds,
                                        rem_all_nan_units, rem_any_nan_times)
           for rn, rr in r.items()}
    # Add # units, trials and classes.
    res['nunits'] = nunits
    res['ntrials'] = ntrs
    res['nclasses'] = ncls
    return res
def dec_recs_tasks(UA, RecInfo, recs, tasks, feat, stims, sep_by, zscore_by,
                   even_by, PDD_offset, res_dir, nrate, tstep, ncv, Cs,
                   n_perm, n_pshfl, sep_err_trs, n_most_DS, PPDres):
    """Run decoding across tasks and recordings.

    Top-level driver: for every (recording, task) pair it selects units and
    trials, optionally splits trials by a condition (sep_by), runs
    run_pop_dec per split, and writes all results to a file derived from
    the decoding parameters.
    """
    print('\nDecoding: ' + util.format_feat_name(feat))
    # Set up decoding params.
    prd_pars = util.init_stim_prds(stims, feat, sep_by, zscore_by,
                                   even_by, PDD_offset)
    fres = decutil.res_fname(res_dir, 'results', tasks, feat, nrate, ncv, Cs,
                             n_perm, n_pshfl, sep_err_trs, sep_by, zscore_by,
                             even_by, PDD_offset, n_most_DS, tstep)
    rt_res = {}
    for rec in recs:
        print('\n' + ' '.join(rec))
        for task in tasks:
            rt = rec + (task,)
            # Skip recordings that are missing or undecodable.
            if ((rt not in RecInfo.index) or
                not RecInfo.loc[rt, 'nunits']):
                continue
            # Let's not decode saccade and correct/incorrect for passive task.
            if ('Pas' in task) and (feat in ['saccade', 'correct']):
                continue
            # Init.
            print(' ' + task)
            rt_res[(rec, task)] = {}
            # Init units, trials and trial params.
            recinfo = RecInfo.loc[rt]
            elec = recinfo.elec
            uids = [rec + (elec, ic, iu) for ic, iu in recinfo.units]
            inc_trs = recinfo.trials
            PPDc, PADc = PPDres.loc[rt, ('PPDc', 'PADc')]
            # Select n most DS units (or all if n_most_DS is 0).
            utids = [uid + (task, ) for uid in uids]
            n_most_DS_utids = ua_query.select_n_most_DS_units(UA, utids,
                                                              n_most_DS)
            uids = [utid[:-1] for utid in n_most_DS_utids]
            # Split by value condition (optional).
            TrData = ua_query.get_trial_params(UA, rec, task)
            ltrs = (inc_trs.groupby(TrData[sep_by].loc[inc_trs])
                    if not util.is_null(sep_by) else {'all': inc_trs})
            ltrs = pd.Series(ltrs)
            # Decode feature in each period.
            tr_res = {}
            for v, trs in ltrs.items():
                res = run_pop_dec(UA, rec, task, uids, trs, prd_pars, nrate,
                                  n_perm, n_pshfl, sep_err_trs, ncv, Cs,
                                  tstep, PPDc, PADc)
                if not util.is_null(res):
                    tr_res[v] = res
            rt_res[(rec, task)] = tr_res
    # Save results.
    util.write_objects({'rt_res': rt_res}, fres)
| gpl-3.0 |
christopherburke/MEPL | test_show_point_images.py | 1 | 1704 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 13:39:42 2016
@author: cjburke
Test of MEPL to illustrate images of a point source
"""
import microlens as mepl
import matplotlib.pyplot as plt
import numpy as np
# Specify lens properties.
lens_separation = 0.75
lens_mass1 = 1.0
lens_mass2 = 0.01
# Specify source center.
source_x = 0.01
source_y = -0.01

# Build the binary lens: lens 1 at the origin, lens 2 on the x-axis.
lens = mepl.lens_properties()
lens.set_lens_properties(0.0, 0.0, lens_mass1,
                         lens_separation, 0.0, lens_mass2)

# Point source: radius = 0.0 and angle = 0.0.
src_obj = mepl.source_plane_vertex()
src_obj.set_source_position(source_x, source_y, 0.0, 0.0)

# Solve for the image positions of the point source.
imageLocs, jacobians, paritys, kappas, kappaDerivs = \
    mepl.solve_binary_roots(src_obj, lens)

# Point-source magnification of each image from the Jacobian returned above.
magnifs = 1.0 / np.abs(jacobians)
magnifTotal = np.sum(magnifs)
tmpx = np.real(imageLocs)
tmpy = np.imag(imageLocs)

# print() call form keeps the script runnable under both Python 2 and 3
# (the original "print ..." statements are a SyntaxError on Python 3).
for i, (x, y, mag) in enumerate(zip(tmpx, tmpy, magnifs)):
    print("Image {0:d} x: {1:6.4f} y: {2:6.4f} Mag: {3:7.3f}".format(
        i, x, y, mag))
print("Total Mag: {0:7.3f}".format(magnifTotal))

# Plot colors as RGB fractions.
myorange = tuple(np.array([230.0, 159.0, 0.0]) / 255.0)
myskyblue = tuple(np.array([86.0, 180.0, 233.0]) / 255.0)

# Lenses as black circles, source in orange, images in sky blue.
plt.plot([0.0, lens_separation], [0.0, 0.0], 'ok')
plt.plot([source_x], [source_y], 'o', color=myorange)
plt.plot(tmpx, tmpy, 'o', color=myskyblue)
plt.show()
| gpl-3.0 |
JVP3122/Kaggle | Titanic/code/plot_learning_curve.py | 3 | 4525 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
# print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training and cross-validation scores vs. training-set size.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X; None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Limits for the plotted y-axis.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy, forwarded to
        :func:`sklearn.model_selection.learning_curve`.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    train_sizes : array-like, optional
        Fractions (or counts) of the training set used for each point
        on the curve.

    Returns
    -------
    plt : module
        The matplotlib.pyplot module, so callers can keep customizing.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    sizes, scores_train, scores_test = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    mean_train = np.mean(scores_train, axis=1)
    std_train = np.std(scores_train, axis=1)
    mean_test = np.mean(scores_test, axis=1)
    std_test = np.std(scores_test, axis=1)
    plt.grid()

    # Shaded bands mark +/- one standard deviation around each mean curve.
    plt.fill_between(sizes, mean_train - std_train,
                     mean_train + std_train, alpha=0.1,
                     color="r")
    plt.fill_between(sizes, mean_test - std_test,
                     mean_test + std_test, alpha=0.1, color="g")
    plt.plot(sizes, mean_train, 'o-', color="r",
             label="Training score")
    plt.plot(sizes, mean_test, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
"""
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20PERCENT data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
""" | apache-2.0 |
btabibian/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
# Keep only sepal length/width so the decision surface can be drawn in 2-D.
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
    """Linear kernel weighted by the diagonal matrix M = diag(2, 1).

    Returns X . M . Y.T, so the first feature contributes twice as much
    as the second to the inner product.
    """
    M = np.diag([2.0, 1.0])
    weighted = np.dot(X, M)
    return np.dot(weighted, Y.T)
h = .02  # step size in the mesh

# we create an instance of SVM and fit our data with the custom kernel above.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict a class for every grid point (grid flattened to an (n, 2) array).
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/utils/estimator_checks.py | 4 | 92964 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
from functools import partial
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
from sklearn.externals.six.moves import zip
from sklearn.utils import IS_PYPY, _IS_32BIT
from sklearn.utils._joblib import hash, Memory
from sklearn.utils.testing import assert_raises, _get_args
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import create_memmap_backed_data
from sklearn.utils import is_scalar_nan
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.base import (clone, ClusterMixin,
BaseEstimator, is_classifier, is_regressor,
is_outlier_detector)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.linear_model.stochastic_gradient import BaseSGD
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import (rbf_kernel, linear_kernel,
pairwise_distances)
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import (has_fit_parameter, _num_samples,
LARGE_SPARSE_SUPPORTED)
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# Lazily-filled cache for the scaled/shuffled Boston subset; populated on
# the first call to _boston_subset().
BOSTON = None
# Estimator names that get special-case handling in the checks below.
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
# Regressors whose targets are reshaped to 2-D (multi-output) by
# multioutput_estimator_convert_y_2d.
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
                'ExtraTreeRegressor', 'ExtraTreesRegressor',
                'GaussianProcessRegressor', 'TransformedTargetRegressor',
                'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
                'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
                'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
                'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                'RANSACRegressor', 'RadiusNeighborsRegressor',
                'RandomForestRegressor', 'Ridge', 'RidgeCV']

# Estimators that accept NaN values in their input and are therefore
# excluded from the NaN/inf rejection check.
ALLOW_NAN = ['Imputer', 'SimpleImputer', 'MissingIndicator',
             'MaxAbsScaler', 'MinMaxScaler', 'RobustScaler', 'StandardScaler',
             'PowerTransformer', 'QuantileTransformer']
def _yield_non_meta_checks(name, estimator):
    """Yield the checks applied to every non-meta estimator.

    Each yielded object is a callable run as ``check(name, estimator)``.
    """
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_sample_weights_pandas_series
    yield check_sample_weights_list
    yield check_sample_weights_invariance
    yield check_estimators_fit_returns_self
    yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
    yield check_complex_data

    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages

    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency

    if name not in ALLOW_NAN:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf

    yield check_estimators_overwrite_params

    if hasattr(estimator, 'sparsify'):
        yield check_sparsify_coefficients

    yield check_estimator_sparse_data

    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
    """Yield the checks specific to classifiers."""
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    yield partial(check_classifiers_train, readonly_memmap=True)
    yield check_classifiers_regression_target
    if (name not in ["MultinomialNB", "ComplementNB", "LabelPropagation",
                     "LabelSpreading"] and
            # TODO some complication with -1 label
            name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        yield check_supervised_y_2d
    yield check_supervised_y_no_nan
    yield check_estimators_unfitted
    if 'class_weight' in classifier.get_params().keys():
        yield check_class_weight_classifiers

    yield check_non_transformer_estimators_n_iter
    # test if predict_proba is a monotonic transformation of decision_function
    yield check_decision_proba_consistency
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_no_nan(name, estimator_orig):
    """Fitting with non-finite targets must raise the standard ValueError."""
    # Checks that the Estimator targets are not NaN.
    estimator = clone(estimator_orig)
    rng = np.random.RandomState(888)
    X = rng.randn(10, 5)
    # np.inf targets must be rejected with scikit-learn's canonical message.
    y = np.full(10, np.inf)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    errmsg = "Input contains NaN, infinity or a value too large for " \
             "dtype('float64')."
    try:
        estimator.fit(X, y)
    except ValueError as e:
        # The message is compared exactly; any other ValueError is a failure.
        if str(e) != errmsg:
            raise ValueError("Estimator {0} raised error as expected, but "
                             "does not match expected error message"
                             .format(name))
    else:
        raise ValueError("Estimator {0} should have raised error on fitting "
                         "array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
    """Yield the checks specific to regressors."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield partial(check_regressors_train, readonly_memmap=True)
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    yield check_supervised_y_no_nan
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    if name != "GaussianProcessRegressor":
        # test if NotFittedError is raised
        yield check_estimators_unfitted
    yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
    """Yield the checks specific to transformers."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
        yield partial(check_transformer_general, readonly_memmap=True)
        yield check_transformers_unfitted
    # Dependent on external solvers and hence accessing the iter
    # param is non-trivial.
    external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
                       'RandomizedLasso', 'LogisticRegressionCV']
    if name not in external_solver:
        yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
    """Yield the checks specific to clustering estimators."""
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
        yield partial(check_clustering, readonly_memmap=True)
        yield check_estimators_partial_fit_n_features
    yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(name, estimator):
    """Yield the checks specific to outlier detectors."""
    # checks for outlier detectors that have a fit_predict method
    if hasattr(estimator, 'fit_predict'):
        yield check_outliers_fit_predict

    # checks for estimators that can be used on a test set
    if hasattr(estimator, 'predict'):
        yield check_outliers_train
        yield partial(check_outliers_train, readonly_memmap=True)
        # test outlier detectors can handle non-array data
        yield check_classifier_data_not_an_array
        # test if NotFittedError is raised
        yield check_estimators_unfitted
def _yield_all_checks(name, estimator):
    """Yield every check that applies to ``estimator``.

    Combines the generic checks with the per-kind ones (classifier,
    regressor, transformer, clusterer, outlier detector), then appends
    the checks that apply to all estimators unconditionally.
    """
    for check in _yield_non_meta_checks(name, estimator):
        yield check
    if is_classifier(estimator):
        for check in _yield_classifier_checks(name, estimator):
            yield check
    if is_regressor(estimator):
        for check in _yield_regressor_checks(name, estimator):
            yield check
    if hasattr(estimator, 'transform'):
        for check in _yield_transformer_checks(name, estimator):
            yield check
    if isinstance(estimator, ClusterMixin):
        for check in _yield_clustering_checks(name, estimator):
            yield check
    if is_outlier_detector(estimator):
        for check in _yield_outliers_checks(name, estimator):
            yield check
    yield check_fit2d_predict1d
    yield check_methods_subset_invariance
    yield check_fit2d_1sample
    yield check_fit2d_1feature
    yield check_fit1d
    yield check_get_params_invariance
    yield check_set_params
    yield check_dict_unchanged
    yield check_dont_overwrite_parameters
def check_estimator(Estimator):
    """Check if estimator adheres to scikit-learn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    This test can be applied to classes or instances.
    Classes currently have some additional tests that related to construction,
    while passing instances allows the testing of multiple options.

    Parameters
    ----------
    Estimator : estimator object or class
        Estimator to check. Estimator is a class object or instance.

    """
    if isinstance(Estimator, type):
        # got a class
        name = Estimator.__name__
        estimator = Estimator()
        check_parameters_default_constructible(name, Estimator)
        check_no_attributes_set_in_init(name, estimator)
    else:
        # got an instance
        estimator = Estimator
        name = type(estimator).__name__

    for check in _yield_all_checks(name, estimator):
        try:
            check(name, estimator)
        except SkipTest as exception:
            # the only SkipTest thrown currently results from not
            # being able to import pandas.
            warnings.warn(str(exception), SkipTestWarning)
def _boston_subset(n_samples=200):
    """Return a standardized, shuffled subset of the Boston dataset.

    The result is cached in the module-level ``BOSTON`` global, so the
    ``n_samples`` argument only has an effect on the first call.
    """
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def set_checking_parameters(estimator):
    """Mutate ``estimator``'s params in place to make the checks fast.

    Reduces iteration counts, ensemble sizes etc., and opts out of
    deprecated behaviour for a handful of known estimator classes.
    """
    # set parameters to speed up some estimators and
    # avoid deprecated behaviour
    params = estimator.get_params()
    if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"
            and not isinstance(estimator, BaseSGD)):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR, LinearSVC
        if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
            estimator.set_params(max_iter=20)
        # NMF
        if estimator.__class__.__name__ == 'NMF':
            estimator.set_params(max_iter=100)
        # MLP
        if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
            estimator.set_params(max_iter=100)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        # FIXME: The default number of trees was changed and is set to 'warn'
        # for some of the ensemble methods. We need to catch this case to avoid
        # an error during the comparison. To be reverted in 0.22.
        if estimator.n_estimators == 'warn':
            estimator.set_params(n_estimators=5)
        else:
            estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if "decision_function_shape" in params:
        # SVC
        estimator.set_params(decision_function_shape='ovo')

    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)

    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100

    if estimator.__class__.__name__ == "IsolationForest":
        # XXX to be removed in 0.22.
        # this is used because the old IsolationForest does not
        # respect the outlier detection API and thus and does not
        # pass the outlier detection common tests.
        estimator.set_params(behaviour='new')

    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=2)

    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """An object that is convertible to an array.

    It is not an array itself, but numpy can convert it through the
    ``__array__`` protocol (e.g. via ``np.asarray``).

    Parameters
    ----------
    data : array_like
        The data.
    """

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        # numpy invokes this hook during array conversion.
        return self.data
def _is_pairwise(estimator):
"""Returns True if estimator has a _pairwise attribute set to True.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if _pairwise is set to True and False otherwise.
"""
return bool(getattr(estimator, "_pairwise", False))
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if _pairwise is set to True and False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == 'precomputed')
def pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):
    """Convert X to the pairwise form an estimator expects, if any.

    Returns a distance matrix for precomputed-metric estimators, a kernel
    matrix for ``_pairwise`` estimators, and X unchanged otherwise.
    """
    if _is_pairwise_metric(estimator):
        return pairwise_distances(X, metric='euclidean')
    if _is_pairwise(estimator):
        return kernel(X, X)

    return X
def _generate_sparse_matrix(X_csr):
    """Generate sparse matrices with {32,64}bit indices of diverse format

    Parameters
    ----------
    X_csr: CSR Matrix
        Input matrix in CSR format

    Returns
    -------
    out: iter(Matrices)
        In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo',
        'coo_64', 'csc_64', 'csr_64']
    """
    assert X_csr.format == 'csr'
    yield 'csr', X_csr.copy()
    for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:
        yield sparse_format, X_csr.asformat(sparse_format)

    if LARGE_SPARSE_SUPPORTED:
        # Generate large indices matrix only if its supported by scipy
        X_coo = X_csr.asformat('coo')
        # Promote index arrays to int64 to simulate "large" sparse matrices.
        X_coo.row = X_coo.row.astype('int64')
        X_coo.col = X_coo.col.astype('int64')
        yield "coo_64", X_coo

        for sparse_format in ['csc', 'csr']:
            X = X_csr.asformat(sparse_format)
            X.indices = X.indices.astype('int64')
            X.indptr = X.indptr.astype('int64')
            yield sparse_format + "_64", X
def check_estimator_sparse_data(name, estimator_orig):
    """Fit/predict on many sparse formats; failures must be graceful.

    An estimator that does not support sparse input must raise a
    TypeError/ValueError whose message mentions "sparse".
    """
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = pairwise_estimator_convert_X(X, estimator_orig)
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    # catch deprecation warnings
    with ignore_warnings(category=DeprecationWarning):
        estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    for matrix_format, X in _generate_sparse_matrix(X_csr):
        # catch deprecation warnings
        with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
            if name in ['Scaler', 'StandardScaler']:
                # with_mean=True would densify the sparse input and raise.
                estimator = clone(estimator).set_params(with_mean=False)
            else:
                estimator = clone(estimator)
        # fit and predict
        try:
            with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
                estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                assert_equal(probs.shape, (X.shape[0], 4))
        except (TypeError, ValueError) as e:
            if 'sparse' not in repr(e).lower():
                if "64" in matrix_format:
                    msg = ("Estimator %s doesn't seem to support %s matrix, "
                           "and is not failing gracefully, e.g. by using "
                           "check_array(X, accept_large_sparse=False)")
                    raise AssertionError(msg % (name, matrix_format))
                else:
                    print("Estimator %s doesn't seem to fail gracefully on "
                          "sparse data: error message state explicitly that "
                          "sparse input is not supported if this is not"
                          " the case." % name)
                    raise
        except Exception as e:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_pandas_series(name, estimator_orig):
    """``fit`` must accept a pandas.Series as ``sample_weight``."""
    # check that estimators will accept a 'sample_weight' parameter of
    # type pandas.Series in the 'fit' function.
    estimator = clone(estimator_orig)
    if has_fit_parameter(estimator, "sample_weight"):
        try:
            import pandas as pd
            X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
                          [2, 1], [2, 2], [2, 3], [2, 4]])
            X = pd.DataFrame(pairwise_estimator_convert_X(X, estimator_orig))
            y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2])
            weights = pd.Series([1] * 8)
            try:
                estimator.fit(X, y, sample_weight=weights)
            except ValueError:
                raise ValueError("Estimator {0} raises error if "
                                 "'sample_weight' parameter is of "
                                 "type pandas.Series".format(name))
        except ImportError:
            # pandas is an optional dependency of this check only.
            raise SkipTest("pandas is not installed: not testing for "
                           "input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_list(name, estimator_orig):
    """``fit`` must accept a plain Python list as ``sample_weight``."""
    # check that estimators will accept a 'sample_weight' parameter of
    # type list in the 'fit' function.
    if has_fit_parameter(estimator_orig, "sample_weight"):
        estimator = clone(estimator_orig)
        rnd = np.random.RandomState(0)
        X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
                                         estimator_orig)
        y = np.arange(10) % 3
        y = multioutput_estimator_convert_y_2d(estimator, y)
        sample_weight = [3] * 10
        # Test that estimators don't raise any exception
        estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_invariance(name, estimator_orig):
    """``sample_weight=None`` and all-ones weights must give equal results."""
    # check that the estimators yield same results for
    # unit weights and no weights
    if (has_fit_parameter(estimator_orig, "sample_weight") and
            not (hasattr(estimator_orig, "_pairwise")
                 and estimator_orig._pairwise)):
        # We skip pairwise because the data is not pairwise

        estimator1 = clone(estimator_orig)
        estimator2 = clone(estimator_orig)
        set_random_state(estimator1, random_state=0)
        set_random_state(estimator2, random_state=0)

        X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
                      [2, 1], [2, 1], [2, 1], [2, 1],
                      [3, 3], [3, 3], [3, 3], [3, 3],
                      [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.dtype('float'))
        y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
                      1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype('int'))

        estimator1.fit(X, y=y, sample_weight=np.ones(shape=len(y)))
        estimator2.fit(X, y=y, sample_weight=None)

        for method in ["predict", "transform"]:
            if hasattr(estimator_orig, method):
                X_pred1 = getattr(estimator1, method)(X)
                X_pred2 = getattr(estimator2, method)(X)
                assert_allclose(X_pred1, X_pred2,
                                err_msg="For %s sample_weight=None is not"
                                        " equivalent to sample_weight=ones"
                                        % name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
    """Object-dtype input must be treated as numeric where possible."""
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)
    X = X.astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)

    if hasattr(estimator, "transform"):
        estimator.transform(X)

    try:
        # Object-dtype labels are allowed to fail only with the
        # "Unknown label type" message.
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise

    # A genuinely non-numeric entry must raise a TypeError.
    X[0, 0] = {'foo': 'bar'}
    msg = "argument must be a string or a number"
    assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_complex_data(name, estimator_orig):
    """Fitting complex-valued data must raise a clear ValueError."""
    # Build a complex-valued column vector and a matching complex target.
    real_part = np.random.sample(10)
    imag_part = 1j * np.random.sample(10)
    X = (real_part + imag_part).reshape(-1, 1)
    y = np.random.sample(10) + 1j * np.random.sample(10)
    cloned = clone(estimator_orig)
    assert_raises_regex(ValueError, "Complex data not supported",
                        cloned.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
    """Prediction/transform methods must not mutate ``__dict__``."""
    # this estimator raises
    # ValueError: Found array with 0 feature(s) (shape=(23, 0))
    # while a minimum of 1 is required.
    # error
    if name in ['SpectralCoclustering']:
        return
    rnd = np.random.RandomState(0)
    if name in ['RANSACRegressor']:
        X = 3 * rnd.uniform(size=(20, 3))
    else:
        X = 2 * rnd.uniform(size=(20, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(np.int)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1

    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    if hasattr(estimator, "n_best"):
        estimator.n_best = 1

    set_random_state(estimator, 1)

    estimator.fit(X, y)
    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            # Snapshot the state, call the method, and require no change.
            dict_before = estimator.__dict__.copy()
            getattr(estimator, method)(X)
            assert_dict_equal(estimator.__dict__, dict_before,
                              'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
    """True for names with neither a leading nor a trailing underscore."""
    return not attr.startswith('_') and not attr.endswith('_')
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_dont_overwrite_parameters(name, estimator_orig):
    """``fit`` may only set or change private (underscored) attributes."""
    # check that fit method only changes or sets private attributes
    if hasattr(estimator_orig.__init__, "deprecated_original"):
        # to not check deprecated classes
        return
    estimator = clone(estimator_orig)
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    dict_before_fit = estimator.__dict__.copy()
    estimator.fit(X, y)

    dict_after_fit = estimator.__dict__

    public_keys_after_fit = [key for key in dict_after_fit.keys()
                             if is_public_parameter(key)]

    attrs_added_by_fit = [key for key in public_keys_after_fit
                          if key not in dict_before_fit.keys()]

    # check that fit doesn't add any public attribute
    assert_true(not attrs_added_by_fit,
                ('Estimator adds public attribute(s) during'
                 ' the fit method.'
                 ' Estimators are only allowed to add private attributes'
                 ' either started with _ or ended'
                 ' with _ but %s added' % ', '.join(attrs_added_by_fit)))

    # check that fit doesn't change any public attribute
    # (identity comparison: rebinding a public attribute counts as a change)
    attrs_changed_by_fit = [key for key in public_keys_after_fit
                            if (dict_before_fit[key]
                                is not dict_after_fit[key])]

    assert_true(not attrs_changed_by_fit,
                ('Estimator changes public attribute(s) during'
                 ' the fit method. Estimators are only allowed'
                 ' to change attributes started'
                 ' or ended with _, but'
                 ' %s changed' % ', '.join(attrs_changed_by_fit)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit2d_predict1d(name, estimator_orig):
    """Predicting on a 1-D array must raise the "Reshape your data" error."""
    # check by fitting a 2d array and predicting with a 1d array
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(np.int)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    estimator.fit(X, y)

    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            assert_raise_message(ValueError, "Reshape your data",
                                 getattr(estimator, method), X[0])
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features))
for batch in X]
# func can output tuple (e.g. score_samples)
if type(result_full) == tuple:
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_methods_subset_invariance(name, estimator_orig):
    """Methods must give the same results on mini-batches as on the full set."""
    # check that method gives invariant results if applied
    # on mini bathes or the whole set
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(np.int)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    estimator.fit(X, y)

    for method in ["predict", "transform", "decision_function",
                   "score_samples", "predict_proba"]:

        msg = ("{method} of {name} is not invariant when applied "
               "to a subset.").format(method=method, name=name)
        # TODO remove cases when corrected
        if (name, method) in [('SVC', 'decision_function'),
                              ('SparsePCA', 'transform'),
                              ('MiniBatchSparsePCA', 'transform'),
                              ('BernoulliRBM', 'score_samples')]:
            raise SkipTest(msg)

        if hasattr(estimator, method):
            result_full, result_by_batch = _apply_on_subsets(
                getattr(estimator, method), X)
            assert_allclose(result_full, result_by_batch,
                            atol=1e-7, err_msg=msg)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
    # Check that fitting a 2d array with only one sample either works or
    # returns an informative message. The error message should either mention
    # the number of samples or the number of classes.
    # NOTE(review): unlike the sibling checks, X is not passed through
    # pairwise_estimator_convert_X here -- presumably intentional; confirm.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))
    y = X[:, 0].astype(np.int)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)

    msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
            "1 class", "one class"]

    try:
        estimator.fit(X, y)
    except ValueError as e:
        # Accept the failure only when the message is informative.
        if all(msg not in repr(e) for msg in msgs):
            raise e
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
    """Fitting a (10, 1) design matrix must work or fail with a clear message.

    Any ValueError raised must mention the single-feature condition so
    users can diagnose the failure.
    """
    # check fitting a 2d array with only 1 feature either works or returns
    # informative message
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(10, 1))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(np.int)
    estimator = clone(estimator_orig)
    # Convert y once after cloning.  (The original code converted a second
    # time further down; the call is idempotent, so the duplicate is dropped.)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    # ensure two labels in subsample for RandomizedLogisticRegression
    if name == 'RandomizedLogisticRegression':
        estimator.sample_fraction = 1
    # ensure non skipped trials for RANSACRegressor
    if name == 'RANSACRegressor':
        estimator.residual_threshold = 0.5

    set_random_state(estimator, 1)

    msgs = ["1 feature(s)", "n_features = 1", "n_features=1"]

    try:
        estimator.fit(X, y)
    except ValueError as e:
        # Accept the failure only when the message is informative.
        if all(msg not in repr(e) for msg in msgs):
            raise e
@ignore_warnings
def check_fit1d(name, estimator_orig):
    """A one-dimensional X passed to ``fit`` must raise ValueError."""
    rng = np.random.RandomState(0)
    X = 3 * rng.uniform(size=(20))
    y = X.astype(np.int)
    est = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(est, y)
    # Keep the model small enough for this toy input.
    for attr in ("n_components", "n_clusters"):
        if hasattr(est, attr):
            setattr(est, attr, 1)
    set_random_state(est, 1)
    assert_raises(ValueError, est.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_general(name, transformer, readonly_memmap=False):
    """Run the generic transformer checks on blob data, both as ndarrays
    and as plain Python lists."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # Shift to non-negative values for estimators that require them.
    X -= X.min()
    if readonly_memmap:
        X, y = create_memmap_backed_data([X, y])
    for data, target in ((X, y), (X.tolist(), y.tolist())):
        _check_transformer(name, transformer, data, target)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_data_not_an_array(name, transformer):
    """Transformers must accept array-likes that are not ndarrays."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    wrapped_X = NotAnArray(X)
    wrapped_y = NotAnArray(np.asarray(y))
    _check_transformer(name, transformer, wrapped_X, wrapped_y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformers_unfitted(name, transformer):
    """Calling ``transform`` before ``fit`` must raise an error."""
    X, y = _boston_subset()
    transformer = clone(transformer)
    failure_msg = ("The unfitted transformer {} does not raise an error when "
                   "transform is called. Perhaps use "
                   "check_is_fitted in transform.".format(name))
    with assert_raises((AttributeError, ValueError), msg=failure_msg):
        transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
    """Core transformer contract checks.

    Fits a clone on (X, y) and verifies that ``fit_transform`` and
    ``transform`` agree, that consecutive ``fit_transform`` calls agree,
    that n_samples is preserved, and that a wrong feature count in
    ``transform`` raises ValueError.
    """
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _IS_32BIT:
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    transformer = clone(transformer_orig)
    set_random_state(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a 2-column y
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y
    transformer.fit(X, y_)
    # fit_transform method should work on non fitted estimator
    transformer_clone = clone(transformer)
    X_pred = transformer_clone.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        # some transformers (e.g. cross-decomposition) return a tuple
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)
    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            # compare each element of the returned tuples pairwise
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_allclose_dense_sparse(
                    x_pred, x_pred2, atol=1e-2,
                    err_msg="fit_transform and transform outcomes "
                            "not consistent in %s"
                    % transformer)
                assert_allclose_dense_sparse(
                    x_pred, x_pred3, atol=1e-2,
                    err_msg="consecutive fit_transform outcomes "
                            "not consistent in %s"
                    % transformer)
        else:
            assert_allclose_dense_sparse(
                X_pred, X_pred2,
                err_msg="fit_transform and transform outcomes "
                        "not consistent in %s"
                % transformer, atol=1e-2)
            assert_allclose_dense_sparse(
                X_pred, X_pred3, atol=1e-2,
                err_msg="consecutive fit_transform outcomes "
                        "not consistent in %s"
                % transformer)
        assert_equal(_num_samples(X_pred2), n_samples)
        assert_equal(_num_samples(X_pred3), n_samples)
    # raises error on malformed input for transform
    if hasattr(X, 'T'):
        # If it's not an array, it does not have a 'T' property
        with assert_raises(ValueError, msg="The transformer {} does "
                           "not raise an error when the number of "
                           "features in transform is different from"
                           " the number of features in "
                           "fit.".format(name)):
            transformer.transform(X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
    """Check that wrapping an estimator in ``make_pipeline`` does not
    change the results of ``score`` or ``fit_transform``."""
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _IS_32BIT:
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    # fit both the bare estimator and the single-step pipeline
    estimator.fit(X, y)
    pipeline.fit(X, y)
    funcs = ["score", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            # results must agree between the bare estimator and the pipeline
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
    # check that all estimators accept an optional y
    # in fit and score so they can be used in pipelines
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = np.arange(10) % 3
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)
    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            # call it first so introspection runs on a usable method
            func(X, y)
            # inspect the signature: the second argument must be y (or Y)
            args = [p.name for p in signature(func).parameters.values()]
            if args[0] == "self":
                # if_delegate_has_method makes methods into functions
                # with an explicit "self", so need to shift arguments
                args = args[1:]
            assert_true(args[1] in ["y", "Y"],
                        "Expected y or Y as second argument for method "
                        "%s of %s. Got arguments: %r."
                        % (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
    """Fitting and prediction-like methods must accept float32, float64,
    int32 and int64 input arrays."""
    rng = np.random.RandomState(0)
    X_f32 = 3 * rng.uniform(size=(20, 5)).astype(np.float32)
    X_f32 = pairwise_estimator_convert_X(X_f32, estimator_orig)
    X_f64 = X_f32.astype(np.float64)
    X_i64 = X_f32.astype(np.int64)
    X_i32 = X_f32.astype(np.int32)
    y = multioutput_estimator_convert_y_2d(estimator_orig, X_i64[:, 0])
    methods = ["predict", "transform", "decision_function", "predict_proba"]
    for X_train in [X_f32, X_f64, X_i64, X_i32]:
        est = clone(estimator_orig)
        set_random_state(est, 1)
        est.fit(X_train, y)
        for method_name in methods:
            method = getattr(est, method_name, None)
            if method is not None:
                method(X_train)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
    """Check that fitting on zero-sample or zero-feature data raises an
    informative ValueError."""
    e = clone(estimator_orig)
    set_random_state(e, 1)
    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    with assert_raises(ValueError, msg="The estimator {} does not"
                       " raise an error when an empty data is used "
                       "to train. Perhaps use "
                       "check_array in train.".format(name)):
        e.fit(X_zero_samples, [])
    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
    # the zero-feature case has a standardized message we can match on
    msg = (r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
           "is required.")
    assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
    # Checks that Estimator X's do not contain NaN or inf.
    rnd = np.random.RandomState(0)
    X_train_finite = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
                                                  estimator_orig)
    # two corrupted copies: one NaN, one inf, each in a single cell
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(estimator_orig, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
            estimator = clone(estimator_orig)
            set_random_state(estimator, 1)
            # try to fit: a ValueError mentioning NaN/inf is the expected
            # outcome; anything else (or no error) is a failure
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, estimator)
            # actually fit
            estimator.fit(X_train_finite, y)
            # predict: corrupted X must be rejected by a fitted estimator
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    # NOTE(review): unlike the fit branch above, a
                    # non-ValueError here is printed but NOT re-raised —
                    # confirm whether this is intentional.
                    print(error_string_predict, estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                else:
                    raise AssertionError(error_string_predict, estimator)
            # transform: same expectation as predict
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    # NOTE(review): non-ValueError printed but not re-raised
                    # here as well — confirm intended.
                    print(error_string_transform, estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                else:
                    raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
    """Test that we can pickle all estimators.

    Fits a clone on blob data, records the output of every available
    prediction-like method, round-trips the estimator through pickle
    and asserts that the unpickled estimator reproduces those outputs.
    """
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't do features less than 0
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    # include NaN values when the estimator should deal with them
    if name in ALLOW_NAN:
        # set randomly 10 elements to np.nan
        rng = np.random.RandomState(42)
        mask = rng.choice(X.size, 10, replace=False)
        X.reshape(-1)[mask] = np.nan
    estimator = clone(estimator_orig)
    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)
    estimator.fit(X, y)
    # Record reference outputs once.  (The original code rebuilt this dict
    # a second time after pickling, silently discarding the first pass —
    # that duplicate loop has been removed.)
    result = dict()
    for method in check_methods:
        if hasattr(estimator, method):
            result[method] = getattr(estimator, method)(X)
    # pickle and unpickle!
    pickled_estimator = pickle.dumps(estimator)
    if estimator.__module__.startswith('sklearn.'):
        # sklearn estimators embed a version marker in their pickled state
        assert_true(b"version" in pickled_estimator)
    unpickled_estimator = pickle.loads(pickled_estimator)
    for method in result:
        unpickled_result = getattr(unpickled_estimator, method)(X)
        assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
    """``partial_fit`` must reject a feature count that differs from the
    one seen on the first call."""
    if not hasattr(estimator_orig, 'partial_fit'):
        return
    est = clone(estimator_orig)
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    # First partial_fit call; estimators without real support may raise
    # NotImplementedError, in which case there is nothing to check.
    try:
        if is_classifier(est):
            est.partial_fit(X, y, classes=np.unique(y))
        else:
            est.partial_fit(X, y)
    except NotImplementedError:
        return
    failure_msg = ("The estimator {} does not raise an"
                   " error when the number of features"
                   " changes between calls to "
                   "partial_fit.".format(name))
    with assert_raises(ValueError, msg=failure_msg):
        est.partial_fit(X[:, :-1], y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig, readonly_memmap=False):
    """Generic clusterer checks: fit on arrays and lists, labels_ shape
    and dtype, fit_predict consistency, and label contiguity."""
    clusterer = clone(clusterer_orig)
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    rng = np.random.RandomState(7)
    # a copy of X with a few out-of-distribution points appended
    X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
    if readonly_memmap:
        X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    if hasattr(clusterer, "n_clusters"):
        clusterer.set_params(n_clusters=3)
    set_random_state(clusterer)
    if name == 'AffinityPropagation':
        clusterer.set_params(preference=-100)
        clusterer.set_params(max_iter=100)
    # fit
    clusterer.fit(X)
    # with lists
    clusterer.fit(X.tolist())
    pred = clusterer.labels_
    assert_equal(pred.shape, (n_samples,))
    # the found clustering should roughly recover the blobs
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(clusterer)
    with warnings.catch_warnings(record=True):
        pred2 = clusterer.fit_predict(X)
    assert_array_equal(pred, pred2)
    # fit_predict(X) and labels_ should be of type int
    assert_in(pred.dtype, [np.dtype('int32'), np.dtype('int64')])
    assert_in(pred2.dtype, [np.dtype('int32'), np.dtype('int64')])
    # Add noise to X to test the possible values of the labels
    labels = clusterer.fit_predict(X_noise)
    # There should be at least one sample in every cluster. Equivalently
    # labels_ should contain all the consecutive values between its
    # min and its max.
    labels_sorted = np.unique(labels)
    assert_array_equal(labels_sorted, np.arange(labels_sorted[0],
                                                labels_sorted[-1] + 1))
    # Labels are expected to start at 0 (no noise) or -1 (if noise)
    assert_true(labels_sorted[0] in [0, -1])
    # Labels should be less than n_clusters - 1
    if hasattr(clusterer, 'n_clusters'):
        n_clusters = getattr(clusterer, 'n_clusters')
        assert_greater_equal(n_clusters - 1, labels_sorted[-1])
    # else labels should be less than max(labels_) which is necessarily true
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = clone(clusterer_orig)
    if not hasattr(clusterer, "compute_labels"):
        return
    # MiniBatchKMeans exposes compute_labels; pin the seed so both fits
    # follow the same trajectory.
    if hasattr(clusterer, "random_state"):
        clusterer.set_params(random_state=0)
    labels_with = clusterer.fit(X).predict(X)
    clusterer.set_params(compute_labels=False)
    labels_without = clusterer.fit(X).predict(X)
    assert_array_equal(labels_with, labels_without)
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
    """A classifier given a single-class y must either fit and predict
    that class, or raise a ValueError mentioning 'class'."""
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
        classifier = clone(classifier_orig)
        # try to fit: an informative ValueError is an acceptable outcome
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            if 'class' not in repr(e):
                print(error_string_fit, classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                # informative refusal to fit is fine; nothing more to check
                return
        except Exception as exc:
            print(error_string_fit, classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict: the single trained class must be predicted everywhere
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, classifier, exc)
            raise exc
@ignore_warnings  # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig, readonly_memmap=False):
    """Main classifier contract checks, run on a binary and a 3-class
    problem: malformed-input errors, training accuracy, and agreement
    between predict, decision_function and predict_proba."""
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
        # these require non-negative features
        X_m -= X_m.min()
        X_b -= X_b.min()
    if readonly_memmap:
        X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        classifier = clone(classifier_orig)
        X = pairwise_estimator_convert_X(X, classifier_orig)
        set_random_state(classifier)
        # raises error on malformed input for fit
        with assert_raises(ValueError, msg="The classifier {} does not"
                           " raise an error when incorrect/malformed input "
                           "data for fit is passed. The number of training "
                           "examples is not the same as the number of labels."
                           " Perhaps use check_X_y in fit.".format(name)):
            classifier.fit(X, y[:-1])
        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)
        # raises error on malformed input for predict
        if _is_pairwise(classifier):
            # pairwise estimators expect a (n_test, n_train) kernel matrix
            with assert_raises(ValueError, msg="The classifier {} does not"
                               " raise an error when shape of X"
                               "in predict is not equal to (n_test_samples,"
                               "n_training_samples)".format(name)):
                classifier.predict(X.reshape(-1, 1))
        else:
            with assert_raises(ValueError, msg="The classifier {} does not"
                               " raise an error when the number of features "
                               "in predict is different from the number of"
                               " features in fit.".format(name)):
                classifier.predict(X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                if n_classes == 2:
                    # binary: 1-d scores, sign determines the class
                    assert_equal(decision.shape, (n_samples,))
                    dec_pred = (decision.ravel() > 0).astype(np.int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3 and
                        # 1on1 of LibSVM works differently
                        not isinstance(classifier, BaseLibSVM)):
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)
                # raises error on malformed input for decision_function
                if _is_pairwise(classifier):
                    with assert_raises(ValueError, msg="The classifier {} does"
                                       " not raise an error when the "
                                       "shape of X in decision_function is "
                                       "not equal to (n_test_samples, "
                                       "n_training_samples) in fit."
                                       .format(name)):
                        classifier.decision_function(X.reshape(-1, 1))
                else:
                    with assert_raises(ValueError, msg="The classifier {} does"
                                       " not raise an error when the number "
                                       "of features in decision_function is "
                                       "different from the number of features"
                                       " in fit.".format(name)):
                        classifier.decision_function(X.T)
            except NotImplementedError:
                # decision_function is optional
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
            # raises error on malformed input for predict_proba
            if _is_pairwise(classifier_orig):
                with assert_raises(ValueError, msg="The classifier {} does not"
                                   " raise an error when the shape of X"
                                   "in predict_proba is not equal to "
                                   "(n_test_samples, n_training_samples)."
                                   .format(name)):
                    classifier.predict_proba(X.reshape(-1, 1))
            else:
                with assert_raises(ValueError, msg="The classifier {} does not"
                                   " raise an error when the number of "
                                   "features in predict_proba is different "
                                   "from the number of features in fit."
                                   .format(name)):
                    classifier.predict_proba(X.T)
            if hasattr(classifier, "predict_log_proba"):
                # predict_log_proba is a transformation of predict_proba
                y_log_prob = classifier.predict_log_proba(X)
                assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
                assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
    """Checks for outlier detectors: predict returns -1/1 ints,
    decision_function and score_samples are consistent, malformed input
    raises, and the ``contamination`` parameter behaves as documented."""
    X, _ = make_blobs(n_samples=300, random_state=0)
    X = shuffle(X, random_state=7)
    if readonly_memmap:
        X = create_memmap_backed_data(X)
    n_samples, n_features = X.shape
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    # fit
    estimator.fit(X)
    # with lists
    estimator.fit(X.tolist())
    y_pred = estimator.predict(X)
    assert y_pred.shape == (n_samples,)
    assert y_pred.dtype.kind == 'i'
    # outlier detectors label inliers 1 and outliers -1
    assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
    decision = estimator.decision_function(X)
    assert decision.dtype == np.dtype('float')
    score = estimator.score_samples(X)
    assert score.dtype == np.dtype('float')
    # raises error on malformed input for predict
    assert_raises(ValueError, estimator.predict, X.T)
    # decision_function agrees with predict
    decision = estimator.decision_function(X)
    assert decision.shape == (n_samples,)
    dec_pred = (decision >= 0).astype(np.int)
    dec_pred[dec_pred == 0] = -1
    assert_array_equal(dec_pred, y_pred)
    # raises error on malformed input for decision_function
    assert_raises(ValueError, estimator.decision_function, X.T)
    # decision_function is a translation of score_samples
    y_scores = estimator.score_samples(X)
    assert y_scores.shape == (n_samples,)
    y_dec = y_scores - estimator.offset_
    assert_allclose(y_dec, decision)
    # raises error on malformed input for score_samples
    assert_raises(ValueError, estimator.score_samples, X.T)
    # contamination parameter (not for OneClassSVM which has the nu parameter)
    if (hasattr(estimator, 'contamination')
            and not hasattr(estimator, 'novelty')):
        # proportion of outliers equal to contamination parameter when not
        # set to 'auto'. This is true for the training set and cannot thus be
        # checked as follows for estimators with a novelty parameter such as
        # LocalOutlierFactor (tested in check_outliers_fit_predict)
        contamination = 0.1
        estimator.set_params(contamination=contamination)
        estimator.fit(X)
        y_pred = estimator.predict(X)
        assert_almost_equal(np.mean(y_pred != 1), contamination)
        # raises error when contamination is a scalar and not in [0,1]
        for contamination in [-0.5, 2.3]:
            estimator.set_params(contamination=contamination)
            assert_raises(ValueError, estimator.fit, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig,
                                      readonly_memmap=False):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    X -= X.min()  # some estimators want non-negative input
    X = pairwise_estimator_convert_X(X, estimator_orig)
    est = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(est, y)
    if readonly_memmap:
        X, y = create_memmap_backed_data([X, y])
    set_random_state(est)
    fitted = est.fit(X, y)
    assert_true(fitted is est)
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
    """Check that predict raises an exception in an unfitted estimator.
    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors, Classifiers and Outlier detection estimators
    X, y = _boston_subset()
    est = clone(estimator_orig)
    msg = "fit"
    # every prediction-like method must refuse to run before fit
    for method_name in ('predict', 'decision_function',
                        'predict_proba', 'predict_log_proba'):
        if hasattr(est, method_name):
            assert_raise_message((AttributeError, ValueError), msg,
                                 getattr(est, method_name), X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_2d(name, estimator_orig):
    """Check that passing y as a column vector warns (for non-multioutput
    estimators) and yields the same predictions as a 1-d y."""
    if "MultiTask" in name:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)), estimator_orig)
    y = np.arange(10) % 3
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    # fit
    estimator.fit(X, y)
    y_pred = estimator.predict(X)
    set_random_state(estimator)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        estimator.fit(X, y[:, np.newaxis])
    y_pred_2d = estimator.predict(X)
    # msg is used both in the assertion message and the substring check
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    if name not in MULTI_OUTPUT:
        # check that we warned if we don't support multi-output
        assert_greater(len(w), 0, msg)
        assert_true("DataConversionWarning('A column-vector y"
                    " was passed when a 1d array was expected" in msg)
    # predictions must not depend on the shape of y
    assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
    """Fit a clone on (X, y) and verify that predict agrees with
    decision_function and that classes_ matches the training labels."""
    classes = np.unique(y)
    classifier = clone(classifier_orig)
    if name == 'BernoulliNB':
        # BernoulliNB expects binary features
        X = X > X.mean()
    set_random_state(classifier)
    classifier.fit(X, y)
    y_pred = classifier.predict(X)
    if hasattr(classifier, "decision_function"):
        decision = classifier.decision_function(X)
        n_samples, n_features = X.shape
        assert isinstance(decision, np.ndarray)
        if len(classes) == 2:
            # binary: 1-d scores, sign selects the class
            dec_pred = (decision.ravel() > 0).astype(np.int)
            dec_exp = classifier.classes_[dec_pred]
            assert_array_equal(dec_exp, y_pred,
                               err_msg="decision_function does not match "
                               "classifier for %r: expected '%s', got '%s'" %
                               (classifier, ", ".join(map(str, dec_exp)),
                                ", ".join(map(str, y_pred))))
        elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':
            # multiclass one-vs-rest: argmax over class columns
            decision_y = np.argmax(decision, axis=1).astype(int)
            y_exp = classifier.classes_[decision_y]
            assert_array_equal(y_exp, y_pred,
                               err_msg="decision_function does not match "
                               "classifier for %r: expected '%s', got '%s'" %
                               (classifier, ", ".join(map(str, y_exp)),
                                ", ".join(map(str, y_pred))))
    # training set performance
    if name != "ComplementNB":
        # This is a pathological data set for ComplementNB.
        # For some specific cases 'ComplementNB' predicts less classes
        # than expected
        assert_array_equal(np.unique(y), np.unique(y_pred))
    assert_array_equal(classes, classifier.classes_,
                       err_msg="Unexpected classes_ attribute for %r: "
                       "expected '%s', got '%s'" %
                       (classifier, ", ".join(map(str, classes)),
                        ", ".join(map(str, classifier.classes_))))
def choose_check_classifiers_labels(name, y, y_names):
    """Return the string labels unless the estimator needs numeric ones."""
    # LabelPropagation / LabelSpreading are exercised with numeric targets.
    if name in ("LabelPropagation", "LabelSpreading"):
        return y
    return y_names
def check_classifiers_classes(name, classifier_orig):
    """Run check_classifiers_predictions on binary and multiclass blob
    problems with string, object and numeric target labels."""
    X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0,
                                            cluster_std=0.1)
    X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,
                                         random_state=7)
    X_multiclass = StandardScaler().fit_transform(X_multiclass)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X_multiclass -= X_multiclass.min() - .1
    # derive a binary problem by dropping class 2
    X_binary = X_multiclass[y_multiclass != 2]
    y_binary = y_multiclass[y_multiclass != 2]
    X_multiclass = pairwise_estimator_convert_X(X_multiclass, classifier_orig)
    X_binary = pairwise_estimator_convert_X(X_binary, classifier_orig)
    labels_multiclass = ["one", "two", "three"]
    labels_binary = ["one", "two"]
    y_names_multiclass = np.take(labels_multiclass, y_multiclass)
    y_names_binary = np.take(labels_binary, y_binary)
    for X, y, y_names in [(X_multiclass, y_multiclass, y_names_multiclass),
                          (X_binary, y_binary, y_names_binary)]:
        # exercise both str and object dtype label arrays
        for y_names_i in [y_names, y_names.astype('O')]:
            y_ = choose_check_classifiers_labels(name, y, y_names_i)
            check_classifiers_predictions(X, y_, name, classifier_orig)
    # also check numeric binary labels -1/1
    labels_binary = [-1, 1]
    y_names_binary = np.take(labels_binary, y_binary)
    y_binary = choose_check_classifiers_labels(name, y_binary, y_names_binary)
    check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_int(name, regressor_orig):
    """Check that fitting on an integer y gives (nearly) the same
    predictions as fitting on the same y cast to float."""
    X, _ = _boston_subset()
    X = pairwise_estimator_convert_X(X[:50], regressor_orig)
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = multioutput_estimator_convert_y_2d(regressor_orig, y)
    rnd = np.random.RandomState(0)
    # separate estimators to control random seeds
    regressor_1 = clone(regressor_orig)
    regressor_2 = clone(regressor_orig)
    set_random_state(regressor_1)
    set_random_state(regressor_2)
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a 2-column y
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    # fit one regressor on int targets, the other on float targets
    regressor_1.fit(X, y_)
    pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(np.float))
    pred2 = regressor_2.predict(X)
    assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig, readonly_memmap=False):
    """Generic training checks for regressors.

    Verifies that malformed input raises ValueError, that fitting works on
    arrays and on plain lists, that prediction shape matches y, and that
    the training-set score is reasonable.

    Parameters
    ----------
    name : str
        Estimator class name, used for special-casing.
    regressor_orig : estimator instance
        Prototype regressor; it is cloned before fitting.
    readonly_memmap : bool
        If True, back X and y with memmaps to check that fitting does not
        require writable inputs.
    """
    X, y = _boston_subset()
    X = pairwise_estimator_convert_X(X, regressor_orig)
    y = StandardScaler().fit_transform(y.reshape(-1, 1))  # X is already scaled
    y = y.ravel()
    regressor = clone(regressor_orig)
    y = multioutput_estimator_convert_y_2d(regressor, y)
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a 2-column y
        rnd = np.random.RandomState(0)
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    if readonly_memmap:
        X, y, y_ = create_memmap_backed_data([X, y, y_])
    if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
        # linear regressors need to set alpha, but not generalized CV ones
        regressor.alpha = 0.01
    if name == 'PassiveAggressiveRegressor':
        regressor.C = 0.01
    # raises error on malformed input for fit
    # (message fixed: this check targets regressors, not classifiers)
    with assert_raises(ValueError, msg="The regressor {} does not"
                       " raise an error when incorrect/malformed input "
                       "data for fit is passed. The number of training "
                       "examples is not the same as the number of "
                       "labels. Perhaps use check_X_y in fit.".format(name)):
        regressor.fit(X, y[:-1])
    # fit
    set_random_state(regressor)
    regressor.fit(X, y_)
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert_equal(y_pred.shape, y_.shape)
    # TODO: find out why PLS and CCA fail. RANSAC is random
    # and furthermore assumes the presence of outliers, hence
    # skipped
    if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
        assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
    # checks whether regressors have decision_function or predict_proba
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    reg = clone(regressor_orig)
    y = multioutput_estimator_convert_y_2d(reg, X[:, 0])
    if hasattr(reg, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        reg.n_components = 1
    reg.fit(X, y)
    for func_name in ("decision_function", "predict_proba",
                      "predict_log_proba"):
        func = getattr(reg, func_name, None)
        if func is None:
            continue  # the regressor does not expose this function
        # If exposed, calling it must emit a deprecation warning.
        assert_warns_message(DeprecationWarning, func_name, func, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_classifiers(name, classifier_orig):
    """Check that an extreme class_weight on class 0 makes the classifier
    predict (almost) only class 0 on very noisy blob data."""
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest("Not testing NuSVC class weight as it is ignored.")
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        # can't use gram_if_pairwise() here, setting up gram matrix manually
        if _is_pairwise(classifier_orig):
            X_test = rbf_kernel(X_test, X_train)
            X_train = rbf_kernel(X_train, X_train)
        # recount the classes actually present in the training split
        n_centers = len(np.unique(y_train))
        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
        classifier = clone(classifier_orig).set_params(
            class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "max_iter"):
            classifier.set_params(max_iter=1000)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)
        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
        # 0.88 (Issue #9111)
        assert_greater(np.mean(y_pred == 0), 0.87)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
                                            y_train, X_test, y_test, weights):
    """Check that class_weight='balanced' improves the weighted f1 score
    over the default weighting on the supplied train/test split."""
    clf = clone(classifier_orig)
    if hasattr(clf, "n_iter"):
        clf.set_params(n_iter=100)
    if hasattr(clf, "max_iter"):
        clf.set_params(max_iter=1000)
    set_random_state(clf)

    # baseline: default class weighting
    pred_default = clf.fit(X_train, y_train).predict(X_test)

    # re-fit the same classifier with balanced weighting
    clf.set_params(class_weight='balanced')
    pred_balanced = clf.fit(X_train, y_train).predict(X_test)

    assert_greater(f1_score(y_test, pred_balanced, average='weighted'),
                   f1_score(y_test, pred_default, average='weighted'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels.

    Fits once with ``class_weight='balanced'`` and once with the manually
    computed weights n_samples / (n_classes * count(class)), and asserts the
    resulting ``coef_`` are the same.
    """
    # this is run on classes, not instances, though this should be changed
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])
    classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        # This is a very small dataset, default n_iter are likely to prevent
        # convergence
        classifier.set_params(n_iter=1000)
    if hasattr(classifier, "max_iter"):
        classifier.set_params(max_iter=1000)
    set_random_state(classifier)
    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()
    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))
    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()
    assert_allclose(coef_balanced, coef_manual)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_overwrite_params(name, estimator_orig):
    """Check that ``fit`` does not change or mutate constructor parameters
    as reported by ``get_params()``."""
    X, y = make_blobs(random_state=0, n_samples=9)
    # some want non-negative input
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)
    # Fit the model
    estimator.fit(X, y)
    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]
        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        # NOTE(review): the comment above says joblib.hash, but the code
        # calls the builtin hash(), which raises TypeError for unhashable
        # values (lists, dicts, ndarrays) and does not introspect
        # subobjects — confirm against upstream, which uses joblib.hash here.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     " the parameter %s from %s to %s during fit."
                     % (name, param_name, original_value, new_value))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_no_attributes_set_in_init(name, estimator):
    """Check that __init__ sets exactly the attributes named by its own
    parameters (and its parents'), nothing more and nothing less."""
    if hasattr(type(estimator).__init__, "deprecated_original"):
        # estimator wrapped by a deprecation decorator; skip
        return
    init_params = _get_args(type(estimator).__init__)
    if IS_PYPY:
        # __init__ signature has additional objects in PyPy
        for key in ['obj']:
            if key in init_params:
                init_params.remove(key)
    # collect the __init__ parameter names of every class in the MRO
    parents_init_params = [param for params_parent in
                           (_get_args(parent) for parent in
                            type(estimator).__mro__)
                           for param in params_parent]
    # Test for no setting apart from parameters during init
    invalid_attr = (set(vars(estimator)) - set(init_params)
                    - set(parents_init_params))
    assert_false(invalid_attr,
                 "Estimator %s should not set any attribute apart"
                 " from parameters during init. Found attributes %s."
                 % (name, sorted(invalid_attr)))
    # Ensure that each parameter is set in init
    invalid_attr = (set(init_params) - set(vars(estimator))
                    - set(["self"]))
    assert_false(invalid_attr,
                 "Estimator %s should store all parameters"
                 " as an attribute during init. Did not find "
                 "attributes %s." % (name, sorted(invalid_attr)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
    """Check that sparsify() leaves predictions unchanged and makes coef_
    sparse, and that both survive a pickle round-trip."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    model = clone(estimator_orig)
    model.fit(X, y)
    dense_pred = model.predict(X)

    # sparsifying must not change predictions
    model.sparsify()
    assert_true(sparse.issparse(model.coef_))
    assert_array_equal(model.predict(X), dense_pred)

    # neither must a pickle round-trip of the sparsified model
    model = pickle.loads(pickle.dumps(model))
    assert_true(sparse.issparse(model.coef_))
    assert_array_equal(model.predict(X), dense_pred)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
    """Run the data-not-an-array check on a small classification problem."""
    X = pairwise_estimator_convert_X(
        np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]]),
        estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator_orig,
                                           [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
    """Run the data-not-an-array check on a small regression problem."""
    X, y = _boston_subset(n_samples=50)
    check_estimators_data_not_an_array(
        name, estimator_orig,
        pairwise_estimator_convert_X(X, estimator_orig),
        multioutput_estimator_convert_y_2d(estimator_orig, y))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
    """Check that fitting/predicting on array-like wrappers (NotAnArray)
    gives the same predictions as on the raw X, y."""
    if name in CROSS_DECOMPOSITION:
        raise SkipTest("Skipping check_estimators_data_not_an_array "
                       "for cross decomposition module as estimators "
                       "are not deterministic.")
    # separate estimators to control random seeds
    estimator_1 = clone(estimator_orig)
    estimator_2 = clone(estimator_orig)
    set_random_state(estimator_1)
    set_random_state(estimator_2)
    # wrap the data so it is array-like but not an ndarray
    y_ = NotAnArray(np.asarray(y))
    X_ = NotAnArray(np.asarray(X))
    # fit
    estimator_1.fit(X_, y_)
    pred1 = estimator_1.predict(X_)
    estimator_2.fit(X, y)
    pred2 = estimator_2.predict(X)
    assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
    """Check that ``Estimator`` is default-constructible, clonable, reprable,
    and that every __init__ parameter has a simple-typed default that is
    stored unchanged (as reported by get_params)."""
    # this check works on classes, not instances
    classifier = LinearDiscriminantAnalysis()
    # test default-constructibility
    # get rid of deprecation warnings
    with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
        if name in META_ESTIMATORS:
            # meta-estimators need a base estimator to construct
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            def param_filter(p):
                """Identify hyper parameters of an estimator"""
                return (p.name != 'self' and
                        p.kind != p.VAR_KEYWORD and
                        p.kind != p.VAR_POSITIONAL)
            init_params = [p for p in signature(init).parameters.values()
                           if param_filter(p)]
        except (TypeError, ValueError):
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they can need a non-default argument
            init_params = init_params[1:]
        for init_param in init_params:
            assert_not_equal(init_param.default, init_param.empty,
                             "parameter %s for %s has no default value"
                             % (init_param.name, type(estimator).__name__))
            # defaults must be of a plain, immutable-ish type
            assert_in(type(init_param.default),
                      [str, int, float, bool, tuple, type(None),
                       np.float64, types.FunctionType, Memory])
            if init_param.name not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(init_param.default is None)
                continue
            if (issubclass(Estimator, BaseSGD) and
                    init_param.name in ['tol', 'max_iter']):
                # To remove in 0.21, when they get their future default values
                continue
            param_value = params[init_param.name]
            if isinstance(param_value, np.ndarray):
                assert_array_equal(param_value, init_param.default)
            else:
                if is_scalar_nan(param_value):
                    # Allows to set default parameters to np.nan
                    assert param_value is init_param.default, init_param.name
                else:
                    assert param_value == init_param.default, init_param.name
def multioutput_estimator_convert_y_2d(estimator, y):
    """Reshape ``y`` into a single-column 2-D array for MultiTask estimators.

    Estimators whose class name contains "MultiTask" raise ValueError on a
    1-D target, so hand them a column vector; every other estimator gets
    ``y`` back unchanged.
    """
    needs_2d = "MultiTask" in type(estimator).__name__
    return np.reshape(y, (-1, 1)) if needs_2d else y
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    """Check that non-transformer estimators with ``max_iter`` expose
    ``n_iter_ >= 1`` after fitting on iris."""
    # Test that estimators that are not transformers with a parameter
    # max_iter, return the attribute of n_iter_ at least 1.
    # These models are dependent on external solvers like
    # libsvm and accessing the iter parameter is non-trivial.
    not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
                            'RidgeClassifier', 'SVC', 'RandomizedLasso',
                            'LogisticRegressionCV', 'LinearSVC',
                            'LogisticRegression']
    # Tested in test_transformer_n_iter
    not_run_check_n_iter += CROSS_DECOMPOSITION
    if name in not_run_check_n_iter:
        return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
    if name == 'LassoLars':
        estimator = clone(estimator_orig).set_params(alpha=0.)
    else:
        estimator = clone(estimator_orig)
    if hasattr(estimator, 'max_iter'):
        iris = load_iris()
        X, y_ = iris.data, iris.target
        y_ = multioutput_estimator_convert_y_2d(estimator, y_)
        set_random_state(estimator, 0)
        if name == 'AffinityPropagation':
            # unsupervised: fit takes X only
            estimator.fit(X)
        else:
            estimator.fit(X, y_)
        assert estimator.n_iter_ >= 1
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
    """Check that transformers with ``max_iter`` expose ``n_iter_ >= 1``
    (one value per component for cross-decomposition) after fitting."""
    # Test that transformers with a parameter max_iter, return the
    # attribute of n_iter_ at least 1.
    estimator = clone(estimator_orig)
    if hasattr(estimator, "max_iter"):
        if name in CROSS_DECOMPOSITION:
            # Check using default data
            X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
            y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
        else:
            X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                               random_state=0, n_features=2, cluster_std=0.1)
            # shift to strictly positive values (some transformers require it)
            X -= X.min() - 0.1
        set_random_state(estimator, 0)
        estimator.fit(X, y_)
        # These return a n_iter per component.
        if name in CROSS_DECOMPOSITION:
            for iter_ in estimator.n_iter_:
                assert_greater_equal(iter_, 1)
        else:
            assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
    """Check that get_params(deep=False) is a subset of get_params(deep=True).

    Every (key, value) pair reported by the shallow view must reappear,
    unchanged, in the deep view.

    Note: the original body also defined a local mock class ``T`` that was
    never referenced anywhere in the function; that dead code is removed.
    """
    e = clone(estimator_orig)
    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)
    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_set_params(name, estimator_orig):
    """Check that ``get_params()`` round-trips through ``set_params()``,
    including when set_params is fed fuzz values (-inf, inf, None).

    If set_params raises on a fuzz value, the estimator's parameters must
    not have changed as a side effect of the failed call; violations are
    reported as warnings rather than failures.
    """
    # Check that get_params() returns the same thing
    # before and after set_params() with some fuzz
    estimator = clone(estimator_orig)
    orig_params = estimator.get_params(deep=False)
    msg = ("get_params result does not match what was passed to set_params")
    estimator.set_params(**orig_params)
    curr_params = estimator.get_params(deep=False)
    # identical key set and identical (same-object) values after round-trip
    assert_equal(set(orig_params.keys()), set(curr_params.keys()), msg)
    for k, v in curr_params.items():
        assert orig_params[k] is v, msg
    # some fuzz values
    test_values = [-np.inf, np.inf, None]
    test_params = deepcopy(orig_params)
    for param_name in orig_params.keys():
        default_value = orig_params[param_name]
        for value in test_values:
            test_params[param_name] = value
            try:
                estimator.set_params(**test_params)
            except (TypeError, ValueError) as e:
                e_type = e.__class__.__name__
                # Exception occurred, possibly parameter validation
                warnings.warn("{} occurred during set_params. "
                              "It is recommended to delay parameter "
                              "validation until fit.".format(e_type))
                change_warning_msg = "Estimator's parameters changed after " \
                                     "set_params raised {}".format(e_type)
                params_before_exception = curr_params
                curr_params = estimator.get_params(deep=False)
                try:
                    # a failed set_params must not have mutated anything
                    assert_equal(set(params_before_exception.keys()),
                                 set(curr_params.keys()))
                    for k, v in curr_params.items():
                        assert params_before_exception[k] is v
                except AssertionError:
                    warnings.warn(change_warning_msg)
            else:
                # set_params accepted the fuzz value: get_params must echo it
                curr_params = estimator.get_params(deep=False)
                assert_equal(set(test_params.keys()),
                             set(curr_params.keys()),
                             msg)
                for k, v in curr_params.items():
                    assert test_params[k] is v, msg
            # restore the original value before fuzzing the next parameter
            test_params[param_name] = default_value
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
    """Check that the classifier rejects continuous (regression) targets."""
    boston = load_boston()
    data, target = boston.data, boston.target
    clf = clone(estimator_orig)
    # fitting on a continuous y must raise the "Unknown label type" ValueError
    assert_raises_regex(ValueError, 'Unknown label type: ', clf.fit,
                        data, target)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
    # Check whether an estimator having both decision_function and
    # predict_proba methods has outputs with perfect rank correlation.
    centers = [(2, 2), (4, 4)]
    X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
                      centers=centers, cluster_std=1.0, shuffle=True)
    # NOTE(review): X_test is drawn from the global np.random state rather
    # than a seeded RandomState, so the test data differs between runs —
    # confirm this is intended.
    X_test = np.random.randn(20, 2) + 4
    estimator = clone(estimator_orig)
    if (hasattr(estimator, "decision_function") and
            hasattr(estimator, "predict_proba")):
        estimator.fit(X, y)
        # rank order of P(class 1) must match rank order of the decision values
        a = estimator.predict_proba(X_test)[:, 1]
        b = estimator.decision_function(X_test)
        assert_array_equal(rankdata(a), rankdata(b))
def check_outliers_fit_predict(name, estimator_orig):
    # Check fit_predict for outlier detectors.
    # Verifies output shape/dtype/labels, fit_predict == fit().predict(),
    # and that the contamination parameter controls the outlier fraction.
    X, _ = make_blobs(n_samples=300, random_state=0)
    X = shuffle(X, random_state=7)
    n_samples, n_features = X.shape
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    y_pred = estimator.fit_predict(X)
    assert y_pred.shape == (n_samples,)
    assert y_pred.dtype.kind == 'i'
    # outlier detectors label samples as -1 (outlier) or 1 (inlier)
    assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
    # check fit_predict = fit.predict when the estimator has both a predict and
    # a fit_predict method. recall that it is already assumed here that the
    # estimator has a fit_predict method
    if hasattr(estimator, 'predict'):
        y_pred_2 = estimator.fit(X).predict(X)
        assert_array_equal(y_pred, y_pred_2)
    if hasattr(estimator, "contamination"):
        # proportion of outliers equal to contamination parameter when not
        # set to 'auto'
        contamination = 0.1
        estimator.set_params(contamination=contamination)
        y_pred = estimator.fit_predict(X)
        assert_almost_equal(np.mean(y_pred != 1), contamination)
        # raises error when contamination is a scalar and not in [0,1]
        for contamination in [-0.5, 2.3]:
            estimator.set_params(contamination=contamination)
            assert_raises(ValueError, estimator.fit_predict, X)
| bsd-3-clause |
SDK/metadatachecker | sacm/parserALMA.py | 1 | 36771 | # -*- coding: utf-8 -*-
__author__ = 'sdk'
from sacm import *
from xml.dom import minidom
import pandas as pd
def getASDM(uid=None):
    """Parse the top-level ASDM table of contents for `uid`.

    Returns a 3-tuple (list of non-empty tables, DataFrame of the same
    records, time of creation as a datetime) or False when the ASDM XML
    cannot be retrieved.
    """
    xml = GetXML(uid, 'ASDM')
    if xml is False:
        return False
    dom = minidom.parseString(xml)
    tables = []
    for tab in dom.getElementsByTagName('Table'):
        if int(tab.getElementsByTagName('NumberRows')[0].firstChild.data) == 0:
            continue  # keep only tables that actually contain rows
        entity = tab.getElementsByTagName('Entity')[0]
        tables.append((tab.getElementsByTagName('Name')[0].firstChild.data,
                       tab.getElementsByTagName('NumberRows')[0].firstChild.data,
                       str(entity.getAttribute('entityTypeName')),
                       str(entity.getAttribute('entityId'))))
    # TimeOfCreation carries fractional seconds; keep only the first 19
    # characters (YYYY-MM-DDTHH:MM:SS) for parsing
    toc = dom.getElementsByTagName('TimeOfCreation')[0].firstChild.data
    created = datetime.datetime.strptime(toc.strip()[0:19],
                                         "%Y-%m-%dT%H:%M:%S")
    return (tables,
            pd.DataFrame(tables,
                         columns=['table', 'numrows', 'typename', 'uid']),
            created)
def getMain(uid=None):
    """Return the Main table for `uid` as a DataFrame, or False when the
    Main XML cannot be retrieved."""
    xml = GetXML(uid, 'Main')
    if xml is False:
        return False
    dom = minidom.parseString(xml)

    def text(row, tag):
        # Text content of the first <tag> element of this row.
        return row.getElementsByTagName(tag)[0].firstChild.data

    records = []
    for row in dom.getElementsByTagName('row'):
        data_ref = row.getElementsByTagName('dataUID')[0] \
                      .getElementsByTagName('EntityRef')[0]
        records.append((sdmTimeString(int(text(row, 'time'))),
                        text(row, 'timeSampling'),
                        int(text(row, 'interval')),
                        int(text(row, 'scanNumber')),
                        int(text(row, 'subscanNumber')),
                        int(text(row, 'dataSize')),
                        data_ref.getAttribute('entityId'),
                        text(row, 'fieldId'),
                        text(row, 'stateId')))
    return pd.DataFrame(records, columns=['time', 'timeSampling', 'interval',
                                          'scanNumber', 'subscanNumber',
                                          'dataSize', 'dataUID', 'fieldId',
                                          'stateId'])
def getAntennas(uid=None):
    """Return the Antenna table for `uid` as a DataFrame, or False when the
    Antenna XML cannot be retrieved."""
    xml = GetXML(uid, 'Antenna')
    if xml is False:
        return False
    dom = minidom.parseString(xml)
    fields = ('antennaId', 'name', 'antennaMake', 'dishDiameter', 'stationId')
    records = [tuple(row.getElementsByTagName(f)[0].firstChild.data
                     for f in fields)
               for row in dom.getElementsByTagName('row')]
    return pd.DataFrame(records, columns=['antennaId', 'name', 'antennaMake',
                                          'dishDiameter', 'stationId'])
def getCalPhase(uid=None):
    """Return the CalPhase table for `uid` as a DataFrame, or False when the
    CalPhase XML cannot be retrieved.

    All values are kept as the raw text of the XML elements (no numeric
    conversion is done here).
    """
    calPhaseXML = GetXML(uid,'CalPhase')
    if calPhaseXML is not False:
        calPhase = minidom.parseString(calPhaseXML)
        calPhaseList = list()
        rows = calPhase.getElementsByTagName('row')
        for i in rows:
            calPhaseList.append((
                i.getElementsByTagName('basebandName')[0].firstChild.data,
                i.getElementsByTagName('receiverBand')[0].firstChild.data,
                i.getElementsByTagName('atmPhaseCorrection')[0].firstChild.data,
                i.getElementsByTagName('startValidTime')[0].firstChild.data,
                i.getElementsByTagName('endValidTime')[0].firstChild.data,
                i.getElementsByTagName('numBaseline')[0].firstChild.data,
                i.getElementsByTagName('numReceptor')[0].firstChild.data,
                i.getElementsByTagName('ampli')[0].firstChild.data,
                i.getElementsByTagName('antennaNames')[0].firstChild.data,
                i.getElementsByTagName('baselineLengths')[0].firstChild.data,
                i.getElementsByTagName('decorrelationFactor')[0].firstChild.data,
                i.getElementsByTagName('direction')[0].firstChild.data,
                i.getElementsByTagName('frequencyRange')[0].firstChild.data,
                i.getElementsByTagName('integrationTime')[0].firstChild.data,
                i.getElementsByTagName('phase')[0].firstChild.data,
                i.getElementsByTagName('polarizationTypes')[0].firstChild.data,
                i.getElementsByTagName('phaseRMS')[0].firstChild.data,
                i.getElementsByTagName('statPhaseRMS')[0].firstChild.data,
                i.getElementsByTagName('calDataId')[0].firstChild.data,
                i.getElementsByTagName('calReductionId')[0].firstChild.data,
            ))
        return pd.DataFrame(calPhaseList,columns=['basebandName','receiverBand','atmPhaseCorrection','startValidTime','endValidTime','numBaseline',
                                                  'numReceptor','ampli','antennaNames','baselineLengths','decorrelationFactor','direction',
                                                  'frequencyRange','integrationTime','phase','polarizationTypes','phaseRMS','statPhaseRMS','calDataId','calReductionId'])
    else:
        return False
def getCalDelay(uid=None):
    """Return the CalDelay table for `uid` as a DataFrame, or False when the
    CalDelay XML cannot be retrieved.

    NOTE(review): this body reads exactly the same elements/columns as
    getCalPhase (ampli, phase, phaseRMS, ...) rather than delay-specific
    fields — it looks copy-pasted from getCalPhase. Confirm the CalDelay
    table actually exposes these elements, otherwise every row raises
    IndexError here.
    """
    calDelayXML = GetXML(uid,'CalDelay')
    if calDelayXML is not False:
        calDelay = minidom.parseString(calDelayXML)
        calDelayList = list()
        rows = calDelay.getElementsByTagName('row')
        for i in rows:
            calDelayList.append((
                i.getElementsByTagName('basebandName')[0].firstChild.data,
                i.getElementsByTagName('receiverBand')[0].firstChild.data,
                i.getElementsByTagName('atmPhaseCorrection')[0].firstChild.data,
                i.getElementsByTagName('startValidTime')[0].firstChild.data,
                i.getElementsByTagName('endValidTime')[0].firstChild.data,
                i.getElementsByTagName('numBaseline')[0].firstChild.data,
                i.getElementsByTagName('numReceptor')[0].firstChild.data,
                i.getElementsByTagName('ampli')[0].firstChild.data,
                i.getElementsByTagName('antennaNames')[0].firstChild.data,
                i.getElementsByTagName('baselineLengths')[0].firstChild.data,
                i.getElementsByTagName('decorrelationFactor')[0].firstChild.data,
                i.getElementsByTagName('direction')[0].firstChild.data,
                i.getElementsByTagName('frequencyRange')[0].firstChild.data,
                i.getElementsByTagName('integrationTime')[0].firstChild.data,
                i.getElementsByTagName('phase')[0].firstChild.data,
                i.getElementsByTagName('polarizationTypes')[0].firstChild.data,
                i.getElementsByTagName('phaseRMS')[0].firstChild.data,
                i.getElementsByTagName('statPhaseRMS')[0].firstChild.data,
                i.getElementsByTagName('calDataId')[0].firstChild.data,
                i.getElementsByTagName('calReductionId')[0].firstChild.data,
            ))
        return pd.DataFrame(calDelayList,columns=['basebandName','receiverBand','atmPhaseCorrection','startValidTime','endValidTime','numBaseline',
                                                  'numReceptor','ampli','antennaNames','baselineLengths','decorrelationFactor','direction',
                                                  'frequencyRange','integrationTime','phase','polarizationTypes','phaseRMS','statPhaseRMS','calDataId','calReductionId'])
    else:
        return False
def getSBSummary(uid=None):
    """Return the SBSummary table for `uid` as a DataFrame, or False when
    the SBSummary XML cannot be retrieved."""
    xml = GetXML(uid, 'SBSummary')
    if xml is False:
        return False
    dom = minidom.parseString(xml)

    def entity(row, tag):
        # entityId attribute of the <EntityRef> nested under <tag>.
        return row.getElementsByTagName(tag)[0] \
                  .getElementsByTagName('EntityRef')[0] \
                  .getAttribute('entityId')

    def text(row, tag):
        return row.getElementsByTagName(tag)[0].firstChild.data

    records = []
    for row in dom.getElementsByTagName('row'):
        records.append((entity(row, 'sbSummaryUID'),
                        entity(row, 'projectUID'),
                        entity(row, 'obsUnitSetUID'),
                        float(text(row, 'frequency')),
                        text(row, 'frequencyBand'),
                        text(row, 'scienceGoal'),
                        text(row, 'weatherConstraint')))
    return pd.DataFrame(records, columns=['sbSummaryUID', 'projectUID',
                                          'obsUnitSetUID', 'frequency',
                                          'frequencyBand', 'scienceGoal',
                                          'weatherConstraint'])
def getScan(uid=None):
    """Return the Scan table for `uid` as a DataFrame, or False when the
    Scan XML cannot be retrieved.

    Rows missing numField/fieldName/sourceName fall back to 0 / "None"
    placeholders via the IndexError handler below.
    """
    scanXML = GetXML(uid,'Scan')
    if scanXML is not False:
        scan = minidom.parseString(scanXML)
        scanList = list()
        rows = scan.getElementsByTagName('row')
        for i in rows:
            try:
                scanList.append((int(i.getElementsByTagName('scanNumber')[0].firstChild.data),
                             int(i.getElementsByTagName('startTime')[0].firstChild.data),
                             int(i.getElementsByTagName('endTime')[0].firstChild.data),
                             #i.getElementsByTagName('numIntent')[0].firstChild.data,
                             int(i.getElementsByTagName('numSubscan')[0].firstChild.data),
                             arrayParser(i.getElementsByTagName('scanIntent')[0].firstChild.data, 1),
                             arrayParser(i.getElementsByTagName('calDataType')[0].firstChild.data, 1),
                             int(i.getElementsByTagName('numField')[0].firstChild.data),
                             i.getElementsByTagName('fieldName')[0].firstChild.data,
                             i.getElementsByTagName('sourceName')[0].firstChild.data))
            except IndexError as e:
                # Row lacks one of the optional trailing elements: re-read
                # the mandatory fields and substitute placeholders for the
                # rest. NOTE(review): `e` is unused, and if a *mandatory*
                # element is the missing one this fallback raises the same
                # IndexError again — confirm that is acceptable.
                scanList.append((int(i.getElementsByTagName('scanNumber')[0].firstChild.data),
                             int(i.getElementsByTagName('startTime')[0].firstChild.data),
                             int(i.getElementsByTagName('endTime')[0].firstChild.data),
                             #i.getElementsByTagName('numIntent')[0].firstChild.data,
                             int(i.getElementsByTagName('numSubscan')[0].firstChild.data),
                             arrayParser(i.getElementsByTagName('scanIntent')[0].firstChild.data, 1),
                             arrayParser(i.getElementsByTagName('calDataType')[0].firstChild.data, 1),
                             0,
                             u"None",
                             u"None"))
        return pd.DataFrame(scanList, columns=['scanNumber', 'startTime', 'endTime', 'numSubscan',
                                               'scanIntent', 'calDataType', 'numField', 'fieldName', 'sourceName'])
    else:
        return False
def getStation(uid=None):
    """Return the Station table for `uid` as a DataFrame.

    Returns False when the Station XML cannot be retrieved — previously the
    function fell off the end and implicitly returned None in that case,
    unlike every other table getter in this module — or when a row is
    missing one of the expected elements (original behaviour: report the
    error and abort the parse).
    """
    stationXML = GetXML(uid, 'Station')
    if stationXML is False:
        return False
    station = minidom.parseString(stationXML)
    stationList = list()
    rows = station.getElementsByTagName('row')
    for i in rows:
        try:
            stationList.append((
                i.getElementsByTagName('stationId')[0].firstChild.data,
                i.getElementsByTagName('name')[0].firstChild.data,
                i.getElementsByTagName('position')[0].firstChild.data,
                i.getElementsByTagName('type')[0].firstChild.data,
            ))
        except IndexError as error:
            # print() form is valid on both Python 2 and 3 (the original
            # used the Python-2-only `print error` statement)
            print(error)
            return False
    return pd.DataFrame(stationList, columns=['stationId', 'name',
                                              'position', 'type'])
def getSubScan(uid=None):
    """Return the Subscan table for `uid` as a DataFrame, or False when the
    Subscan XML cannot be retrieved."""
    xml = GetXML(uid, 'Subscan')
    if xml is False:
        return False
    dom = minidom.parseString(xml)

    def text(row, tag):
        # Text content of the first <tag> element of this row.
        return row.getElementsByTagName(tag)[0].firstChild.data

    records = []
    for row in dom.getElementsByTagName('row'):
        records.append((int(text(row, 'scanNumber')),
                        int(text(row, 'subscanNumber')),
                        int(text(row, 'startTime')),
                        int(text(row, 'endTime')),
                        text(row, 'fieldName'),
                        text(row, 'subscanIntent'),
                        text(row, 'subscanMode'),
                        text(row, 'numIntegration')))
    return pd.DataFrame(records, columns=['scanNumber', 'subscanNumber',
                                          'startTime', 'endTime', 'fieldName',
                                          'subscanIntent', 'subscanMode',
                                          'numIntegration'])
def getSource(uid=None):
    """Return the Source table for `uid` as a DataFrame, or False when the
    Source XML cannot be retrieved.

    NOTE(review): the comment below says some Source rows have missing
    fields, yet the element accesses are unguarded — a row lacking e.g.
    directionCode raises IndexError here. Confirm whether a try/except
    fallback (as in getScan) is needed.
    """
    sourceXML = GetXML(uid,'Source')
    if sourceXML is not False:
        source = minidom.parseString(sourceXML)
        sourceList = list()
        rows = source.getElementsByTagName('row')
        #there are missing fields in some rows for the Source table.
        for i in rows:
            sourceList.append((int(i.getElementsByTagName('sourceId')[0].firstChild.data),
                               i.getElementsByTagName('timeInterval')[0].firstChild.data,
                               i.getElementsByTagName('direction')[0].firstChild.data,
                               i.getElementsByTagName('directionCode')[0].firstChild.data,
                               i.getElementsByTagName('sourceName')[0].firstChild.data,
                               i.getElementsByTagName('spectralWindowId')[0].firstChild.data))
        return pd.DataFrame(sourceList,columns=['sourceId','timeInterval','direction','directionCode','sourceName',
                                                'spectralWindowId'])
    else:
        return False
def getSpectralWindow(uid=None):
    """Return the SpectralWindow table for `uid` as a DataFrame, or False
    when the SpectralWindow XML cannot be retrieved.

    Only windows with numChan > 4 are kept. Rows missing one of the
    accessed elements (e.g. assocNature) are reported and skipped.
    """
    spwXML = GetXML(uid, 'SpectralWindow')
    if spwXML is not False:
        spw = minidom.parseString(spwXML)
        spwList = list()
        rows = spw.getElementsByTagName('row')
        for i in rows:
            # skip narrow windows (numChan <= 4); only numChan is read for
            # filtered-out rows, so they may omit the other elements
            if int(i.getElementsByTagName('numChan')[0].firstChild.data) > 4:
                try:
                    spwList.append((i.getElementsByTagName('spectralWindowId')[0].firstChild.data,
                                    i.getElementsByTagName('basebandName')[0].firstChild.data,
                                    i.getElementsByTagName('netSideband')[0].firstChild.data,
                                    int(i.getElementsByTagName('numChan')[0].firstChild.data),
                                    float(i.getElementsByTagName('refFreq')[0].firstChild.data),
                                    i.getElementsByTagName('sidebandProcessingMode')[0].firstChild.data,
                                    float(i.getElementsByTagName('totBandwidth')[0].firstChild.data),
                                    i.getElementsByTagName('chanFreqStart')[0].firstChild.data,
                                    i.getElementsByTagName('chanFreqStep')[0].firstChild.data,
                                    i.getElementsByTagName('chanWidth')[0].firstChild.data,
                                    i.getElementsByTagName('effectiveBw')[0].firstChild.data,
                                    i.getElementsByTagName('name')[0].firstChild.data,
                                    #i.getElementsByTagName('resolutionArray')[0].firstChild.data,
                                    i.getElementsByTagName('assocNature')[0].firstChild.data,
                                    i.getElementsByTagName('assocSpectralWindowId')[0].firstChild.data))
                except IndexError as e:
                    # row lacks an expected element — report and skip it.
                    # print() form is valid on both Python 2 and 3 (the
                    # original used the Python-2-only `print e` statement)
                    print(e)
        return pd.DataFrame(spwList, columns=['spectralWindowId', 'basebandName', 'netSideband', 'numChan',
                                              'refFreq', 'sidebandProcessingMode', 'totBandwidth', 'chanFreqStart', 'chanFreqStep', 'chanWidth',
                                              'effectiveBw', 'name',
                                              'assocNature', 'assocSpectralWindowId'])
    else:
        return False
def getField(uid=None):
    """Return the Field table for `uid` as a DataFrame, or False when the
    Field XML cannot be retrieved."""
    xml = GetXML(uid, 'Field')
    if xml is False:
        return False
    dom = minidom.parseString(xml)

    def text(row, tag):
        # Text content of the first <tag> element of this row.
        return row.getElementsByTagName(tag)[0].firstChild.data

    records = []
    for row in dom.getElementsByTagName('row'):
        records.append((text(row, 'fieldId'),
                        text(row, 'fieldName'),
                        text(row, 'numPoly'),
                        text(row, 'referenceDir'),
                        int(text(row, 'time')),
                        text(row, 'code'),
                        text(row, 'directionCode'),
                        int(text(row, 'sourceId'))))
    return pd.DataFrame(records, columns=['fieldId', 'fieldName', 'numPoly',
                                          'referenceDir', 'time', 'code',
                                          'directionCode', 'sourceId'])
def getSysCal(uid=None):
    """Return the SysCal table for `uid` as a DataFrame, or False when the
    SysCal XML cannot be retrieved.

    Values are kept as raw element text; only antennaId and
    spectralWindowId are stripped of surrounding whitespace.
    """
    syscalXML = GetXML(uid,'SysCal')
    if syscalXML is not False:
        syscal = minidom.parseString(syscalXML)
        syscalList = list()
        rows = syscal.getElementsByTagName('row')
        for i in rows:
            syscalList.append((
                i.getElementsByTagName('timeInterval')[0].firstChild.data,
                i.getElementsByTagName('numReceptor')[0].firstChild.data,
                i.getElementsByTagName('numChan')[0].firstChild.data,
                i.getElementsByTagName('tcalFlag')[0].firstChild.data,
                i.getElementsByTagName('tcalSpectrum')[0].firstChild.data,
                i.getElementsByTagName('trxFlag')[0].firstChild.data,
                i.getElementsByTagName('trxSpectrum')[0].firstChild.data,
                i.getElementsByTagName('tskyFlag')[0].firstChild.data,
                i.getElementsByTagName('tskySpectrum')[0].firstChild.data,
                i.getElementsByTagName('tsysFlag')[0].firstChild.data,
                i.getElementsByTagName('tsysSpectrum')[0].firstChild.data,
                i.getElementsByTagName('antennaId')[0].firstChild.data.strip(),
                i.getElementsByTagName('feedId')[0].firstChild.data,
                i.getElementsByTagName('spectralWindowId')[0].firstChild.data.strip() ))
        return pd.DataFrame(syscalList, columns=['timeInterval','numReceptor','numChan','tcalFlag','tcalSpectrum','trxFlag',
                                                 'trxSpectrum','tskyFlag','tskySpectrum','tsysFlag','tsysSpectrum','antennaId',
                                                 'feedId','spectralWindowId'])
    else:
        return False
def getSBData(sbuid=None):
    """Parse a SchedBlock XML document into six pandas DataFrames.

    Returns a tuple (bb, specs, target, phase, science, field):
      bb      -- base-band specifications (integer column labels)
      specs   -- spectral-window rows, BL or ACA correlator
      target  -- Target part-id cross references (named columns)
      phase   -- PhaseCal calibration parameters (integer column labels)
      science -- ScienceParameters rows (integer column labels)
      field   -- FieldSource rows (integer column labels)

    NOTE(review): unlike getSysCal(), the GetXML() result is not checked
    for False here, so a failed fetch makes parseString raise -- confirm
    callers always pass a valid sbuid.
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    sched = minidom.parseString(schedXML)
    schedList = list()
    # A SchedBlock holds either BL (baseline correlator) or ACA spectral
    # windows; keep whichever flavour has more entries.
    rowsBL = sched.getElementsByTagName('sbl:BLSpectralWindow')
    rowsACA = sched.getElementsByTagName('sbl:ACASpectralWindow')
    rows = rowsBL if len(rowsBL) > len(rowsACA) else rowsACA
    for i in rows:
        # Sibling reference linking this window to its base band.
        brother = i.parentNode.getElementsByTagName('sbl:BaseBandSpecificationRef')
        # Three levels up: the enclosing spectral-spec element.
        parent = i.parentNode.parentNode.parentNode
        schedList.append((
            parent.getAttribute('entityPartId'),
            parent.getAttribute('switchingType'),
            #parent.getAttribute('receiverType'),
            parent.getElementsByTagName('sbl:name')[0].firstChild.data,
            brother[0].getAttribute('entityId'),
            brother[0].getAttribute('partId'),
            #brother[0].getAttribute('entityTypeName'),
            #i.getAttribute('sideBand'),
            #i.getAttribute('windowFunction'),
            i.getAttribute('polnProducts'),
            #i.getAttribute('correlationBits'),
            i.getElementsByTagName('sbl:centerFrequency')[0].firstChild.data,
            i.getElementsByTagName('sbl:centerFrequency')[0].getAttribute('unit'),
            #i.getElementsByTagName('sbl:spectralAveragingFactor')[0].firstChild.data,
            #i.getElementsByTagName('sbl:name')[0].firstChild.data,
            i.getElementsByTagName('sbl:effectiveBandwidth')[0].firstChild.data,
            i.getElementsByTagName('sbl:effectiveBandwidth')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:effectiveNumberOfChannels')[0].firstChild.data,
            #i.getElementsByTagName('sbl:useThisSpectralWindow')[0].firstChild.data,
            i.getElementsByTagName('sbl:ChannelAverageRegion')[0].getElementsByTagName('sbl:startChannel')[0].firstChild.data,
            i.getElementsByTagName('sbl:ChannelAverageRegion')[0].getElementsByTagName('sbl:numberChannels')[0].firstChild.data,
            ))
    specs = pd.DataFrame(schedList)
    # Base-band specifications; *parent* carries the receiver/doppler setup.
    rows = sched.getElementsByTagName('sbl:BaseBandSpecification')
    bbList = list()
    for i in rows:
        parent = i.parentNode
        bbList.append((
            parent.getAttribute('receiverBand'),
            parent.getAttribute('dopplerReference'),
            parent.getElementsByTagName('sbl:restFrequency')[0].firstChild.data,
            parent.getElementsByTagName('sbl:restFrequency')[0].getAttribute('unit'),
            parent.getElementsByTagName('sbl:frequencySwitching')[0].firstChild.data,
            parent.getElementsByTagName('sbl:lO2Frequency')[0].firstChild.data,
            parent.getElementsByTagName('sbl:lO2Frequency')[0].getAttribute('unit'),
            #parent.getElementsByTagName('sbl:weighting')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:useUSB')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:use12GHzFilter')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:imageCenterFrequency')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:imageCenterFrequency')[0].getAttribute('unit'),
            i.getAttribute('entityPartId'),
            i.getAttribute('baseBandName'),
            #i.getAttribute('sideBandPreference'),
            i.getElementsByTagName('sbl:centerFrequency')[0].firstChild.data,
            i.getElementsByTagName('sbl:lO2Frequency')[0].firstChild.data,
            i.getElementsByTagName('sbl:lO2Frequency')[0].getAttribute('unit'),
            #i.getElementsByTagName('sbl:weighting')[0].firstChild.data,
            #i.getElementsByTagName('sbl:useUSB')[0].firstChild.data,
            #i.getElementsByTagName('sbl:use12GHzFilter')[0].firstChild.data,
            #i.getElementsByTagName('sbl:imageCenterFrequency')[0].firstChild.data,
            #i.getElementsByTagName('sbl:imageCenterFrequency')[0].getAttribute('unit')
            ))
    bb = pd.DataFrame(bbList)
    # Targets: part-id references tying instrument spec, field source and
    # observing parameters together.
    targetList = list()
    rows = sched.getElementsByTagName('sbl:Target')
    for i in rows:
        targetList.append((
            i.getAttribute('entityPartId'),
            i.getElementsByTagName('sbl:AbstractInstrumentSpecRef')[0].getAttribute('partId'),
            i.getElementsByTagName('sbl:FieldSourceRef')[0].getAttribute('partId'),
            i.getElementsByTagName('sbl:ObservingParametersRef')[0].getAttribute('partId'),
            ))
    target = pd.DataFrame(targetList, columns=['entityPartId', 'InstrumentSpec', 'FieldSource', 'ObsParameter'])
    # Science observing parameters (value/unit pairs kept as strings).
    rows = sched.getElementsByTagName('sbl:ScienceParameters')
    scienceList = list()
    for i in rows:
        scienceList.append((
            i.getAttribute('entityPartId'),
            i.getElementsByTagName('sbl:name')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeBandwidth')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeBandwidth')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:representativeFrequency')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeFrequency')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:sensitivityGoal')[0].firstChild.data,
            i.getElementsByTagName('sbl:sensitivityGoal')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:integrationTime')[0].firstChild.data,
            i.getElementsByTagName('sbl:integrationTime')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:subScanDuration')[0].firstChild.data,
            i.getElementsByTagName('sbl:subScanDuration')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:forceAtmCal')[0].firstChild.data
            ))
    science = pd.DataFrame(scienceList)
    # Phase-calibration parameters.
    rows = sched.getElementsByTagName('sbl:PhaseCalParameters')
    phaseList = list()
    for i in rows:
        phaseList.append((
            i.getAttribute('entityPartId'),
            #i.getElementsByTagName('sbl:name')[0].firstChild.data,
            i.getElementsByTagName('sbl:cycleTime')[0].firstChild.data,
            i.getElementsByTagName('sbl:cycleTime')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:defaultIntegrationTime')[0].firstChild.data,
            i.getElementsByTagName('sbl:defaultIntegrationTime')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:subScanDuration')[0].firstChild.data,
            i.getElementsByTagName('sbl:subScanDuration')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:forceAtmCal')[0].firstChild.data,
            i.getElementsByTagName('sbl:forceExecution')[0].firstChild.data
            ))
    phase = pd.DataFrame(phaseList)
    # Field sources (positions handled separately in getSBFields()).
    rows = sched.getElementsByTagName('sbl:FieldSource')
    fieldList = list()
    for i in rows:
        fieldList.append((
            i.getAttribute('entityPartId'),
            i.getAttribute('solarSystemObject'),
            i.getElementsByTagName('sbl:sourceName')[0].firstChild.data,
            #i.getElementsByTagName('sbl:sourceEphemeris')[0].firstChild.data,
            i.getElementsByTagName('sbl:name')[0].firstChild.data,
            ))
    field = pd.DataFrame(fieldList)
    return bb,specs,target,phase,science,field
def getSBFields(sbuid=None):
    """Return the FieldSource entries of a SchedBlock as a DataFrame.

    Parameters
    ----------
    sbuid : str, optional
        SchedBlock UID handed to GetXML().

    Returns
    -------
    pandas.DataFrame with one row per sbl:FieldSource (part id, solar-system
    flag, source name, field name, and source coordinates), or False when
    the SchedBlock XML could not be retrieved -- same convention as
    getSysCal().
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    if schedXML is False:
        # GetXML signals a failed fetch with False; parseString would
        # otherwise raise TypeError on a non-string argument.
        return False
    sched = minidom.parseString(schedXML)
    fieldList = list()
    for i in sched.getElementsByTagName('sbl:FieldSource'):
        coords = i.getElementsByTagName('sbl:sourceCoordinates')[0]
        fieldList.append((
            i.getAttribute('entityPartId'),
            i.getAttribute('solarSystemObject'),
            i.getElementsByTagName('sbl:sourceName')[0].firstChild.data,
            #i.getElementsByTagName('sbl:sourceEphemeris')[0].firstChild.data,
            i.getElementsByTagName('sbl:name')[0].firstChild.data,
            coords.getElementsByTagName('val:longitude')[0].firstChild.data,
            coords.getElementsByTagName('val:latitude')[0].firstChild.data,
            ))
    return pd.DataFrame(fieldList, columns=['entityPartId', 'solarSystemObject',
                                            'sourceName', 'name', 'longitude',
                                            'latitude'])
def getSBScience(sbuid=None):
    """Return the ScienceParameters of a SchedBlock as a DataFrame.

    Parameters
    ----------
    sbuid : str, optional
        SchedBlock UID handed to GetXML().

    Returns
    -------
    pandas.DataFrame with one row per sbl:ScienceParameters element
    (values and their units are kept as strings, exactly as they appear
    in the XML), or False when the SchedBlock XML could not be retrieved
    -- same convention as getSysCal().
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    if schedXML is False:
        # GetXML signals a failed fetch with False; parseString would
        # otherwise raise TypeError on a non-string argument.
        return False
    sched = minidom.parseString(schedXML)
    scienceList = list()
    for i in sched.getElementsByTagName('sbl:ScienceParameters'):
        scienceList.append((
            i.getAttribute('entityPartId'),
            i.getElementsByTagName('sbl:name')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeBandwidth')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeBandwidth')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:representativeFrequency')[0].firstChild.data,
            i.getElementsByTagName('sbl:representativeFrequency')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:sensitivityGoal')[0].firstChild.data,
            i.getElementsByTagName('sbl:sensitivityGoal')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:integrationTime')[0].firstChild.data,
            i.getElementsByTagName('sbl:integrationTime')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:subScanDuration')[0].firstChild.data,
            i.getElementsByTagName('sbl:subScanDuration')[0].getAttribute('unit'),
            i.getElementsByTagName('sbl:forceAtmCal')[0].firstChild.data
            ))
    return pd.DataFrame(scienceList, columns=[
        'entityPartId', 'name', 'representativeBandwidth', 'unit_rb',
        'representativeFrequency', 'unit_rf', 'sensitivityGoal', 'unit_sg',
        'integrationTime', 'unit_it', 'subScanDuration', 'unit_sc',
        'forceAtmCal'])
def getSBTargets(sbuid=None):
    """Return the Target cross references of a SchedBlock as a DataFrame.

    Each row links a Target part id to its instrument spec, field source
    and observing-parameter part ids.

    Returns False when the SchedBlock XML could not be retrieved -- same
    convention as getSysCal().
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    if schedXML is False:
        # GetXML signals a failed fetch with False; parseString would
        # otherwise raise TypeError on a non-string argument.
        return False
    sched = minidom.parseString(schedXML)
    targetList = list()
    for i in sched.getElementsByTagName('sbl:Target'):
        targetList.append((
            i.getAttribute('entityPartId'),
            i.getElementsByTagName('sbl:AbstractInstrumentSpecRef')[0].getAttribute('partId'),
            i.getElementsByTagName('sbl:FieldSourceRef')[0].getAttribute('partId'),
            i.getElementsByTagName('sbl:ObservingParametersRef')[0].getAttribute('partId'),
            ))
    return pd.DataFrame(targetList, columns=['entityPartId', 'InstrumentSpec', 'FieldSource', 'ObsParameter'])
def getSBOffsets(sbuid=None):
    """Return the phase-center coordinates of a SchedBlock as a DataFrame.

    One row per sbl:phaseCenterCoordinates element; the part id of the
    grandparent element identifies the owning field source.

    Returns False when the SchedBlock XML could not be retrieved -- same
    convention as getSysCal().
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    if schedXML is False:
        # GetXML signals a failed fetch with False; parseString would
        # otherwise raise TypeError on a non-string argument.
        return False
    sched = minidom.parseString(schedXML)
    offsetList = list()
    for i in sched.getElementsByTagName('sbl:phaseCenterCoordinates'):
        offsetList.append((
            # Two levels up: the element that owns these coordinates.
            i.parentNode.parentNode.getAttribute('entityPartId'),
            i.getAttribute('system'),
            i.getAttribute('type'),
            i.getElementsByTagName('val:longitude')[0].firstChild.data,
            i.getElementsByTagName('val:longitude')[0].getAttribute('unit'),
            i.getElementsByTagName('val:latitude')[0].firstChild.data,
            i.getElementsByTagName('val:latitude')[0].getAttribute('unit'),
            ))
    return pd.DataFrame(offsetList, columns=['partId', 'system', 'type',
                                             'longitude', 'lon_unit',
                                             'latitude', 'lat_unit'])
def getScienceGoal(prjUID=None):
    """Return the science spectral windows of an ObsProject as a DataFrame.

    Columns are left as integer labels (0..6): goal name, ObsUnitSet
    entityId/partId, representative and user-representative frequency,
    window center frequency, and the representative-window flag.

    Returns False when the ObsProject XML could not be retrieved -- same
    convention as getSysCal().
    """
    projXML = GetXML(prjUID, 'ObsProject')
    if projXML is False:
        # GetXML signals a failed fetch with False; parseString would
        # otherwise raise TypeError on a non-string argument.
        return False
    proj = minidom.parseString(projXML)
    scienceGoalList = list()
    for i in proj.getElementsByTagName('prj:ScienceSpectralWindow'):
        # Grandparent holds the goal-level metadata, parent the frequencies.
        scienceGoalList.append((
            i.parentNode.parentNode.getElementsByTagName('prj:name')[0].firstChild.data,
            i.parentNode.parentNode.getElementsByTagName('prj:ObsUnitSetRef')[0].getAttribute('entityId'),
            i.parentNode.parentNode.getElementsByTagName('prj:ObsUnitSetRef')[0].getAttribute('partId'),
            i.parentNode.getElementsByTagName('prj:representativeFrequency')[0].firstChild.data,
            i.parentNode.getElementsByTagName('prj:userRepresentativeFrequency')[0].firstChild.data,
            i.getElementsByTagName('prj:centerFrequency')[0].firstChild.data,
            i.getElementsByTagName('prj:representativeWindow')[0].firstChild.data,
            ))
    return pd.DataFrame(scienceGoalList)
def getSB_spectralconf(sbuid=None):
    """Parse the spectral configuration of a SchedBlock.

    A trimmed-down variant of getSBData() that keeps only identifying
    fields (the many commented-out extractions mirror getSBData()).

    Returns a tuple (bb, specs):
      bb    -- base-band specifications (integer column labels)
      specs -- spectral-window identification rows (integer column labels)

    NOTE(review): unlike getSysCal(), the GetXML() result is not checked
    for False here, so a failed fetch makes parseString raise -- confirm
    callers always pass a valid sbuid.
    """
    schedXML = GetXML(sbuid, 'SchedBlock')
    sched = minidom.parseString(schedXML)
    schedList = list()
    # A SchedBlock holds either BL (baseline correlator) or ACA spectral
    # windows; keep whichever flavour has more entries.
    rowsBL = sched.getElementsByTagName('sbl:BLSpectralWindow')
    rowsACA = sched.getElementsByTagName('sbl:ACASpectralWindow')
    rows = rowsBL if len(rowsBL) > len(rowsACA) else rowsACA
    for i in rows:
        # Sibling reference linking this window to its base band.
        brother = i.parentNode.getElementsByTagName('sbl:BaseBandSpecificationRef')
        # Three levels up: the enclosing spectral-spec element.
        parent = i.parentNode.parentNode.parentNode
        schedList.append((
            parent.getAttribute('entityPartId'),
            parent.getAttribute('switchingType'),
            #parent.getAttribute('receiverType'),
            parent.getElementsByTagName('sbl:name')[0].firstChild.data,
            brother[0].getAttribute('entityId'),
            brother[0].getAttribute('partId'),
            #brother[0].getAttribute('entityTypeName'),
            #i.getAttribute('sideBand'),
            #i.getAttribute('windowFunction'),
            #i.getAttribute('polnProducts'),
            #i.getAttribute('correlationBits'),
            #i.getElementsByTagName('sbl:centerFrequency')[0].firstChild.data,
            #i.getElementsByTagName('sbl:centerFrequency')[0].getAttribute('unit'),
            #i.getElementsByTagName('sbl:spectralAveragingFactor')[0].firstChild.data,
            #i.getElementsByTagName('sbl:name')[0].firstChild.data,
            #i.getElementsByTagName('sbl:effectiveBandwidth')[0].firstChild.data,
            #i.getElementsByTagName('sbl:effectiveBandwidth')[0].getAttribute('unit'),
            #i.getElementsByTagName('sbl:effectiveNumberOfChannels')[0].firstChild.data,
            #i.getElementsByTagName('sbl:useThisSpectralWindow')[0].firstChild.data,
            #i.getElementsByTagName('sbl:ChannelAverageRegion')[0].getElementsByTagName('sbl:startChannel')[0].firstChild.data,
            #i.getElementsByTagName('sbl:ChannelAverageRegion')[0].getElementsByTagName('sbl:numberChannels')[0].firstChild.data,
            ))
    specs = pd.DataFrame(schedList)
    # Base-band identification only (receiver band plus part ids).
    rows = sched.getElementsByTagName('sbl:BaseBandSpecification')
    bbList = list()
    for i in rows:
        parent = i.parentNode
        bbList.append((
            parent.getAttribute('receiverBand'),
            #parent.getAttribute('dopplerReference'),
            #parent.getElementsByTagName('sbl:restFrequency')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:restFrequency')[0].getAttribute('unit'),
            #parent.getElementsByTagName('sbl:frequencySwitching')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:lO2Frequency')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:lO2Frequency')[0].getAttribute('unit'),
            #parent.getElementsByTagName('sbl:weighting')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:useUSB')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:use12GHzFilter')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:imageCenterFrequency')[0].firstChild.data,
            #parent.getElementsByTagName('sbl:imageCenterFrequency')[0].getAttribute('unit'),
            i.getAttribute('entityPartId'),
            i.getAttribute('baseBandName'),
            #i.getAttribute('sideBandPreference'),
            #i.getElementsByTagName('sbl:centerFrequency')[0].firstChild.data,
            #i.getElementsByTagName('sbl:lO2Frequency')[0].firstChild.data,
            #i.getElementsByTagName('sbl:lO2Frequency')[0].getAttribute('unit'),
            #i.getElementsByTagName('sbl:weighting')[0].firstChild.data,
            #i.getElementsByTagName('sbl:useUSB')[0].firstChild.data,
            #i.getElementsByTagName('sbl:use12GHzFilter')[0].firstChild.data,
            #i.getElementsByTagName('sbl:imageCenterFrequency')[0].firstChild.data,
            #i.getElementsByTagName('sbl:imageCenterFrequency')[0].getAttribute('unit')
            ))
    bb = pd.DataFrame(bbList)
    return bb,specs
| mit |
boada/desCluster | testing/snippets/calc_mass.py | 4 | 1512 | from math import sqrt
from astLib import astStats
from astLib import astCalc
import pandas as pd
from glob import glob
from bootstrap import bootstrap
def calcVD_big(data):
    """Velocity dispersion via the biweight scale estimator (used for N > 10)."""
    return astStats.biweightScale(data, tuningConstant=9.0)
def calcVD_small(data):
    """Velocity dispersion via the gapper estimator (used for small N <= 10)."""
    return astStats.gapperEstimator(data)
def calc_mass(data):
    # Estimate velocity dispersion, r200 and M200 for one cluster.
    # *data* must carry 'LOSV' (line-of-sight velocity) and 'redshift'
    # columns. Returns (data, m200 [kg], vd [km/s]).
    # Pick the dispersion estimator by sample size: biweight scale for
    # larger samples, gapper estimator for small ones.
    if len(data) > 10:
        vd = calcVD_big(data['LOSV'].values)
        # 68% (alpha=0.32) bootstrap bounds on the dispersion.
        up, low = bootstrap(data['LOSV'].values, astStats.biweightScale,
                alpha=0.32, tuningConstant=9.0)
    else:
        vd = calcVD_small(data['LOSV'].values)
        up, low = bootstrap(data['LOSV'].values, astStats.gapperEstimator,
                alpha=0.32)
    # print vd, abs(vd-up), abs(vd-low),
    # Robust mean cluster redshift.
    avgz = astStats.biweightLocation(data['redshift'].values, tuningConstant=6.0)
    # r200 from the dispersion and H(z); presumably the standard
    # sqrt(3)*sigma/(10 H(z)) virial-radius scaling -- verify units.
    r200 = sqrt(3) * vd /(10*astCalc.H0 * astCalc.Ez(avgz))
    r200up = sqrt(3) * up /(10*astCalc.H0 * astCalc.Ez(avgz))
    r200low = sqrt(3) * low /(10*astCalc.H0 * astCalc.Ez(avgz))
    # print r200, abs(r200-r200up), abs(r200-r200low),
    # Constant factor of M200 = 3*sqrt(3)*sigma^3/(10 G H(z)) in SI units;
    # 1000**3 converts (km/s)^3 -> (m/s)^3, 3.08E19 is km per Mpc.
    a = 3 * sqrt(3) * 1000**3 * 3.08E19/(10*astCalc.H0 * astCalc.Ez(avgz) *\
            6.67384E-11)
    m200 = a * vd**3
    # propagate errors
    # NOTE(review): dM = 3*a*vd^2 * dvd -- this treats 'low'/'up' as
    # dispersion offsets; confirm bootstrap() returns deviations rather
    # than absolute confidence bounds.
    m200low = a * vd**2 * 3 * low
    m200up = a * vd**2 * 3 * up
    # Masses reported in solar masses (1.9891E30 kg).
    print vd, m200/1.9891E30, m200low/1.9891E30, m200up/1.9891E30
    return data, m200, vd
# Driver: compute masses for every membership catalogue in the
# working directory.
files = glob('*members.csv')
for f in files:
    data = pd.read_csv(f)
    # Cluster identifier is the leading underscore-separated token of the
    # file name; trailing comma keeps calc_mass's output on the same line.
    print f.split('_')[0],
    mass = calc_mass(data)
    #print len(data), mass/1.9891E30
| mit |
aewhatley/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Arguments are (n_samples, n_features, n_outliers,
    #                tol_loc, tol_cov, tol_support).
    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)

    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)

    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)

    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on contaminated gaussian data and check its estimates."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    # Shift a random subset of points far from the bulk to act as outliers.
    outlier_idx = rng.permutation(n_samples)[:n_outliers]
    offsets = 10. * \
        (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
    X[outlier_idx] += offsets
    inlier_mask = np.ones(n_samples).astype(bool)
    inlier_mask[outlier_idx] = False
    X_inliers = X[inlier_mask]

    # compute MCD by fitting an object
    fitted = MinCovDet(random_state=rng).fit(X)
    location = fitted.location_
    covariance = fitted.covariance_
    support = fitted.support_

    # The robust estimates should stay close to those learnt from the
    # inliers alone, and the support should cover enough points.
    assert np.mean((X_inliers.mean(0) - location) ** 2) < tol_loc
    assert np.mean((empirical_covariance(X_inliers) - covariance) ** 2) < tol_cov
    assert np.sum(support) >= tol_support
    assert_array_almost_equal(fitted.mahalanobis(X), fitted.dist_)
def test_mcd_issue1127():
    # Regression test for issue #1127: fitting must not break when
    # X.shape == (3, 1), i.e. n_support equals n_samples.
    X = np.random.RandomState(0).normal(size=(3, 1))
    estimator = MinCovDet()
    estimator.fit(X)
def test_outlier_detection():
    # EllipticEnvelope on standard-normal data with 10% assumed contamination.
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # Predicting before fitting must raise NotFittedError.
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)

    # Raw decision values are the Mahalanobis distances, which in turn
    # match the distances stored on the estimator.
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # Accuracy against all-inlier labels equals the predicted inlier fraction.
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # Negative transformed decision values correspond to outlier predictions.
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
robertwb/incubator-beam | sdks/python/apache_beam/dataframe/schemas.py | 3 | 11873 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""Utilities for relating schema-aware PCollections and dataframe transforms.
Imposes a mapping between native Python typings (specifically those compatible
with :mod:`apache_beam.typehints.schemas`), and common pandas dtypes::
pandas dtype Python typing
np.int{8,16,32,64} <-----> np.int{8,16,32,64}*
pd.Int{8,16,32,64}Dtype <-----> Optional[np.int{8,16,32,64}]*
np.float{32,64} <-----> Optional[np.float{32,64}]
\--- np.float{32,64}
Not supported <------ Optional[bytes]
np.bool <-----> np.bool
np.dtype('S') <-----> bytes
pd.BooleanDType() <-----> Optional[bool]
pd.StringDType() <-----> Optional[str]
\--- str
np.object <-----> Any
* int, float, bool are treated the same as np.int64, np.float64, np.bool
Note that when converting to pandas dtypes, any types not specified here are
shunted to ``np.object``.
Similarly when converting from pandas to Python types, types that aren't
otherwise specified here are shunted to ``Any``. Notably, this includes
``np.datetime64``.
Pandas does not support hierarchical data natively. Currently, all structured
types (``Sequence``, ``Mapping``, nested ``NamedTuple`` types), are
shunted to ``np.object`` like all other unknown types. In the future these
types may be given special consideration.
"""
# pytype: skip-file
from typing import Any
from typing import NamedTuple
from typing import Optional
from typing import TypeVar
from typing import Union
import numpy as np
import pandas as pd
import apache_beam as beam
from apache_beam import typehints
from apache_beam.portability.api import schema_pb2
from apache_beam.transforms.util import BatchElements
from apache_beam.typehints.native_type_compatibility import _match_is_optional
from apache_beam.typehints.schemas import named_fields_from_element_type
from apache_beam.typehints.schemas import named_fields_to_schema
from apache_beam.typehints.schemas import named_tuple_from_schema
from apache_beam.typehints.schemas import named_tuple_to_schema
from apache_beam.utils import proto_utils
__all__ = (
'BatchRowsAsDataFrame',
'generate_proxy',
'UnbatchPandas',
'element_type_from_dataframe')
# Element typehint for the schema'd rows consumed by BatchRowsAsDataFrame.
T = TypeVar('T', bound=NamedTuple)

# Generate type map (presented visually in the docstring)
_BIDIRECTIONAL = [
    (bool, bool),
    (np.int8, np.int8),
    (np.int16, np.int16),
    (np.int32, np.int32),
    (np.int64, np.int64),
    (pd.Int8Dtype(), Optional[np.int8]),
    (pd.Int16Dtype(), Optional[np.int16]),
    (pd.Int32Dtype(), Optional[np.int32]),
    (pd.Int64Dtype(), Optional[np.int64]),
    (np.float32, Optional[np.float32]),
    (np.float64, Optional[np.float64]),
    (object, Any),
    (pd.StringDtype(), Optional[str]),
    (pd.BooleanDtype(), Optional[bool]),
]

# Keyed by the dtype pandas actually reports for an empty Series, so that
# e.g. extension dtypes like pd.Int8Dtype() are matched correctly.
PANDAS_TO_BEAM = {
    pd.Series([], dtype=dtype).dtype: fieldtype
    for dtype,
    fieldtype in _BIDIRECTIONAL
}
BEAM_TO_PANDAS = {fieldtype: dtype for dtype, fieldtype in _BIDIRECTIONAL}

# Shunt non-nullable Beam types to the same pandas types as their non-nullable
# equivalents for FLOATs, DOUBLEs, and STRINGs. pandas has no non-nullable dtype
# for these.
OPTIONAL_SHUNTS = [np.float32, np.float64, str]

for typehint in OPTIONAL_SHUNTS:
  BEAM_TO_PANDAS[typehint] = BEAM_TO_PANDAS[Optional[typehint]]

# int, float -> int64, np.float64
BEAM_TO_PANDAS[int] = BEAM_TO_PANDAS[np.int64]
BEAM_TO_PANDAS[Optional[int]] = BEAM_TO_PANDAS[Optional[np.int64]]
BEAM_TO_PANDAS[float] = BEAM_TO_PANDAS[np.float64]
BEAM_TO_PANDAS[Optional[float]] = BEAM_TO_PANDAS[Optional[np.float64]]

# 'bytes' dtype-string alias rather than a numpy dtype object.
BEAM_TO_PANDAS[bytes] = 'bytes'
@typehints.with_input_types(T)
@typehints.with_output_types(pd.DataFrame)
class BatchRowsAsDataFrame(beam.PTransform):
  """A transform that batches schema-aware PCollection elements into DataFrames

  Batching parameters are inherited from
  :class:`~apache_beam.transforms.util.BatchElements`.
  """
  def __init__(self, *args, proxy=None, **kwargs):
    # proxy: optional pandas DataFrame/Series describing the output shape;
    # when None it is derived from the input PCollection's element_type.
    # Remaining args/kwargs are forwarded to BatchElements.
    self._batch_elements_transform = BatchElements(*args, **kwargs)
    self._proxy = proxy

  def expand(self, pcoll):
    proxy = generate_proxy(
        pcoll.element_type) if self._proxy is None else self._proxy
    if isinstance(proxy, pd.DataFrame):
      columns = proxy.columns
      # Each batch is a list of rows (tuples) -> one DataFrame per batch.
      construct = lambda batch: pd.DataFrame.from_records(
          batch, columns=columns)
    elif isinstance(proxy, pd.Series):
      dtype = proxy.dtype
      construct = lambda batch: pd.Series(batch, dtype=dtype)
    else:
      raise NotImplementedError("Unknown proxy type: %s" % proxy)
    return pcoll | self._batch_elements_transform | beam.Map(construct)
def generate_proxy(element_type):
  # type: (type) -> pd.DataFrame
  """Generate a proxy pandas object for the given PCollection element_type.

  Currently only supports generating a DataFrame proxy from a schema-aware
  PCollection or a Series proxy from a primitively typed PCollection.
  """
  # Primitive element types map directly onto a typed, empty Series.
  if element_type != Any and element_type in BEAM_TO_PANDAS:
    return pd.Series(dtype=BEAM_TO_PANDAS[element_type])

  fields = named_fields_from_element_type(element_type)
  frame = pd.DataFrame(columns=[field_name for field_name, _ in fields])
  for field_name, typehint in fields:
    # Unknown typehints fall back to np.object. This is lossy: the
    # original type cannot be recovered at the output.
    frame[field_name] = frame[field_name].astype(
        BEAM_TO_PANDAS.get(typehint, object))
  return frame
def element_type_from_dataframe(proxy, include_indexes=False):
  # type: (pd.DataFrame, bool) -> type
  """Generate an element_type for an element-wise PCollection from a proxy
  pandas object. Currently only supports converting the element_type for
  a schema-aware PCollection to a proxy DataFrame.

  Currently only supports generating a DataFrame proxy from a schema-aware
  PCollection.
  """
  output_columns = []
  if include_indexes:
    # Index levels become leading schema fields; each must be named and
    # must not clash with another level or a column.
    index_names = list(proxy.index.names)
    for level, index_name in enumerate(index_names):
      if index_name is None:
        raise ValueError(
            "Encountered an unnamed index. Cannot convert to a "
            "schema-aware PCollection with include_indexes=True. "
            "Please name all indexes or consider not including "
            "indexes.")
      if index_name in index_names[level + 1:]:
        raise ValueError(
            "Encountered multiple indexes with the name '%s'. "
            "Cannot convert to a schema-aware PCollection with "
            "include_indexes=True. Please ensure all indexes have "
            "unique names or consider not including indexes." % index_name)
      if index_name in proxy.columns:
        raise ValueError(
            "Encountered an index that has the same name as one "
            "of the columns, '%s'. Cannot convert to a "
            "schema-aware PCollection with include_indexes=True. "
            "Please ensure all indexes have unique names or "
            "consider not including indexes." % index_name)
      # its ok!
      output_columns.append(
          (index_name, proxy.index.get_level_values(level).dtype))

  output_columns.extend(zip(proxy.columns, proxy.dtypes))

  return named_tuple_from_schema(
      named_fields_to_schema([(column, _dtype_to_fieldtype(dtype))
                              for (column, dtype) in output_columns]))
class _BaseDataframeUnbatchDoFn(beam.DoFn):
  """Shared logic for exploding a DataFrame batch into namedtuple elements.

  Subclasses implement _get_series() to pick which series (columns and,
  optionally, index levels) feed the namedtuple fields, in field order.
  """
  def __init__(self, namedtuple_ctor):
    # namedtuple_ctor: the schema'd NamedTuple type produced per row.
    self._namedtuple_ctor = namedtuple_ctor

  def _get_series(self, df):
    # Subclass hook: ordered list of series matching the namedtuple fields.
    raise NotImplementedError()

  def process(self, df):
    # TODO: Only do null checks for nullable types
    def make_null_checking_generator(series):
      # Replace pandas nulls (NaN/NaT/None) with None so the output matches
      # Optional[...] field types.
      nulls = pd.isnull(series)
      return (None if isnull else value for isnull, value in zip(nulls, series))

    all_series = self._get_series(df)
    iterators = [
        make_null_checking_generator(series) for series,
        typehint in zip(all_series, self._namedtuple_ctor._field_types)
    ]

    # TODO: Avoid materializing the rows. Produce an object that references the
    # underlying dataframe
    for values in zip(*iterators):
      yield self._namedtuple_ctor(*values)

  def infer_output_type(self, input_type):
    return self._namedtuple_ctor

  @classmethod
  def _from_serialized_schema(cls, schema_str):
    # Reconstruct the DoFn from a serialized schema proto (pickling path).
    return cls(
        named_tuple_from_schema(
            proto_utils.parse_Bytes(schema_str, schema_pb2.Schema)))

  def __reduce__(self):
    # when pickling, use bytes representation of the schema.
    return (
        self._from_serialized_schema,
        (named_tuple_to_schema(self._namedtuple_ctor).SerializeToString(), ))
class _UnbatchNoIndex(_BaseDataframeUnbatchDoFn):
  """Unbatches a DataFrame using only its columns, ignoring the index."""
  def _get_series(self, df):
    return [df[column_name] for column_name in df.columns]
class _UnbatchWithIndex(_BaseDataframeUnbatchDoFn):
  """Unbatches a DataFrame, emitting its index levels before its columns."""
  def _get_series(self, df):
    index_levels = [
        df.index.get_level_values(level)
        for level in range(len(df.index.names))
    ]
    column_series = [df[column_name] for column_name in df.columns]
    return index_levels + column_series
def _unbatch_transform(proxy, include_indexes):
  """Return the PTransform that explodes batches shaped like *proxy*.

  DataFrame proxies yield namedtuple elements (optionally including index
  levels); Series proxies yield elements of the underlying field type.
  """
  if isinstance(proxy, pd.DataFrame):
    ctor = element_type_from_dataframe(proxy, include_indexes=include_indexes)

    return beam.ParDo(
        _UnbatchWithIndex(ctor) if include_indexes else _UnbatchNoIndex(ctor)
    ).with_output_types(ctor)
  elif isinstance(proxy, pd.Series):
    # Raise a TypeError if proxy has an unknown type
    output_type = _dtype_to_fieldtype(proxy.dtype)

    # TODO: Should the index ever be included for a Series?
    if _match_is_optional(output_type):
      # Nullable fields: map pandas nulls to None on the way out.
      def unbatch(series):
        for isnull, value in zip(pd.isnull(series), series):
          yield None if isnull else value
    else:

      def unbatch(series):
        yield from series

    return beam.FlatMap(unbatch).with_output_types(output_type)
  # TODO: What about scalar inputs?
  else:
    raise TypeError(
        "Proxy '%s' has unsupported type '%s'" % (proxy, type(proxy)))
def _dtype_to_fieldtype(dtype):
  """Map a pandas dtype to a Beam field type, falling back to Any."""
  try:
    return PANDAS_TO_BEAM[dtype]
  except KeyError:
    # Fixed-width byte-string dtypes map to bytes; everything else is Any.
    return bytes if dtype.kind == 'S' else Any
@typehints.with_input_types(Union[pd.DataFrame, pd.Series])
class UnbatchPandas(beam.PTransform):
  """A transform that explodes a PCollection of DataFrame or Series. DataFrame
  is converted to a schema-aware PCollection, while Series is converted to its
  underlying type.

  Args:
    include_indexes: (optional, default: False) When unbatching a DataFrame
      if include_indexes=True, attempt to include index columns in the output
      schema for expanded DataFrames. Raises an error if any of the index
      levels are unnamed (name=None), or if any of the names are not unique
      among all column and index names.
  """
  def __init__(self, proxy, include_indexes=False):
    # proxy: pandas object describing the batches' columns/dtypes; it
    # determines how elements are reconstructed in expand().
    self._proxy = proxy
    self._include_indexes = include_indexes

  def expand(self, pcoll):
    return pcoll | _unbatch_transform(self._proxy, self._include_indexes)
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
  """Run train_and_predict with a structural ensemble state-space model."""
  # Cycle between 5 latent values over a period of 100. This leads to a very
  # smooth periodic component (and a small model), which is a good fit for our
  # example data. Modeling high-frequency periodic variations will require a
  # higher cycle_num_latent_values.
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=100, num_features=1, cycle_num_latent_values=5)
  return train_and_predict(estimator, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
  """Run train_and_predict with an autoregressive model."""
  # An autoregressive model, with periodicity handled as a time-based
  # regression. Note that this requires windows of size 16 (input_window_size +
  # output_window_size) for training.
  ar = tf.contrib.timeseries.ARRegressor(
      periodicities=100, input_window_size=10, output_window_size=6,
      num_features=1,
      # Use the (default) normal likelihood loss to adaptively fit the
      # variance. SQUARED_LOSS overestimates variance when there are trends in
      # the series.
      loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
  return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
  """A simple example of training and predicting.

  Args:
    estimator: A TFTS estimator (e.g. an ARRegressor or a
        StructuralEnsembleRegressor) to train, evaluate and predict with.
    csv_file_name: Path to a "time,value" CSV file with no header.
    training_steps: Number of training iterations to run.

  Returns:
    A (times, observed, all_times, mean, upper_limit, lower_limit) tuple of
    NumPy arrays: evaluation timestamps and observations, then the model mean
    and +/- one standard deviation bounds over the evaluated and forecast
    periods combined.
  """
  # Read data in the default "time,value" CSV format with no header
  reader = tf.contrib.timeseries.CSVReader(csv_file_name)
  # Set up windowing and batching for training
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=16, window_size=16)
  # Fit model parameters to data
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  # Evaluate on the full dataset sequentially, collecting in-sample predictions
  # for a qualitative evaluation. Note that this loads the whole dataset into
  # memory. For quantitative evaluation, use RandomWindowChunker.
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation, 200 steps into the future.
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=200)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, 0]
  # Concatenate in-sample (evaluation) and forecast (prediction) outputs so a
  # single continuous curve can be plotted.
  mean = np.squeeze(np.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  variance = np.squeeze(np.concatenate(
      [evaluation["covariance"][0], predictions["covariance"]], axis=0))
  all_times = np.concatenate([times, predictions["times"]], axis=0)
  # One-standard-deviation band around the mean (sqrt of the variance).
  upper_limit = mean + np.sqrt(variance)
  lower_limit = mean - np.sqrt(variance)
  return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
              upper_limit, lower_limit):
  """Plot a time series in a new figure.

  Args:
    name: Title of the figure.
    training_times: Timestamps of the observed (training) series.
    observed: Observed values, aligned with `training_times`.
    all_times: Timestamps covering both the observed and forecast periods.
    mean: Forecast mean, aligned with `all_times`.
    upper_limit: Upper forecast bound, aligned with `all_times`.
    lower_limit: Lower forecast bound, aligned with `all_times`.
  """
  pyplot.figure()
  pyplot.plot(training_times, observed, "b", label="training series")
  pyplot.plot(all_times, mean, "r", label="forecast")
  pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
  pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
  # Bug fix: alpha must be a float, not the string "0.2" -- matplotlib raises
  # a TypeError for non-numeric alpha values.
  pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
  # Mark the boundary between observed data and the forecast region.
  pyplot.axvline(training_times[-1], color="k", linestyle="--")
  pyplot.xlabel("time")
  pyplot.ylabel("observations")
  pyplot.legend(loc=0)
  pyplot.title(name)
def main(unused_argv):
  """Trains both example models on FLAGS.input_filename and shows the plots."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  structural_results = structural_ensemble_train_and_predict(
      FLAGS.input_filename)
  make_plot("Structural ensemble", *structural_results)
  ar_results = ar_train_and_predict(FLAGS.input_filename)
  make_plot("AR", *ar_results)
  pyplot.show()
if __name__ == "__main__":
  # Parse this example's own flag; unrecognized arguments are passed through
  # to tf.app.run so TensorFlow's flags continue to work.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "--input_filename",
      type=str,
      required=True,
      help="Input csv file.")
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
petewarden/tensorflow_makefile | tensorflow/examples/skflow/iris_custom_model.py | 8 | 1430 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import learn
# Load the iris dataset and hold out 20% of it for testing; the fixed
# random_state makes the split reproducible.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
    test_size=0.2, random_state=42)
def my_model(X, y):
    """DNN with hidden layers of 10, 20 and 10 units and 0.1 dropout."""
    hidden = learn.ops.dnn(X, [10, 20, 10], dropout=0.1)
    return learn.models.logistic_regression(hidden, y)
# Wrap the custom model function in a trainable estimator for the 3 classes.
classifier = learn.TensorFlowEstimator(model_fn=my_model, n_classes=3,
    steps=1000)
classifier.fit(X_train, y_train)
# Report held-out accuracy of the fitted classifier.
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/convert.py | 2 | 13101 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def to_networkx_graph(data, create_using=None, multigraph_input=False):
    """Make a NetworkX graph from a known data structure.

    The preferred way to call this is automatically
    from the class constructor

    >>> d = {0: {1: {'weight': 1}}} # dict-of-dicts single edge (0,1)
    >>> G = nx.Graph(d)

    instead of the equivalent

    >>> G = nx.from_dict_of_dicts(d)

    Parameters
    ----------
    data : object to be converted
        Current known types are:
         any NetworkX graph
         dict-of-dicts
         dict-of-lists
         list of edges
         Pandas DataFrame (row per edge)
         numpy matrix
         numpy ndarray
         scipy sparse matrix
         pygraphviz agraph
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.
    multigraph_input : bool (default False)
        If True and data is a dict_of_dicts,
        try to create a multigraph assuming dict_of_dict_of_lists.
        If data and create_using are both multigraphs then create
        a multigraph from a multigraph.

    Raises
    ------
    nx.NetworkXError
        If ``data`` resembles a known type but cannot be converted.
    TypeError
        If ``data`` is a dict that is neither dict-of-dicts nor dict-of-lists.
    """
    # Fix: every handler below previously used a bare ``except:`` which also
    # swallows SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    # NX graph
    if hasattr(data, "adj"):
        try:
            result = from_dict_of_dicts(data.adj,
                                        create_using=create_using,
                                        multigraph_input=data.is_multigraph())
            if hasattr(data, 'graph'):  # data.graph should be dict-like
                result.graph.update(data.graph)
            if hasattr(data, 'nodes'):  # data.nodes should be dict-like
                result._node.update((n, dd.copy()) for n, dd in data.nodes.items())
            return result
        except Exception:
            raise nx.NetworkXError("Input is not a correct NetworkX graph.")

    # pygraphviz agraph
    if hasattr(data, "is_strict"):
        try:
            return nx.nx_agraph.from_agraph(data, create_using=create_using)
        except Exception:
            raise nx.NetworkXError("Input is not a correct pygraphviz graph.")

    # dict of dicts/lists
    if isinstance(data, dict):
        try:
            return from_dict_of_dicts(data, create_using=create_using,
                                      multigraph_input=multigraph_input)
        except Exception:
            try:
                return from_dict_of_lists(data, create_using=create_using)
            except Exception:
                raise TypeError("Input is not known type.")

    # list or generator of edges
    if (isinstance(data, (list, tuple)) or
            any(hasattr(data, attr) for attr in ['_adjdict', 'next', '__next__'])):
        try:
            return from_edgelist(data, create_using=create_using)
        except Exception:
            raise nx.NetworkXError("Input is not a valid edge list")

    # Pandas DataFrame: a square frame is treated as an adjacency matrix,
    # anything else as an edge list (one row per edge).
    try:
        import pandas as pd
        if isinstance(data, pd.DataFrame):
            if data.shape[0] == data.shape[1]:
                try:
                    return nx.from_pandas_adjacency(data, create_using=create_using)
                except Exception:
                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
                    raise nx.NetworkXError(msg)
            else:
                try:
                    return nx.from_pandas_edgelist(data, edge_attr=True, create_using=create_using)
                except Exception:
                    msg = "Input is not a correct Pandas DataFrame edge-list."
                    raise nx.NetworkXError(msg)
    except ImportError:
        msg = 'pandas not found, skipping conversion test.'
        warnings.warn(msg, ImportWarning)

    # numpy matrix or ndarray
    try:
        import numpy
        if isinstance(data, (numpy.matrix, numpy.ndarray)):
            try:
                return nx.from_numpy_matrix(data, create_using=create_using)
            except Exception:
                raise nx.NetworkXError(
                    "Input is not a correct numpy matrix or array.")
    except ImportError:
        warnings.warn('numpy not found, skipping conversion test.',
                      ImportWarning)

    # scipy sparse matrix - any format
    try:
        import scipy
        if hasattr(data, "format"):
            try:
                return nx.from_scipy_sparse_matrix(data, create_using=create_using)
            except Exception:
                raise nx.NetworkXError(
                    "Input is not a correct scipy sparse matrix type.")
    except ImportError:
        warnings.warn('scipy not found, skipping conversion test.',
                      ImportWarning)

    raise nx.NetworkXError(
        "Input is not a known data type for conversion.")
def to_dict_of_lists(G, nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    nodelist : list
        Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist = G
    # Keep only neighbors that are themselves part of the requested node set.
    return {node: [nbr for nbr in G.neighbors(node) if nbr in nodelist]
            for node in nodelist}
def from_dict_of_lists(d, create_using=None):
    """Return a graph from a dictionary of lists.

    Parameters
    ----------
    d : dictionary of lists
        A dictionary of lists adjacency representation.
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Examples
    --------
    >>> dol = {0: [1]} # single edge (0,1)
    >>> G = nx.from_dict_of_lists(dol)

    or

    >>> G = nx.Graph(dol) # use Graph constructor
    """
    G = nx.empty_graph(0, create_using)
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # A dict-of-lists cannot express parallel edges, but in an undirected
        # graph every edge shows up twice (u->v and v->u). Track the nodes
        # already processed so the reverse copy is not added as a second,
        # parallel edge.
        done = set()
        for node, nbrs in d.items():
            for nbr in nbrs:
                if nbr not in done:
                    G.add_edge(node, nbr)
            done.add(node)
    else:
        edge_iter = ((node, nbr) for node, nbrs in d.items() for nbr in nbrs)
        G.add_edges_from(edge_iter)
    return G
def to_dict_of_dicts(G, nodelist=None, edge_data=None):
    """Return adjacency representation of graph as a dictionary of dictionaries.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    nodelist : list
        Use only nodes specified in nodelist
    edge_data : list, optional
        If provided, the value of the dictionary will be
        set to edge_data for all edges. This is useful to make
        an adjacency matrix type representation with 1 as the edge data.
        If edgedata is None, the edgedata in G is used to fill the values.
        If G is a multigraph, the edgedata is a dict for each pair (u,v).
    """
    if nodelist is None:
        # Whole graph: copy each neighbor dict, or stamp every edge with the
        # supplied edge_data value.
        if edge_data is None:
            return {u: nbrdict.copy() for u, nbrdict in G.adjacency()}
        return {u: dict.fromkeys(nbrdict, edge_data)
                for u, nbrdict in G.adjacency()}
    # Restricted node set: keep only edges whose endpoints are both listed.
    if edge_data is None:
        return {u: {v: data for v, data in G[u].items() if v in nodelist}
                for u in nodelist}
    return {u: {v: edge_data for v in G[u] if v in nodelist}
            for u in nodelist}
def from_dict_of_dicts(d, create_using=None, multigraph_input=False):
    """Return a graph from a dictionary of dictionaries.

    Parameters
    ----------
    d : dictionary of dictionaries
        A dictionary of dictionaries adjacency representation.
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.
    multigraph_input : bool (default False)
        When True, the values of the inner dict are assumed
        to be containers of edge data for multiple edges.
        Otherwise this routine assumes the edge data are singletons.

    Examples
    --------
    >>> dod = {0: {1: {'weight': 1}}} # single edge (0,1)
    >>> G = nx.from_dict_of_dicts(dod)

    or

    >>> G = nx.Graph(dod) # use Graph constructor
    """
    G = nx.empty_graph(0, create_using)
    G.add_nodes_from(d)
    # is dict a MultiGraph or MultiDiGraph?
    if multigraph_input:
        # Inner dicts map neighbor -> {key: data}; iterate one level deeper
        # than in the plain case.
        # make a copy of the list of edge data (but not the edge data)
        if G.is_directed():
            if G.is_multigraph():
                G.add_edges_from((u, v, key, data)
                                 for u, nbrs in d.items()
                                 for v, datadict in nbrs.items()
                                 for key, data in datadict.items())
            else:
                # Target graph is not a multigraph: the keys are dropped and
                # later data overwrites earlier data for the same (u, v).
                G.add_edges_from((u, v, data)
                                 for u, nbrs in d.items()
                                 for v, datadict in nbrs.items()
                                 for key, data in datadict.items())
        else:  # Undirected
            if G.is_multigraph():
                seen = set()  # don't add both directions of undirected graph
                for u, nbrs in d.items():
                    for v, datadict in nbrs.items():
                        if (u, v) not in seen:
                            G.add_edges_from((u, v, key, data)
                                             for key, data in datadict.items())
                            seen.add((v, u))
            else:
                seen = set()  # don't add both directions of undirected graph
                for u, nbrs in d.items():
                    for v, datadict in nbrs.items():
                        if (u, v) not in seen:
                            G.add_edges_from((u, v, data)
                                             for key, data in datadict.items())
                            seen.add((v, u))
    else:  # not a multigraph to multigraph transfer
        if G.is_multigraph() and not G.is_directed():
            # d can have both representations u-v, v-u in dict.  Only add one.
            # We don't need this check for digraphs since we add both directions,
            # or for Graph() since it is done implicitly (parallel edges not allowed)
            seen = set()
            for u, nbrs in d.items():
                for v, data in nbrs.items():
                    if (u, v) not in seen:
                        # Pin the multi-edge key to 0, then attach the data.
                        G.add_edge(u, v, key=0)
                        G[u][v][0].update(data)
                        seen.add((v, u))
        else:
            G.add_edges_from(((u, v, data)
                              for u, nbrs in d.items()
                              for v, data in nbrs.items()))
    return G
def to_edgelist(G, nodelist=None):
    """Return a list of edges in the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    nodelist : list
        Use only nodes specified in nodelist
    """
    if nodelist is not None:
        return G.edges(nodelist, data=True)
    return G.edges(data=True)
def from_edgelist(edgelist, create_using=None):
    """Return a graph from a list of edges.

    Parameters
    ----------
    edgelist : list or iterator
        Edge tuples
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Examples
    --------
    >>> edgelist = [(0, 1)] # single edge (0,1)
    >>> G = nx.from_edgelist(edgelist)

    or

    >>> G = nx.Graph(edgelist) # use Graph constructor
    """
    graph = nx.empty_graph(0, create_using)
    graph.add_edges_from(edgelist)
    return graph
| gpl-3.0 |
fabianvaccaro/pygums | pythonLibs/mahotas-1.1.0/mahotas/io/matplotlibwrap.py | 2 | 1706 | # vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Copyright (C) 2013 Luis Pedro Coelho
#
# License: MIT (see COPYING file)
import numpy as np
# Importing matplotlib checks that it is importable without triggering any
# initialization (unlike importing pyplot)
import matplotlib
def imread(filename, as_grey=False):
    """
    img = imread(filename, as_grey=False)

    Read an image from file `filename` using matplotlib.

    Parameters
    ----------
    filename : str
        Path of the image file to load.
    as_grey : bool, optional
        Whether to convert a colour image to grey scale (default: False).

    Returns
    -------
    img : ndarray
    """
    from matplotlib import pyplot as plt
    image = plt.imread(filename)
    if as_grey and len(image.shape) == 3:
        # Collapse the colour channels using the luma weights that wikipedia
        # lists as typical.
        luma_weights = np.array([0.30, 0.59, 0.11])
        image = np.dot(image, luma_weights)
    return image
def imsave(filename, array):
    '''
    imsave(filename, array)

    Writes `array` into file `filename`

    Parameters
    ----------
    filename : str
        path on file system
    array : ndarray-like
        Image data; a 2-D (greyscale) array is emulated by replicating it
        into three identical RGB channels before saving.
    '''
    from matplotlib import pyplot as plt
    import numpy as np
    # Greyscale input: matplotlib can only save RGB(A) data, so warn and
    # replicate the single channel three times.
    if len(array.shape) == 2:
        import warnings
        warnings.warn('mahotas.imsave: The `matplotlib` backend does not support saving greyscale images natively.\n'
                'Emulating by saving in RGB format (with all channels set to same value).\n'
                'If this is a problem, please use another IO backend\n'
                '\n'
                'See http://mahotas.readthedocs.org/en/latest/io.html \n'
                )
        array = np.dstack([array, array, array])
    plt.imsave(filename, array)
| gpl-2.0 |
joernhees/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
# Binary labels drawn from a logistic model of three synthetic features.
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=9)
# Fit classifier with out-of-bag estimates
# (subsample < 1.0 is required for OOB improvements to be recorded).
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
          'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
# x axis for all curves: boosting iteration numbers 1..n_estimators.
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """Return the deviance of ``clf`` on ``X_test``/``y_test`` per iteration."""
    # One loss value per boosting stage (n_estimators is a module-level global).
    deviance = np.zeros((n_estimators,), dtype=np.float64)
    staged = clf.staged_decision_function(X_test)
    for iteration, decision in enumerate(staged):
        deviance[iteration] = clf.loss_(y_test, decision)
    return deviance
def cv_estimate(n_splits=3):
    """Average per-iteration heldout deviance over a K-fold CV of the train set."""
    folds = KFold(n_splits=n_splits)
    fold_clf = ensemble.GradientBoostingClassifier(**params)
    total_scores = np.zeros((n_estimators,), dtype=np.float64)
    for train_idx, test_idx in folds.split(X_train, y_train):
        fold_clf.fit(X_train[train_idx], y_train[train_idx])
        total_scores += heldout_score(fold_clf, X_train[test_idx], y_train[test_idx])
    return total_scores / n_splits
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves (RGB triples scaled into [0, 1])
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks, labelled by the estimate they mark
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
                      [oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
                        ['OOB', 'CV', 'Test'])
# keep tick positions and labels sorted together along the axis
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
PmagPy/PmagPy | programs/lowrie.py | 2 | 4410 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
from past.utils import old_div
import sys
import codecs
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
    """
    NAME
        lowrie.py
    DESCRIPTION
       plots intensity decay curves for Lowrie experiments
    SYNTAX
        lowrie -h [command line options]
    INPUT
       takes SIO formatted input files
    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file
        -N do not normalize by maximum magnetization
        -fmt [svg, pdf, eps, png] specify fmt, default is svg
        -sav save plots and quit
    """
    fmt, plot = 'svg', 0
    FIG = {}  # plot dictionary
    FIG['lowrie'] = 1  # demag is figure 1
    pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
    norm = 1  # default is to normalize by maximum axis
    if len(sys.argv) > 1:
        if '-h' in sys.argv:
            print(main.__doc__)
            sys.exit()
        if '-N' in sys.argv:
            norm = 0  # don't normalize
        if '-sav' in sys.argv:
            plot = 1  # save plots without interactive prompting
        if '-fmt' in sys.argv:  # sets output image format
            ind = sys.argv.index("-fmt")
            fmt = sys.argv[ind + 1]
        if '-f' in sys.argv:  # sets input filename
            ind = sys.argv.index("-f")
            in_file = sys.argv[ind + 1]
        else:
            print(main.__doc__)
            print('you must supply a file name')
            sys.exit()
    else:
        print(main.__doc__)
        print('you must supply a file name')
        sys.exit()
    data = pmag.open_file(in_file)
    PmagRecs = []  # set up a list for the results
    # SIO column order: specimen, treatment step, csd, moment, dec, inc
    keys = ['specimen', 'treatment', 'csd', 'M', 'dec', 'inc']
    for line in data:
        PmagRec = {}
        rec = line.replace('\n', '').split()
        for k in range(len(keys)):
            PmagRec[keys[k]] = rec[k]
        PmagRecs.append(PmagRec)
    specs = pmag.get_dictkey(PmagRecs, 'specimen', '')
    sids = []
    for spec in specs:
        if spec not in sids:
            sids.append(spec)  # get list of unique specimen names
    for spc in sids:  # step through the specimen names
        pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
        print(spc)
        specdata = pmag.get_dictitem(
            PmagRecs, 'specimen', spc, 'T')  # get all this one's data
        DIMs, Temps = [], []
        for dat in specdata:  # step through the data
            # collect (dec, inc, moment) triples; moment scaled by 1e-3
            DIMs.append([float(dat['dec']), float(
                dat['inc']), float(dat['M']) * 1e-3])
            Temps.append(float(dat['treatment']))
        # convert directions+magnitude to cartesian rows x, y, z
        carts = pmag.dir2cart(DIMs).transpose()
        # if norm==1: # want to normalize
        #     nrm=max(max(abs(carts[0])),max(abs(carts[1])),max(abs(carts[2]))) # by maximum of x,y,z values
        #     ylab="M/M_max"
        if norm == 1:  # want to normalize
            nrm = (DIMs[0][2])  # normalize by NRM
            ylab = "M/M_o"
        else:
            nrm = 1.  # don't normalize
            ylab = "Magnetic moment (Am^2)"
        xlab = "Temperature (C)"
        # plot each cartesian component as a line plus symbols
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[0]), nrm), sym='r-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[0]), nrm), sym='ro')  # X direction
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[1]), nrm), sym='c-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[1]), nrm), sym='cs')  # Y direction
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[2]), nrm), sym='k-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
            abs(carts[2]), nrm), sym='k^', title=spc, xlab=xlab, ylab=ylab)  # Z direction
        files = {'lowrie': 'lowrie:_' + spc + '_.' + fmt}
        if plot == 0:
            # interactive mode: show the figure and prompt before moving on
            pmagplotlib.draw_figs(FIG)
            ans = input('S[a]ve figure? [q]uit, <return> to continue ')
            if ans == 'a':
                pmagplotlib.save_plots(FIG, files)
            elif ans == 'q':
                sys.exit()
        else:
            pmagplotlib.save_plots(FIG, files)
        pmagplotlib.clearFIG(FIG['lowrie'])
# Run only when invoked as a command-line script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
StuartLittlefair/astropy | astropy/modeling/physical_models.py | 3 | 23921 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy import cosmology
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
    """
    Blackbody model using the Planck function.

    Parameters
    ----------
    temperature : :class:`~astropy.units.Quantity`
        Blackbody temperature.
    scale : float or :class:`~astropy.units.Quantity`
        Scale factor

    Notes
    -----
    Model formula:

        .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}

    Examples
    --------
    >>> from astropy.modeling import models
    >>> from astropy import units as u
    >>> bb = models.BlackBody(temperature=5000*u.K)
    >>> bb(6000 * u.AA)  # doctest: +FLOAT_CMP
    <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import BlackBody
        from astropy import units as u
        from astropy.visualization import quantity_support

        bb = BlackBody(temperature=5778*u.K)
        wav = np.arange(1000, 110000) * u.AA
        flux = bb(wav)

        with quantity_support():
            plt.figure()
            plt.semilogx(wav, flux)
            plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
            plt.show()
    """
    # We parametrize this model with a temperature and a scale.
    temperature = Parameter(default=5000.0, min=0, unit=u.K)
    scale = Parameter(default=1.0, min=0)
    # We allow values without units to be passed when evaluating the model, and
    # in this case the input x values are assumed to be frequencies in Hz.
    _input_units_allow_dimensionless = True
    # We enable the spectral equivalency by default for the spectral axis
    input_units_equivalencies = {'x': u.spectral()}

    def evaluate(self, x, temperature, scale):
        """Evaluate the model.

        Parameters
        ----------
        x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Frequency at which to compute the blackbody. If no units are given,
            this defaults to Hz.
        temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Temperature of the blackbody. If no units are given, this defaults
            to Kelvin.
        scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Desired scale for the blackbody.

        Returns
        -------
        y : number or ndarray
            Blackbody spectrum. The units are determined from the units of
            ``scale``.

        .. note::
            Use `numpy.errstate` to suppress Numpy warnings, if desired.

        .. warning::
            Output values might contain ``nan`` and ``inf``.

        Raises
        ------
        ValueError
            Invalid temperature.
        ZeroDivisionError
            Wavelength is zero (when converting to frequency).
        """
        if not isinstance(temperature, u.Quantity):
            in_temp = u.Quantity(temperature, u.K)
        else:
            in_temp = temperature
        # Convert to units for calculations, also force double precision
        with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
            freq = u.Quantity(x, u.Hz, dtype=np.float64)
            temp = u.Quantity(in_temp, u.K)
        # check the units of scale and setup the output units
        bb_unit = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)  # default unit
        # use the scale that was used at initialization for determining the units to return
        # to support returning the right units when fitting where units are stripped
        if hasattr(self.scale, "unit") and self.scale.unit is not None:
            # check that the units on scale are convertible to surface brightness units
            if not self.scale.unit.is_equivalent(bb_unit, u.spectral_density(x)):
                raise ValueError(
                    f"scale units not surface brightness: {self.scale.unit}"
                )
            # use the scale passed to get the value for scaling
            if hasattr(scale, "unit"):
                mult_scale = scale.value
            else:
                mult_scale = scale
            bb_unit = self.scale.unit
        else:
            mult_scale = scale
        # Check if input values are physically possible
        if np.any(temp < 0):
            raise ValueError(f"Temperature should be positive: {temp}")
        if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
            warnings.warn(
                "Input contains invalid wavelength/frequency value(s)",
                AstropyUserWarning,
            )
        # Planck law: B_nu = 2 h nu^3 / c^2 / (expm1(h nu / k T)), per steradian.
        log_boltz = const.h * freq / (const.k_B * temp)
        boltzm1 = np.expm1(log_boltz)
        # Calculate blackbody flux
        bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr
        y = mult_scale * bb_nu.to(bb_unit, u.spectral_density(freq))
        # If the temperature parameter has no unit, we should return a unitless
        # value. This occurs for instance during fitting, since we drop the
        # units temporarily.
        if hasattr(temperature, "unit"):
            return y
        return y.value

    @property
    def input_units(self):
        # The input units are those of the 'x' value, which should always be
        # Hz. Because we do this, and because input_units_allow_dimensionless
        # is set to True, dimensionless values are assumed to be in Hz.
        return {self.inputs[0]: u.Hz}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {"temperature": u.K}

    @property
    def bolometric_flux(self):
        """Bolometric flux."""
        # bolometric flux in the native units of the planck function
        # (sigma_sb * T^4 / pi, scaled by the dimensionless scale value)
        native_bolflux = (
            self.scale.value * const.sigma_sb * self.temperature ** 4 / np.pi
        )
        # return in more "astro" units
        return native_bolflux.to(u.erg / (u.cm ** 2 * u.s))

    @property
    def lambda_max(self):
        """Peak wavelength when the curve is expressed as power density."""
        # Wien displacement law, wavelength form: lambda_max = b / T.
        return const.b_wien / self.temperature

    @property
    def nu_max(self):
        """Peak frequency when the curve is expressed as power density."""
        # Wien displacement law, frequency form: nu_max ~= 2.8214391 k T / h.
        return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
    """
    Drude model based on the behavior of electrons in materials (esp. metals).

    Parameters
    ----------
    amplitude : float
        Peak value
    x_0 : float
        Position of the peak
    fwhm : float
        Full width at half maximum

    Model formula:

        .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{((x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Drude1D

        fig, ax = plt.subplots()

        # generate the curves and plot them
        x = np.arange(7.5 , 12.5 , 0.1)
        dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
        ax.plot(x, dmodel(x))

        ax.set_xlabel('x')
        ax.set_ylabel('F(x)')
        plt.show()
    """
    amplitude = Parameter(default=1.0)
    x_0 = Parameter(default=1.0)
    fwhm = Parameter(default=1.0)

    @staticmethod
    def evaluate(x, amplitude, x_0, fwhm):
        """
        One dimensional Drude model function.
        """
        return (
            amplitude
            * ((fwhm / x_0) ** 2)
            / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
        )

    @staticmethod
    def fit_deriv(x, amplitude, x_0, fwhm):
        """
        Drude1D model function derivatives with respect to each parameter.
        """
        # d_amplitude is the model profile divided by the amplitude; it is
        # reused below as a common factor of the other two derivatives.
        d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
        d_x_0 = (
            -2
            * amplitude
            * d_amplitude
            * (
                (1 / x_0)
                + d_amplitude
                * (x_0 ** 2 / fwhm ** 2)
                * (
                    (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
                    - (2 * fwhm ** 2 / x_0 ** 3)
                )
            )
        )
        d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
        return [d_amplitude, d_x_0, d_fwhm]

    @property
    def input_units(self):
        if self.x_0.unit is None:
            return None
        return {self.inputs[0]: self.x_0.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {
            "x_0": inputs_unit[self.inputs[0]],
            "fwhm": inputs_unit[self.inputs[0]],
            "amplitude": outputs_unit[self.outputs[0]],
        }

    @property
    def return_units(self):
        if self.amplitude.unit is None:
            return None
        return {self.outputs[0]: self.amplitude.unit}

    @x_0.validator
    def x_0(self, val):
        """Ensure `x_0` is not 0 (the model divides by x_0)."""
        if val == 0:
            raise InputParameterError("0 is not an allowed value for x_0")

    def bounding_box(self, factor=50):
        """Tuple defining the default ``bounding_box`` limits,
        ``(x_low, x_high)``.

        Parameters
        ----------
        factor : float
            The multiple of FWHM used to define the limits.
        """
        x0 = self.x_0
        dx = factor * self.fwhm
        return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
    r"""One dimensional Plummer density profile model.

    Parameters
    ----------
    mass : float
        Total mass of cluster.
    r_plum : float
        Scale parameter which sets the size of the cluster core.

    Notes
    -----
    Model formula:

    .. math::

        \rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}

    References
    ----------
    .. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
    """

    mass = Parameter(default=1.0)
    r_plum = Parameter(default=1.0)

    @staticmethod
    def evaluate(x, mass, r_plum):
        """Evaluate the Plummer density profile at radius ``x``."""
        return (3*mass)/(4 * np.pi * r_plum**3) * (1+(x/r_plum)**2)**(-5/2)

    @staticmethod
    def fit_deriv(x, mass, r_plum):
        """Analytic partial derivatives with respect to ``mass`` and ``r_plum``."""
        d_mass = 3 / ((4*np.pi*r_plum**3) * (((x/r_plum)**2 + 1)**(5/2)))
        d_r_plum = (6*mass*x**2-9*mass*r_plum**2) / ((4*np.pi * r_plum**6) *
                                                     (1+(x/r_plum)**2)**(7/2))
        return [d_mass, d_r_plum]

    @property
    def input_units(self):
        """Units expected for ``x``; ``None`` when the model is unitless."""
        if self.mass.unit is not None or self.r_plum.unit is not None:
            return {self.inputs[0]: self.r_plum.unit}
        return None

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        """Map data units onto parameter units for fitting with quantities."""
        length_unit = inputs_unit[self.inputs[0]]
        density_unit = outputs_unit[self.outputs[0]]
        return {'mass': density_unit * length_unit ** 3,
                'r_plum': length_unit}
class NFW(Fittable1DModel):
    r"""
    Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter.

    Parameters
    ----------
    mass : float or :class:`~astropy.units.Quantity`
        Mass of NFW peak within specified overdensity radius.
    concentration : float
        Concentration of the NFW profile.
    redshift : float
        Redshift of the NFW profile.
    massfactor : tuple or str
        Mass overdensity factor and type for provided profiles:
        Tuple version:
            ("virial",) : virial radius
            ("critical", N) : radius where density is N times that of the critical density
            ("mean", N) : radius where density is N times that of the mean density
        String version:
            "virial" : virial radius
            "Nc" : radius where density is N times that of the critical density (e.g. "200c")
            "Nm" : radius where density is N times that of the mean density (e.g. "500m")
    cosmo : :class:`~astropy.cosmology.Cosmology`
        Background cosmology for density calculation. If None, the default cosmology will be used.

    Notes
    -----
    Model formula:

    .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2}

    References
    ----------
    .. [1] https://arxiv.org/pdf/astro-ph/9508025
    .. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
    .. [3] https://en.wikipedia.org/wiki/Virial_mass
    """
    # Model Parameters
    # NFW Profile mass
    mass = Parameter(default=1.0, min=1.0, unit=u.M_sun)
    # NFW profile concentration
    concentration = Parameter(default=1.0, min=1.0)
    # NFW Profile redshift
    redshift = Parameter(default=0.0, min=0.0)
    # We allow values without units to be passed when evaluating the model, and
    # in this case the input r values are assumed to be lengths / positions in kpc.
    _input_units_allow_dimensionless = True

    def __init__(self, mass=u.Quantity(mass.default, mass.unit),
                 concentration=concentration.default, redshift=redshift.default,
                 massfactor=("critical", 200), cosmo=None, **kwargs):
        # NOTE: the parameter defaults above are captured from the Parameter
        # descriptors when the class body is evaluated, not per call.
        # Set default cosmology
        if cosmo is None:
            cosmo = cosmology.default_cosmology.get()
        # Set mass overdensity type and factor
        self._density_delta(massfactor, cosmo, redshift)
        # Establish mass units for density calculation (default solar masses)
        if not isinstance(mass, u.Quantity):
            in_mass = u.Quantity(mass, u.M_sun)
        else:
            in_mass = mass
        # Obtain scale radius (also caches it on self.radius_s)
        self._radius_s(mass, concentration)
        # Obtain scale density (also caches it on self.density_s)
        self._density_s(mass, concentration)
        super().__init__(mass=in_mass, concentration=concentration, redshift=redshift, **kwargs)

    def evaluate(self, r, mass, concentration, redshift):
        """
        One dimensional NFW profile function

        Parameters
        ----------
        r : float or :class:`~astropy.units.Quantity`
            Radial position of density to be calculated for the NFW profile.
        mass : float or :class:`~astropy.units.Quantity`
            Mass of NFW peak within specified overdensity radius.
        concentration : float
            Concentration of the NFW profile.
        redshift : float
            Redshift of the NFW profile.

        Returns
        -------
        density : float or :class:`~astropy.units.Quantity`
            NFW profile mass density at location ``r``. The density units are:
            [``mass`` / ``r`` ^3]

        Notes
        -----
        .. warning::
            Output values might contain ``nan`` and ``inf``.
        """
        # Create radial version of input with dimension
        # (dimensionless input is interpreted as kpc, per
        # _input_units_allow_dimensionless above)
        if hasattr(r, "unit"):
            in_r = r
        else:
            in_r = u.Quantity(r, u.kpc)
        # Define reduced radius (r / r_{\\rm s})
        # also update scale radius
        radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
        # Density distribution
        # \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
        # also update scale density
        density = self._density_s(mass, concentration) / (radius_reduced *
                                                          (u.Quantity(1.0) + radius_reduced) ** 2)
        # Return a bare value when the caller passed a unitless mass
        if hasattr(mass, "unit"):
            return density
        else:
            return density.value

    def _density_delta(self, massfactor, cosmo, redshift):
        """
        Calculate density delta.

        Parses ``massfactor`` (tuple or string form), then caches and returns
        the overdensity threshold ``self.density_delta`` for the given
        cosmology and redshift.
        """
        # Set mass overdensity type and factor
        if isinstance(massfactor, tuple):
            # Tuple options
            #   ("virial")       : virial radius
            #   ("critical", N)  : radius where density is N that of the critical density
            #   ("mean", N)      : radius where density is N that of the mean density
            if massfactor[0].lower() == "virial":
                # Virial Mass
                delta = None
                masstype = massfactor[0].lower()
            elif massfactor[0].lower() == "critical":
                # Critical or Mean Overdensity Mass
                delta = float(massfactor[1])
                masstype = 'c'
            elif massfactor[0].lower() == "mean":
                # Critical or Mean Overdensity Mass
                delta = float(massfactor[1])
                masstype = 'm'
            else:
                raise ValueError("Massfactor '" + str(massfactor[0]) + "' not one of 'critical', "
                                 "'mean', or 'virial'")
        else:
            try:
                # String options
                #   virial : virial radius
                #   Nc     : radius where density is N that of the critical density
                #   Nm     : radius where density is N that of the mean density
                if massfactor.lower() == "virial":
                    # Virial Mass
                    delta = None
                    masstype = massfactor.lower()
                elif massfactor[-1].lower() == 'c' or massfactor[-1].lower() == 'm':
                    # Critical or Mean Overdensity Mass
                    delta = float(massfactor[0:-1])
                    masstype = massfactor[-1].lower()
                else:
                    raise ValueError("Massfactor " + str(massfactor) + " string not of the form "
                                     "'#m', '#c', or 'virial'")
            except (AttributeError, TypeError):
                raise TypeError("Massfactor " + str(
                    massfactor) + " not a tuple or string")

        # Set density from masstype specification
        if masstype == "virial":
            # Virial overdensity fitting formula in terms of Om(z) - 1
            Om_c = cosmo.Om(redshift) - 1.0
            d_c = 18.0 * np.pi ** 2 + 82.0 * Om_c - 39.0 * Om_c ** 2
            self.density_delta = d_c * cosmo.critical_density(redshift)
        elif masstype == 'c':
            self.density_delta = delta * cosmo.critical_density(redshift)
        elif masstype == 'm':
            self.density_delta = delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
        else:
            raise ValueError("Invalid masstype '" + str(masstype) +
                             "'. Should be one of 'virial','c', or 'm'")
        return self.density_delta

    @staticmethod
    def A_NFW(y):
        r"""
        Dimensionless volume integral of the NFW profile, used as an intermediate step in some
        calculations for this model.

        Notes
        -----
        Model formula:

        .. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
        """
        return np.log(1.0 + y) - (y / (1.0 + y))

    def _density_s(self, mass, concentration):
        """
        Calculate scale density of the NFW profile.

        Caches and returns ``self.density_s``.
        """
        # Enforce default units
        if not isinstance(mass, u.Quantity):
            in_mass = u.Quantity(mass, u.M_sun)
        else:
            in_mass = mass
        # Calculate scale density
        # M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
        self.density_s = in_mass / (4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 *
                                    self.A_NFW(concentration))
        return self.density_s

    @property
    def rho_scale(self):
        r"""
        Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`
        """
        return self.density_s

    def _radius_s(self, mass, concentration):
        """
        Calculate scale radius of the NFW profile.

        Caches ``self.radius_s`` and returns it converted to kpc.
        """
        # Enforce default units
        if not isinstance(mass, u.Quantity):
            in_mass = u.Quantity(mass, u.M_sun)
        else:
            in_mass = mass
        # Delta Mass is related to delta radius by
        # M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
        # And delta radius is related to the NFW scale radius by
        # c = R / r_{\\rm s}
        self.radius_s = (((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (
            1.0 / 3.0)) / concentration
        # Set radial units to kiloparsec by default (unit will be rescaled by units of radius
        # in evaluate)
        return self.radius_s.to(u.kpc)

    @property
    def r_s(self):
        """
        Scale radius of the NFW profile.
        """
        return self.radius_s

    @property
    def r_virial(self):
        """
        Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.).
        """
        return self.r_s * self.concentration

    @property
    def r_max(self):
        """
        Radius of maximum circular velocity.
        """
        # 2.16258 is the standard numerical root of the NFW v_circ maximum.
        return self.r_s * 2.16258

    @property
    def v_max(self):
        """
        Maximum circular velocity.
        """
        return self.circular_velocity(self.r_max)

    def circular_velocity(self, r):
        r"""
        Circular velocities of the NFW profile.

        Parameters
        ----------
        r : float or :class:`~astropy.units.Quantity`
            Radial position of velocity to be calculated for the NFW profile.

        Returns
        -------
        velocity : float or :class:`~astropy.units.Quantity`
            NFW profile circular velocity at location ``r``. The velocity units are:
            [km / s]

        Notes
        -----
        Model formula:

        .. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}

        .. math:: x = r/r_s

        .. warning::
            Output values might contain ``nan`` and ``inf``.
        """
        # Enforce default units (if parameters are without units)
        if hasattr(r, "unit"):
            in_r = r
        else:
            in_r = u.Quantity(r, u.kpc)
        # Mass factor defined velocity (i.e. V200c for M200c, Rvir for Mvir)
        v_profile = np.sqrt(self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) /
                            self.r_virial)
        # Define reduced radius (r / r_{\\rm s})
        reduced_radius = in_r / self.r_virial.to(in_r.unit)
        # Circular velocity given by:
        # v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
        # where x=r/r_{200}
        velocity = np.sqrt((v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) /
                           (reduced_radius * self.A_NFW(self.concentration)))
        return velocity.to(u.km / u.s)

    @property
    def input_units(self):
        # The units for the 'r' variable should be a length (default kpc)
        return {self.inputs[0]: u.kpc}

    @property
    def return_units(self):
        # The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
        if (self.mass.unit is None) and (self.input_units[self.inputs[0]] is None):
            return {self.outputs[0]: u.M_sun / u.kpc ** 3}
        elif (self.mass.unit is None):
            return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
        elif (self.input_units[self.inputs[0]] is None):
            return {self.outputs[0]: self.mass.unit / u.kpc ** 3}
        else:
            return {self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Mass is always fit in solar masses; concentration and redshift are
        # dimensionless.
        return {'mass': u.M_sun,
                "concentration": None,
                "redshift": None}
| bsd-3-clause |
jobar8/interpies | setup.py | 1 | 2234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python installation file for the interpies package (setuptools based).

Builds the long description from README.md (converted to reST when pandoc
is available) and reads the version string from interpies/__init__.py.
"""
from setuptools import setup
import re

# Convert README file from Markdown to reST for PyPI rendering.
try:
    from pypandoc import convert
    long_description = convert('README.md', 'rst')
    long_description = long_description.replace("\r","")
except (IOError, ImportError):
    print("Pandoc not found. Long_description conversion failure.")
    # pandoc is not installed, fallback to using raw contents
    import io
    with io.open('README.md', encoding="utf-8") as f:
        long_description = f.read()

# Find VERSION: parse __version__ = '...' out of the package __init__
# instead of importing it (avoids importing dependencies at install time).
version_file = 'interpies/__init__.py'
with open(version_file, 'r') as f:
    version_string = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                               f.read(), re.M)
if version_string is not None:
    VERSION = version_string.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (version_file,))

# Trove classifiers describing the supported Python versions and audience.
CLASSIFIERS = ['Development Status :: 4 - Beta',
               'Intended Audience :: Science/Research',
               'Natural Language :: English',
               'License :: OSI Approved :: BSD License',
               'Operating System :: OS Independent',
               'Programming Language :: Python',
               'Programming Language :: Python :: 3.3',
               'Programming Language :: Python :: 3.4',
               'Programming Language :: Python :: 3.5',
               'Programming Language :: Python :: 3.6',
               'Topic :: Scientific/Engineering'
               ]

setup(
    name='interpies',
    version=VERSION,
    packages=['interpies'],
    install_requires=[
        'numpy',
        'scipy',
        'matplotlib',
        'rasterio>=1.0a9',
        'scikit-learn',
        'scikit-image'
    ],
    url='https://github.com/jobar8/interpies',
    license='BSD',
    author='Joseph Barraud',
    author_email='joseph.barraud@geophysicslabs.com',
    description='A collection of functions for reading, displaying, transforming and analyzing geophysical data.',
    long_description=long_description,
    keywords=['geophysics raster gdal gravimetry magnetometry seismic'],
    classifiers=CLASSIFIERS
)
| bsd-3-clause |
cluckmaster/MissionPlanner | Lib/site-packages/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
# IronPython ('cli') cannot load the compiled C extension, so expose stubs
# that fail loudly if the bit-packing routines are actually called.
if sys.platform != 'cli':
    from _compiled_base import packbits, unpackbits
else:
    def packbits(*args, **kw):
        raise NotImplementedError()

    def unpackbits(*args, **kw):
        raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Backwards-compatibility alias for the helper imported from _iotools.
_string_like = _is_string_like
def seek_gzip_factory(f):
    """Use this factory to produce the class so that we can do a lazy
    import on gzip.

    Parameters
    ----------
    f : str or gzip.GzipFile
        Filename of a gzipped file, or an already open ``gzip.GzipFile``.

    Returns
    -------
    GzipFile
        A ``gzip.GzipFile`` subclass whose ``seek`` supports absolute and
        relative forward seeks (rewinding first for backward seeks), plus a
        matching ``tell``.
    """
    import gzip

    class GzipFile(gzip.GzipFile):

        def seek(self, offset, whence=0):
            # figure out new position (we can only seek forwards)
            if whence == 1:
                offset = self.offset + offset

            if whence not in [0, 1]:
                # Parenthesized raise form is valid on both Python 2 and 3
                # (the old ``raise IOError, msg`` form is Python-2-only).
                raise IOError("Illegal argument")

            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            # Consume data in 1 KiB chunks until the target offset is reached.
            count = offset - self.offset
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)

        def tell(self):
            return self.offset

    if isinstance(f, str):
        f = GzipFile(f)
    elif isinstance(f, gzip.GzipFile):
        # cast to our GzipFile if its already a gzip.GzipFile
        g = GzipFile(fileobj=f.fileobj)
        g.name = f.name
        g.mode = f.mode
        f = g

    return f
class BagObj(object):
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...                                 # will call this method when any
    ...                                 # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """
    def __init__(self, obj):
        # Store the wrapped object; every other attribute access is routed
        # through __getattribute__ below.
        self._obj = obj

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            # Re-raise as AttributeError so the wrapper behaves like a normal
            # attribute namespace.  Parenthesized raise form is valid on both
            # Python 2 and 3 (``raise AttributeError, key`` is Python-2-only).
            raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
    """Create a ``zipfile.ZipFile``, forcing Zip64 extensions on when the
    running Python supports them (2.5+) so archives may exceed 2 GiB."""
    import zipfile
    supports_zip64 = sys.version_info >= (2, 5)
    if supports_zip64:
        kwargs['allowZip64'] = True
    return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ".npy" extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ".npy" extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ".npy" extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    def __init__(self, fid, own_fid=False):
        # Import is postponed to here since zipfile depends on gzip, an optional
        # component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        # Expose member names both with and without the ".npy" suffix.
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid
        else:
            self.fid = None

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None

    def __del__(self):
        self.close()

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = 0
        if key in self._files:
            member = 1
        elif key in self.files:
            member = 1
            key += '.npy'
        if member:
            bytes = self.zip.read(key)
            if bytes.startswith(format.MAGIC_PREFIX):
                value = BytesIO(bytes)
                return format.read_array(value)
            else:
                # Non-.npy members are returned as raw bytes.
                return bytes
        else:
            # Parenthesized raise form is valid on both Python 2 and 3
            # (``raise KeyError, msg`` is Python-2-only syntax).
            raise KeyError("%s is not a file in the archive" % key)

    def __iter__(self):
        return iter(self.files)

    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).

        """
        return [(f, self[f]) for f in self.files]

    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])

    def keys(self):
        """Return files in the archive with a ".npy" extension."""
        return self.files

    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()

    def __contains__(self, key):
        return self.files.__contains__(key)
def load(file, mmap_mode=None):
    """
    Load a pickled, ``.npy``, or ``.npz`` binary file.

    Parameters
    ----------
    file : file-like object or string
        The file to read. It must support ``seek()`` and ``read()`` methods.
        If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode
        (see `numpy.memmap`).  The mode has no effect for pickled or
        zipped files.
        A memory-mapped array is stored on disk, and not directly loaded
        into memory.  However, it can be accessed and sliced like any
        ndarray.  Memory mapping is especially useful for accessing
        small fragments of large files without reading the entire file
        into memory.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.

    See Also
    --------
    save, savez, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.

    Notes
    -----
    - If the file contains pickle data, then whatever is stored in the
      pickle is returned.
    - If the file is a ``.npy`` file, then an array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    import gzip

    # Track whether this function opened the handle (and must close it).
    own_fid = False
    if isinstance(file, basestring):
        fid = open(file, "rb")
        own_fid = True
    elif isinstance(file, gzip.GzipFile):
        fid = seek_gzip_factory(file)
        own_fid = True
    else:
        fid = file

    try:
        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = asbytes('PK\x03\x04')
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        fid.seek(-N, 1) # back-up
        if magic.startswith(_ZIP_PREFIX):  # zip-file (assume .npz)
            # Ownership of the handle is transferred to NpzFile, so the
            # finally-clause below must not close it.
            own_fid = False
            return NpzFile(fid, own_fid=True)
        elif magic == format.MAGIC_PREFIX: # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid)
        else:  # Try a pickle
            try:
                return _cload(fid)
            except:
                raise IOError, \
                    "Failed to interpret file %s as a pickle" % repr(file)
    finally:
        if own_fid:
            fid.close()
def save(file, arr):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file or str
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see `format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # A string argument names a file we must open (and later close)
    # ourselves; anything else is treated as an already-open file object.
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    else:
        fid = file
        own_fid = False

    try:
        format.write_array(fid, np.asanyarray(arr))
    finally:
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    Positional arguments are stored under the automatic names 'arr_0',
    'arr_1', etc.; keyword arguments are stored under their keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already there.
    *args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    **kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    numpy.savez_compressed : Save several arrays into a compressed .npz file format

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see `format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Delegate to the shared implementation with compression disabled.
    return _savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Keyword arguments are stored under their keyword names; positional
    arguments are stored under the automatic names arr_0, arr_1, etc.

    Parameters
    ----------
    file : string
        File name of .npz file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed .npz file format

    """
    # Delegate to the shared implementation with compression enabled.
    return _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
    """
    Shared implementation for `savez` and `savez_compressed`.

    Each array in ``args``/``kwds`` is serialized to a temporary ``.npy``
    file on disk and then written into a zip archive named ``file``.

    Parameters
    ----------
    file : str or file
        Target archive; a ``.npz`` suffix is appended to string names.
    args : tuple of array_like
        Unnamed arrays, stored as ``arr_0``, ``arr_1``, ...
    kwds : dict
        Named arrays, stored under their keys.
    compress : bool
        If True, members are deflated (ZIP_DEFLATED); otherwise stored
        uncompressed (ZIP_STORED).

    Raises
    ------
    ValueError
        If an unnamed array would collide with an explicit ``arr_N`` keyword.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        # Membership test directly on the dict; parenthesized raise form is
        # valid on both Python 2 and 3 (``raise ValueError, msg`` is not).
        if key in namedict:
            raise ValueError("Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zip = zipfile_factory(file, mode="w", compression=compression)

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.iteritems():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val))
                fid.close()
                fid = None
                zip.write(tmpfile, arcname=fname)
            finally:
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)

    zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False):
    """
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file or str
        File or filename to read.  If the filename extension is ``.gz`` or
        ``.bz2``, the file is first decompressed.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a record
        data-type, the resulting array will be 1-dimensional, and each row
        will be interpreted as an element of the array.  In this case, the
        number of columns used must match the number of fields in the
        data-type.
    comments : str, optional
        The character used to indicate the start of a comment; default: '#'.
    delimiter : str, optional
        The string used to separate values.  By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data:
        ``converters = {3: lambda s: float(s or 0)}``.  Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  The default is False.

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    Examples
    --------
    >>> from StringIO import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

    """
    # Type conversions for Py3 convenience: comparisons below are done on
    # bytes, so normalize the comment and delimiter markers up front.
    comments = asbytes(comments)
    if delimiter is not None:
        delimiter = asbytes(delimiter)

    user_converters = converters
    if usecols is not None:
        usecols = list(usecols)

    # Track whether this function opened the handle (and must close it).
    own_fh = False
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            fh = seek_gzip_factory(fname)
        elif fname.endswith('.bz2'):
            import bz2
            fh = bz2.BZ2File(fname)
        else:
            fh = open(fname, 'U')
    elif hasattr(fname, 'readline'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    # Accumulates one tuple of converted values per data row.
    X = []

    def flatten_dtype(dt):
        """Unpack a structured data-type."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            return [dt.base] * int(np.prod(dt.shape))
        else:
            types = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt = flatten_dtype(tp)
                types.extend(flat_dt)
            return types

    def split_line(line):
        """Chop off comments, strip, and split at delimiter."""
        line = asbytes(line).split(comments)[0].strip()
        if line:
            return line.split(delimiter)
        else:
            return []

    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)

        # Skip the first `skiprows` lines
        for i in xrange(skiprows):
            fh.readline()

        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        while not first_vals:
            first_line = fh.readline()
            if not first_line: # EOF reached
                raise IOError('End-of-file reached before encountering data.')
            first_vals = split_line(first_line)
        N = len(usecols or first_vals)

        dtype_types = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in xrange(N)]

        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).iteritems():
            if usecols:
                try:
                    # Remap the user's column index onto the usecols subset.
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv

        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[i] for i in usecols]
            # Convert each value according to its column and store
            X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
    finally:
        if own_fh:
            fh.close()

    if len(dtype_types) > 1:
        # We're dealing with a structured array, with a dtype such as
        # [('x', int), ('y', [('s', int), ('t', float)])]
        #
        # First, create the array using a flattened dtype:
        # [('x', int), ('s', int), ('t', float)]
        #
        # Then, view the array using the specified dtype.
        try:
            X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
            X = X.view(dtype)
        except TypeError:
            # In the case we have an object dtype
            X = np.array(X, dtype=dtype)
    else:
        X = np.array(X, dtype)

    # Drop singleton dimensions (e.g. a single-row file becomes 1-D).
    X = np.squeeze(X)
    if unpack:
        return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored.
    delimiter : str
        Character separating columns.
    newline : str
        .. versionadded:: 1.5.0

        Character separating lines.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to preceed result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """
    # Py3 conversions first: `fmt` and `delimiter` are handled as text
    # internally; the output file itself is written in binary mode, so each
    # formatted row is re-encoded with asbytes() below.
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    # Track whether this function opened the handle; only then is it closed
    # in the `finally` clause (caller-supplied handles are left open).
    own_fh = False
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            # On Py3, open in binary mode so asbytes() output can be written;
            # on Py2, plain text mode matches the historical behavior.
            if sys.version_info[0] >= 3:
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'seek'):
        # Duck-type file-like objects by the presence of a `seek` method.
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]

        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif type(fmt) is str:
            # A single '%' means one specifier: replicate it for every column.
            # NOTE(review): this count treats an escaped '%%' as two '%'s --
            # confirm callers never rely on '%%' inside a one-specifier format.
            if fmt.count('%') == 1:
                fmt = [fmt, ]*ncol
                format = delimiter.join(fmt)
            elif fmt.count('%') != ncol:
                raise AttributeError('fmt has wrong number of %% formats. %s'
                                     % fmt)
            else:
                format = fmt

        # One `%`-formatted line per row; `newline` is appended before
        # encoding so the caller controls the line terminator exactly.
        for row in X:
            fh.write(asbytes(format % tuple(row) + newline))
    finally:
        if own_fh:
            fh.close()
import re
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)

    """
    # Track whether we opened the handle ourselves so we only close what
    # we own (caller-supplied file objects are left open).
    own_fh = False
    if not hasattr(file, "read"):
        file = open(file, 'rb')
        own_fh = True
    try:
        # Accept either a pattern string or a pre-compiled regex object.
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        seq = regexp.findall(file.read())
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        if own_fh:
            # Bug fix: the handle opened above is bound to `file` (the
            # parameter is rebound); the previous `fh.close()` raised a
            # NameError here and leaked the file descriptor.
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skiprows=0, skip_header=0, skip_footer=0, converters=None,
               missing='', missing_values=None, filling_values=None,
               usecols=None, names=None,
               excludelist=None, deletechars=None, replace_space='_',
               autostrip=False, case_sensitive=True, defaultfmt="f%i",
               unpack=None, usemask=False, loose=True, invalid_raise=True):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skiprows` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file or str
        File or filename to read.  If the filename extension is `.gz` or
        `.bz2`, the file is first decompressed.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skip_header : int, optional
        The numbers of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The numbers of lines to skip at the end of the file
    converters : variable or None, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing_values : variable or None, optional
        The set of strings corresponding to missing data.
    filling_values : variable or None, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence or None, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skiprows` lines.
        If `names` is a sequence or a single-string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variables names.
        By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`,
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    Examples
    ---------
    >>> from StringIO import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Using dtype = None

    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Specifying dtype and names

    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    An example with fixed-width columns

    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])

    """
    # Py3 data conversions to bytes, for convenience: all parsing below is
    # done on bytes, so text-typed arguments are encoded up front.
    comments = asbytes(comments)
    if isinstance(delimiter, unicode):
        delimiter = asbytes(delimiter)
    if isinstance(missing, unicode):
        missing = asbytes(missing)
    if isinstance(missing_values, (unicode, list, tuple)):
        missing_values = asbytes_nested(missing_values)
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converter' should be a valid dictionary "\
            "(got '%s' instead)"
        raise TypeError(errmsg % type(user_converters))

    # Initialize the filehandle, the LineSplitter and the NameValidator.
    # `own_fhd` records whether we opened the handle (and must close it).
    own_fhd = False
    if isinstance(fname, basestring):
        fhd = np.lib._datasource.open(fname, 'U')
        own_fhd = True
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
                        "(got %s instead)" % type(fname))
    else:
        fhd = fname
    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)

    # Get the first valid lines after the first skiprows ones ..
    if skiprows:
        warnings.warn("The use of `skiprows` is deprecated.\n"\
                      "Please use `skip_header` instead.",
                      DeprecationWarning)
        skip_header = skiprows
    # Skip the first `skip_header` rows
    for i in xrange(skip_header):
        fhd.readline()

    # Keep on until we find the first valid values
    first_values = None
    while not first_values:
        first_line = fhd.readline()
        if not first_line:
            raise IOError('End-of-file reached before encountering data.')
        if names is True:
            # When reading names from the file, a header line may itself be
            # commented: keep only what follows the comment marker.
            if comments in first_line:
                first_line = asbytes('').join(first_line.split(comments)[1:])
        first_values = split_line(first_line)
    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]

    # Check the columns to use: make sure `usecols` is a list
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([_bytes_to_name(_.strip())
                                for _ in first_values])
        # The header line was consumed as names: blank it so the parse loop
        # below skips it as an empty line.
        first_line = asbytes('')
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)

    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = dtype.names

    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    # Define the list of missing_values (one column: one list).
    # The empty string always counts as missing for every column.
    missing_values = [list([asbytes('')]) for _ in range(nbcols)]

    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, bytes):
        user_value = user_missing_values.split(asbytes(","))
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])

    # Process the deprecated `missing`
    if missing != asbytes(''):
        warnings.warn("The use of `missing` is deprecated.\n"\
                      "Please use `missing_values` instead.",
                      DeprecationWarning)
        values = [str(_) for _ in missing.split(asbytes(","))]
        for entry in missing_values:
            entry.extend(values)

    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values or []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols

    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (i, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(i):
            try:
                i = names.index(i)
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(i)
            except ValueError:
                # Unused converter specified
                continue
        # Find the value to test:
        if len(first_line):
            testing_value = first_values[i]
        else:
            testing_value = None
        converters[i].update(conv, locked=True,
                             testing_value=testing_value,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    miss_chars = [_.missing_values for _ in converters]

    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append

    # Parse each line.  The reported line numbers (i + skip_header + 1) are
    # 1-based and include the skipped header lines.
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        # Select only the columns we need
        if usecols:
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values, missing_values)]))
    if own_fhd:
        fhd.close()

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = map(itemgetter(i), rows)
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                # Replay the column one value at a time to pinpoint the
                # offending line for the error message.
                current_column = itertools.imap(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = " Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            # Invalid lines that fall inside the skipped footer are not
            # reported (they would have been discarded anyway).
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#        nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
#    if loose:
#        conversionfuncs = [conv._loose_call for conv in converters]
#    else:
#        conversionfuncs = [conv._strict_call for conv in converters]
#    for (i, vals) in enumerate(rows):
#        rows[i] = tuple([convert(val)
#                         for (convert, val) in zip(conversionfuncs, vals)])
    # Transpose trick: convert column by column (one converter per column),
    # then zip the converted columns back into rows.
    if loose:
        rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)])
    else:
        rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)])
    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = zip(names, column_types)
            mdtype = zip(names, [np.bool] * len(column_types))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg  [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    errmsg = "Nested fields involving objects "\
                             "are not supported..."
                    raise NotImplementedError(errmsg)
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for (i, ttype) in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed:
    # any value equal to a column's converted missing value is masked too.
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != asbytes('')]
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single (plain) array.

    Thin convenience wrapper around `genfromtxt` that forces
    ``usemask=False``; every other keyword argument is forwarded unchanged.
    Complete description of all the optional input parameters is available
    in the docstring of the `genfromtxt` function.

    See Also
    --------
    numpy.genfromtxt : generic function.

    """
    kwargs.update(usemask=False)
    return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Thin convenience wrapper around `genfromtxt` that forces
    ``usemask=True``; all remaining keyword arguments are forwarded as-is.
    For a complete description of all the input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    kwargs.update(usemask=True)
    return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned; if
    ``usemask=True`` a MaskedRecords array is returned instead.
    Complete description of all the optional input parameters is available
    in the docstring of the `genfromtxt` function.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    # Default to automatic dtype discovery unless the caller specified one.
    kwargs.setdefault('dtype', None)
    as_masked = kwargs.get('usemask', False)
    data = genfromtxt(fname, **kwargs)
    if as_masked:
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    For a complete description of all the input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    # Set default kwargs for genfromtxt as relevant to csv import:
    # lowercase field names, comma delimiter, names read from the first line.
    case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
    names = kwargs.get('names', True)
    if names is None:
        names = True
    # Bug fix: the dtype default was previously read from the (nonexistent)
    # 'update' key, which silently discarded any user-supplied `dtype` and
    # always fell back to automatic dtype discovery.
    kwargs.update(dtype=kwargs.get('dtype', None),
                  delimiter=kwargs.get('delimiter', ",") or ",",
                  names=names,
                  case_sensitive=case_sensitive)
    usemask = kwargs.get("usemask", False)
    output = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
| gpl-3.0 |
daodaoliang/neural-network-animation | matplotlib/tri/triinterpolate.py | 11 | 66355 | """
Interpolation inside triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from matplotlib.tri import Triangulation
from matplotlib.tri.trifinder import TriFinder
from matplotlib.tri.tritools import TriAnalyzer
import numpy as np
import warnings
__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
class TriInterpolator(object):
"""
Abstract base class for classes used to perform interpolation on
triangular grids.
Derived classes implement the following methods:
- ``__call__(x, y)`` ,
where x, y are array_like point coordinates of the same shape, and
that returns a masked array of the same shape containing the
interpolated z-values.
- ``gradient(x, y)`` ,
where x, y are array_like point coordinates of the same
shape, and that returns a list of 2 masked arrays of the same shape
containing the 2 derivatives of the interpolator (derivatives of
interpolated z values with respect to x and y).
"""
    def __init__(self, triangulation, z, trifinder=None):
        """
        Validate and store the triangulation, the nodal values and the
        triangle finder used by the interpolator.

        Parameters
        ----------
        triangulation : Triangulation
            The triangulation over which interpolation is performed.
        z : array-like
            Values at the triangulation nodes; must have the same shape as
            ``triangulation.x``.
        trifinder : TriFinder, optional
            Finder used to locate the triangle containing a query point.
            If None, the triangulation's own trifinder is used.

        Raises
        ------
        ValueError
            If `triangulation` is not a Triangulation, if `z` does not match
            the triangulation size, or if `trifinder` is not a TriFinder.
        """
        if not isinstance(triangulation, Triangulation):
            raise ValueError("Expected a Triangulation object")
        self._triangulation = triangulation
        self._z = np.asarray(z)
        if self._z.shape != self._triangulation.x.shape:
            raise ValueError("z array must have same length as triangulation x"
                             " and y arrays")
        if trifinder is not None and not isinstance(trifinder, TriFinder):
            raise ValueError("Expected a TriFinder object")
        # `trifinder or ...` falls back to the triangulation's default finder
        # when trifinder is None.
        self._trifinder = trifinder or self._triangulation.get_trifinder()
        # Default scaling factors : 1.0 (= no scaling)
        # Scaling may be used for interpolations for which the order of
        # magnitude of x, y has an impact on the interpolant definition.
        # Please refer to :meth:`_interpolate_multikeys` for details.
        self._unit_x = 1.0
        self._unit_y = 1.0
        # Default triangle renumbering: None (= no renumbering)
        # Renumbering may be used to avoid unecessary computations
        # if complex calculations are done inside the Interpolator.
        # Please refer to :meth:`_interpolate_multikeys` for details.
        self._tri_renum = None
# __call__ and gradient docstrings are shared by all subclasses
# (except, if needed, relevant additions).
# However these methods are only implemented in subclasses to avoid
# confusion in the documentation.
docstring__call__ = """
Returns a masked array containing interpolated values at the specified
x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
z : np.ma.array
Masked array of the same shape as *x* and *y* ; values
corresponding to (*x*, *y*) points outside of the triangulation
are masked out.
"""
docstringgradient = """
Returns a list of 2 masked arrays containing interpolated derivatives
at the specified x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
dzdx, dzdy : np.ma.array
2 masked arrays of the same shape as *x* and *y* ; values
corresponding to (x,y) points outside of the triangulation
are masked out.
The first returned array contains the values of
:math:`\\frac{\\partial z}{\\partial x}` and the second those of
:math:`\\frac{\\partial z}{\\partial y}`.
"""
def _interpolate_multikeys(self, x, y, tri_index=None,
return_keys=('z',)):
"""
Versatile (private) method defined for all TriInterpolators.
:meth:`_interpolate_multikeys` is a wrapper around method
:meth:`_interpolate_single_key` (to be defined in the child
subclasses).
:meth:`_interpolate_single_key actually performs the interpolation,
but only for 1-dimensional inputs and at valid locations (inside
unmasked triangles of the triangulation).
The purpose of :meth:`_interpolate_multikeys` is to implement the
following common tasks needed in all subclasses implementations:
- calculation of containing triangles
- dealing with more than one interpolation request at the same
location (e.g., if the 2 derivatives are requested, it is
unnecessary to compute the containing triangles twice)
- scaling according to self._unit_x, self._unit_y
- dealing with points outside of the grid (with fill value np.nan)
- dealing with multi-dimensionnal *x*, *y* arrays: flattening for
:meth:`_interpolate_params` call and final reshaping.
(Note that np.vectorize could do most of those things very well for
you, but it does it by function evaluations over successive tuples of
the input arrays. Therefore, this tends to be more time consuming than
using optimized numpy functions - e.g., np.dot - which can be used
easily on the flattened inputs, in the child-subclass methods
:meth:`_interpolate_single_key`.)
It is guaranteed that the calls to :meth:`_interpolate_single_key`
will be done with flattened (1-d) array_like input parameters `x`, `y`
and with flattened, valid `tri_index` arrays (no -1 index allowed).
Parameters
----------
x, y : array_like
x and y coordinates indicating where interpolated values are
requested.
tri_index : integer array_like, optional
Array of the containing triangle indices, same shape as
*x* and *y*. Defaults to None. If None, these indices
will be computed by a TriFinder instance.
(Note: For point outside the grid, tri_index[ipt] shall be -1).
return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
Defines the interpolation arrays to return, and in which order.
Returns
-------
ret : list of arrays
Each array-like contains the expected interpolated values in the
order defined by *return_keys* parameter.
"""
# Flattening and rescaling inputs arrays x, y
# (initial shape is stored for output)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
sh_ret = x.shape
if (x.shape != y.shape):
raise ValueError("x and y shall have same shapes."
" Given: {0} and {1}".format(x.shape, y.shape))
x = np.ravel(x)
y = np.ravel(y)
x_scaled = x/self._unit_x
y_scaled = y/self._unit_y
size_ret = np.size(x_scaled)
# Computes & ravels the element indexes, extract the valid ones.
if tri_index is None:
tri_index = self._trifinder(x, y)
else:
if (tri_index.shape != sh_ret):
raise ValueError(
"tri_index array is provided and shall"
" have same shape as x and y. Given: "
"{0} and {1}".format(tri_index.shape, sh_ret))
tri_index = np.ravel(tri_index)
mask_in = (tri_index != -1)
if self._tri_renum is None:
valid_tri_index = tri_index[mask_in]
else:
valid_tri_index = self._tri_renum[tri_index[mask_in]]
valid_x = x_scaled[mask_in]
valid_y = y_scaled[mask_in]
ret = []
for return_key in return_keys:
# Find the return index associated with the key.
try:
return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
except KeyError:
raise ValueError("return_keys items shall take values in"
" {'z', 'dzdx', 'dzdy'}")
# Sets the scale factor for f & df components
scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
# Computes the interpolation
ret_loc = np.empty(size_ret, dtype=np.float64)
ret_loc[~mask_in] = np.nan
ret_loc[mask_in] = self._interpolate_single_key(
return_key, valid_tri_index, valid_x, valid_y) * scale
ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
return ret
def _interpolate_single_key(self, return_key, tri_index, x, y):
"""
Performs the interpolation at points belonging to the triangulation
(inside an unmasked triangles).
Parameters
----------
return_index : string key from {'z', 'dzdx', 'dzdy'}
Identifies the requested values (z or its derivatives)
tri_index : 1d integer array
Valid triangle index (-1 prohibited)
x, y : 1d arrays, same shape as `tri_index`
Valid locations where interpolation is requested.
Returns
-------
ret : 1-d array
Returned array of the same size as *tri_index*
"""
raise NotImplementedError("TriInterpolator subclasses" +
"should implement _interpolate_single_key!")
class LinearTriInterpolator(TriInterpolator):
    """
    A LinearTriInterpolator performs linear interpolation on a triangular grid.

    Each triangle of the grid is modelled as a plane, so that the value
    interpolated at a point (x, y) lies on the plane of the triangle
    containing (x, y).  The interpolated surface is therefore continuous
    across the triangulation, but its first derivatives jump at the edges
    between triangles.

    Parameters
    ----------
    triangulation : :class:`~matplotlib.tri.Triangulation` object
        The triangulation to interpolate over.
    z : array_like of shape (npoints,)
        Array of values, defined at grid points, to interpolate between.
    trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
        If this is not specified, the Triangulation's default TriFinder will
        be used by calling
        :func:`matplotlib.tri.Triangulation.get_trifinder`.

    Methods
    -------
    `__call__` (x, y) : Returns interpolated values at x,y points
    `gradient` (x, y) : Returns interpolated derivatives at x,y points
    """
    def __init__(self, triangulation, z, trifinder=None):
        TriInterpolator.__init__(self, triangulation, z, trifinder)
        # Per-triangle plane coefficients (a, b, c) with z = a*x + b*y + c,
        # precomputed once so each interpolation is a cheap lookup.
        self._plane_coefficients = \
            self._triangulation.calculate_plane_coefficients(self._z)

    def __call__(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('z',))[0]
    __call__.__doc__ = TriInterpolator.docstring__call__

    def gradient(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('dzdx', 'dzdy'))
    gradient.__doc__ = TriInterpolator.docstringgradient

    def _interpolate_single_key(self, return_key, tri_index, x, y):
        # Evaluate the containing triangle's plane (or its constant
        # gradient components) at the requested points.
        coeffs = self._plane_coefficients
        if return_key == 'dzdx':
            return coeffs[tri_index, 0]
        if return_key == 'dzdy':
            return coeffs[tri_index, 1]
        if return_key == 'z':
            return (coeffs[tri_index, 0]*x +
                    coeffs[tri_index, 1]*y +
                    coeffs[tri_index, 2])
        raise ValueError("Invalid return_key: " + return_key)
class CubicTriInterpolator(TriInterpolator):
    """
    A CubicTriInterpolator performs cubic interpolation on triangular grids.

    In one-dimension - on a segment - a cubic interpolating function is
    defined by the values of the function and its derivative at both ends.
    This is almost the same in 2-d inside a triangle, except that the values
    of the function and its 2 derivatives have to be defined at each triangle
    node.

    The CubicTriInterpolator takes the value of the function at each node -
    provided by the user - and internally computes the value of the
    derivatives, resulting in a smooth interpolation.
    (As a special feature, the user can also impose the value of the
    derivatives at each node, but this is not supposed to be the common
    usage.)

    Parameters
    ----------
    triangulation : :class:`~matplotlib.tri.Triangulation` object
        The triangulation to interpolate over.
    z : array_like of shape (npoints,)
        Array of values, defined at grid points, to interpolate between.
    kind : {'min_E', 'geom', 'user'}, optional
        Choice of the smoothing algorithm, in order to compute
        the interpolant derivatives (defaults to 'min_E'):

        - if 'min_E': (default) The derivatives at each node is computed
          to minimize a bending energy.
        - if 'geom': The derivatives at each node is computed as a
          weighted average of relevant triangle normals. To be used for
          speed optimization (large grids).
        - if 'user': The user provides the argument `dz`, no computation
          is hence needed.
    trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
        If not specified, the Triangulation's default TriFinder will
        be used by calling
        :func:`matplotlib.tri.Triangulation.get_trifinder`.
    dz : tuple of array_likes (dzdx, dzdy), optional
        Used only if *kind* ='user'. In this case *dz* must be provided as
        (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
        are the interpolant first derivatives at the *triangulation* points.

    Methods
    -------
    `__call__` (x, y) : Returns interpolated values at x,y points
    `gradient` (x, y) : Returns interpolated derivatives at x,y points

    Notes
    -----
    This note is a bit technical and details the way a
    :class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
    interpolation.

    The interpolation is based on a Clough-Tocher subdivision scheme of
    the *triangulation* mesh (to make it clearer, each triangle of the
    grid will be divided in 3 child-triangles, and on each child triangle
    the interpolated function is a cubic polynomial of the 2 coordinates).
    This technique originates from FEM (Finite Element Method) analysis;
    the element used is a reduced Hsieh-Clough-Tocher (HCT)
    element. Its shape functions are described in [1]_.
    The assembled function is guaranteed to be C1-smooth, i.e. it is
    continuous and its first derivatives are also continuous (this
    is easy to show inside the triangles but is also true when crossing the
    edges).

    In the default case (*kind* ='min_E'), the interpolant minimizes a
    curvature energy on the functional space generated by the HCT element
    shape functions - with imposed values but arbitrary derivatives at each
    node. The minimized functional is the integral of the so-called total
    curvature (implementation based on an algorithm from [2]_ - PCG sparse
    solver):

    .. math::

        E(z) = \\ \\frac{1}{2} \\int_{\\Omega} \\left(
            \\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
            \\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
            2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
            \\right)^2 \\right) dx\\,dy

    If the case *kind* ='geom' is chosen by the user, a simple geometric
    approximation is used (weighted average of the triangle normal
    vectors), which could improve speed on very large grids.

    References
    ----------
    .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
        Hsieh-Clough-Tocher triangles, complete or reduced.",
        International Journal for Numerical Methods in Engineering,
        17(5):784 - 789. 2.01.
    .. [2] C.T. Kelley, "Iterative Methods for Optimization".
    """
    def __init__(self, triangulation, z, kind='min_E', trifinder=None,
                 dz=None):
        TriInterpolator.__init__(self, triangulation, z, trifinder)
        # Loads the underlying c++ _triangulation.
        # (During loading, reordering of triangulation._triangles may occur so
        # that all final triangles are now anti-clockwise)
        self._triangulation.get_cpp_triangulation()
        # To build the stiffness matrix and avoid zero-energy spurious modes
        # we will only store internally the valid (unmasked) triangles and
        # the necessary (used) points coordinates.
        # 2 renumbering tables need to be computed and stored:
        # - a triangle renum table in order to translate the result from a
        #   TriFinder instance into the internal stored triangle number.
        # - a node renum table to overwrite the self._z values into the new
        #   (used) node numbering.
        tri_analyzer = TriAnalyzer(self._triangulation)
        (compressed_triangles, compressed_x, compressed_y, tri_renum,
         node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
        self._triangles = compressed_triangles
        self._tri_renum = tri_renum
        # Taking into account the node renumbering in self._z
        # (nodes with renum == -1 are unused and dropped).
        # NOTE(review): the fancy-indexed assignment below presumably relies
        # on node_renum being a permutation over the kept nodes - confirm.
        node_mask = (node_renum == -1)
        self._z[node_renum[~node_mask]] = self._z
        self._z = self._z[~node_mask]
        # Computing scale factors
        self._unit_x = np.max(compressed_x) - np.min(compressed_x)
        self._unit_y = np.max(compressed_y) - np.min(compressed_y)
        self._pts = np.vstack((compressed_x/float(self._unit_x),
                               compressed_y/float(self._unit_y))).T
        # Computing triangle points
        self._tris_pts = self._pts[self._triangles]
        # Computing eccentricities
        self._eccs = self._compute_tri_eccentricities(self._tris_pts)
        # Computing dof estimations for HCT triangle shape function
        self._dof = self._compute_dof(kind, dz=dz)
        # Loading HCT element
        self._ReferenceElement = _ReducedHCT_Element()

    def __call__(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('z',))[0]
    __call__.__doc__ = TriInterpolator.docstring__call__

    def gradient(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('dzdx', 'dzdy'))
    gradient.__doc__ = TriInterpolator.docstringgradient + """
        Examples
        --------
        An example of effective application is shown below (plot of the
        direction of the vector field derivated from a known potential field):
        .. plot:: mpl_examples/pylab_examples/trigradient_demo.py
        """

    def _interpolate_single_key(self, return_key, tri_index, x, y):
        # Evaluate the HCT element (value or first derivatives) on the
        # requested triangles, in scaled coordinates.
        tris_pts = self._tris_pts[tri_index]
        alpha = self._get_alpha_vec(x, y, tris_pts)
        ecc = self._eccs[tri_index]
        dof = np.expand_dims(self._dof[tri_index], axis=1)
        if return_key == 'z':
            return self._ReferenceElement.get_function_values(
                alpha, ecc, dof)
        elif return_key in ['dzdx', 'dzdy']:
            J = self._get_jacobian(tris_pts)
            dzdx = self._ReferenceElement.get_function_derivatives(
                alpha, J, ecc, dof)
            if return_key == 'dzdx':
                return dzdx[:, 0, 0]
            else:
                return dzdx[:, 1, 0]
        else:
            raise ValueError("Invalid return_key: " + return_key)

    def _compute_dof(self, kind, dz=None):
        """
        Computes and returns nodal dofs according to kind.

        Parameters
        ----------
        kind : {'min_E', 'geom', 'user'}
            Choice of the _DOF_estimator subclass to perform the gradient
            estimation.
        dz : tuple of array_likes (dzdx, dzdy), optional
            Used only if *kind*='user'; in this case passed to the
            :class:`_DOF_estimator_user`.

        Returns
        -------
        dof : array_like, shape (npts, 2)
            Estimation of the gradient at triangulation nodes (stored as
            degree of freedoms of reduced-HCT triangle elements).
        """
        if kind == 'user':
            if dz is None:
                raise ValueError("For a CubicTriInterpolator with "
                                 "*kind*='user', a valid *dz* "
                                 "argument is expected.")
            TE = _DOF_estimator_user(self, dz=dz)
        elif kind == 'geom':
            TE = _DOF_estimator_geom(self)
        elif kind == 'min_E':
            TE = _DOF_estimator_min_E(self)
        else:
            raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; "
                             "should be one of: "
                             "'user', 'geom', 'min_E'".format(kind))
        return TE.compute_dof_from_df()

    @staticmethod
    def _get_alpha_vec(x, y, tris_pts):
        """
        Fast (vectorized) function to compute barycentric coordinates alpha.

        Parameters
        ----------
        x, y : array-like of dim 1 (shape (nx,))
            Coordinates of the points whose barycentric coordinates are
            requested.
        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
            Coordinates of the containing triangles apexes.

        Returns
        -------
        alpha : array of dim 2 (shape (nx, 3))
            Barycentric coordinates of the points inside the containing
            triangles.
        """
        ndim = tris_pts.ndim-2
        a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
        b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
        abT = np.concatenate([np.expand_dims(a, ndim+1),
                              np.expand_dims(b, ndim+1)], ndim+1)
        ab = _transpose_vectorized(abT)
        x = np.expand_dims(x, ndim)
        y = np.expand_dims(y, ndim)
        OM = np.concatenate([x, y], ndim) - tris_pts[:, 0, :]
        metric = _prod_vectorized(ab, abT)
        # Here we try to deal with the colinear cases.
        # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
        # meaning that we will still return a set of valid barycentric
        # coordinates.
        metric_inv = _pseudo_inv22sym_vectorized(metric)
        Covar = _prod_vectorized(ab, _transpose_vectorized(
            np.expand_dims(OM, ndim)))
        ksi = _prod_vectorized(metric_inv, Covar)
        alpha = _to_matrix_vectorized([
            [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
        return alpha

    @staticmethod
    def _get_jacobian(tris_pts):
        """
        Fast (vectorized) function to compute triangle jacobian matrix.

        Parameters
        ----------
        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
            Coordinates of the containing triangles apexes.

        Returns
        -------
        J : array of dim 3 (shape (nx, 2, 2))
            Jacobian matrices at apex 0 of the triangles.
            J[itri, :, :] is the jacobian matrix at apex 0 of the triangle
            itri, so that the following (matrix) relationship holds:
               [dz/dksi] = [J] x [dz/dx]
            with x: global coordinates
            ksi: element parametric coordinates in triangle first apex
            local basis.
        """
        a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
        b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
        J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
                                   [b[:, 0], b[:, 1]]])
        return J

    @staticmethod
    def _compute_tri_eccentricities(tris_pts):
        """
        Computes triangle eccentricities.

        Parameters
        ----------
        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
            Coordinates of the triangles apexes.

        Returns
        -------
        ecc : array like of dim 2 (shape: (nx, 3))
            The so-called eccentricity parameters [1] needed for
            HCT triangular element.
        """
        a = np.expand_dims(tris_pts[:, 2, :]-tris_pts[:, 1, :], axis=2)
        b = np.expand_dims(tris_pts[:, 0, :]-tris_pts[:, 2, :], axis=2)
        c = np.expand_dims(tris_pts[:, 1, :]-tris_pts[:, 0, :], axis=2)
        # Do not use np.squeeze, this is dangerous if only one triangle
        # in the triangulation...
        dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
        dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
        dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
        # Note that this line will raise a warning for dot_a, dot_b or dot_c
        # zeros, but we choose not to support triangles with duplicate points.
        return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
                                      [(dot_a-dot_c) / dot_b],
                                      [(dot_b-dot_a) / dot_c]])
# FEM element used for interpolation and for solving minimisation
# problem (Reduced HCT element)
class _ReducedHCT_Element():
    """
    Implementation of reduced HCT triangular element with explicit shape
    functions.

    Computes z, dz, d2z and the element stiffness matrix for bending energy:
        E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)

    *** Reference for the shape functions: ***
    [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or
        reduced.
        Michel Bernadou, Kamal Hassan
        International Journal for Numerical Methods in Engineering.
        17(5):784 - 789. 2.01

    *** Element description: ***
    9 dofs: z and dz given at 3 apex
    C1 (conform)
    """
    # 1) Loads matrices to generate shape functions as a function of
    #    triangle eccentricities - based on [1] p.11.
    M = np.array([
        [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
        [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
        [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
        [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
        [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
        [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
    M0 = np.array([
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
        [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
    M1 = np.array([
        [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
    M2 = np.array([
        [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
    # 2) Loads matrices to rotate components of gradient & Hessian
    #    vectors in the reference basis of triangle first apex (a0)
    rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
                          [ 0., 1.], [-1., -1.],
                          [-1., -1.], [ 1., 0.]])
    rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
                           [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
                           [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
    # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2
    #    exact integral - 3 points on each subtriangle.
    # NOTE: as the 2nd derivative is discontinuous, we really need those 9
    # points!
    n_gauss = 9
    gauss_pts = np.array([[13./18.,  4./18.,  1./18.],
                          [ 4./18., 13./18.,  1./18.],
                          [ 7./18.,  7./18.,  4./18.],
                          [ 1./18., 13./18.,  4./18.],
                          [ 1./18.,  4./18., 13./18.],
                          [ 4./18.,  7./18.,  7./18.],
                          [ 4./18.,  1./18., 13./18.],
                          [13./18.,  1./18.,  4./18.],
                          [ 7./18.,  4./18.,  7./18.]], dtype=np.float64)
    gauss_w = np.ones([9], dtype=np.float64) / 9.
    # 4) Stiffness matrix for curvature energy
    E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
    # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
    J0_to_J1 = np.array([[-1.,  1.], [-1.,  0.]])
    J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])

    def get_function_values(self, alpha, ecc, dofs):
        """
        Parameters
        ----------
        alpha : is a (N x 3 x 1) array (array of column-matrices) of
            barycentric coordinates,
        ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
            eccentricities,
        dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
            degrees of freedom.

        Returns
        -------
        Returns the N-array of interpolated function values.
        """
        # Pick the containing Clough-Tocher sub-triangle (smallest
        # barycentric coordinate) and rotate everything so that the
        # computation is done in that sub-triangle's reference frame.
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        x_sq = x*x
        y_sq = y*y
        z_sq = z*z
        V = _to_matrix_vectorized([
            [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
            [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
        prod = _prod_vectorized(self.M, V)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, V))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, V))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, V))
        # Roll the shape functions back to the original dof ordering.
        s = _roll_vectorized(prod, 3*subtri, axis=0)
        return _prod_vectorized(dofs, s)[:, 0, 0]

    def get_function_derivatives(self, alpha, J, ecc, dofs):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices of
        barycentric coordinates)
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
        eccentricities)
        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
        degrees of freedom.

        Returns
        -------
        Returns the values of interpolated function derivatives [dz/dx, dz/dy]
        in global coordinates at locations alpha, as a column-matrices of
        shape (N x 2 x 1).
        """
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        x_sq = x*x
        y_sq = y*y
        z_sq = z*z
        # Gradient (w.r.t. the two independent barycentric coordinates) of
        # the cubic monomial basis.
        dV = _to_matrix_vectorized([
            [    -3.*x_sq,     -3.*x_sq],
            [     3.*y_sq,           0.],
            [          0.,      3.*z_sq],
            [     -2.*x*z, -2.*x*z+x_sq],
            [-2.*x*y+x_sq,      -2.*x*y],
            [ 2.*x*y-y_sq,        -y_sq],
            [      2.*y*z,         y_sq],
            [        z_sq,       2.*y*z],
            [       -z_sq,  2.*x*z-z_sq],
            [     x*z-y*z,      x*y-y*z]])
        # Puts back dV in first apex basis
        dV = _prod_vectorized(dV, _extract_submatrices(
            self.rotate_dV, subtri, block_size=2, axis=0))
        prod = _prod_vectorized(self.M, dV)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, dV))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, dV))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, dV))
        dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
        dfdksi = _prod_vectorized(dofs, dsdksi)
        # In global coordinates:
        # Here we try to deal with the simpliest colinear cases, returning a
        # null matrix.
        J_inv = _safe_inv22_vectorized(J)
        dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
        return dfdx

    def get_function_hessians(self, alpha, J, ecc, dofs):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
        barycentric coordinates
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities
        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
        degrees of freedom.

        Returns
        -------
        Returns the values of interpolated function 2nd-derivatives
        [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
        as a column-matrices of shape (N x 3 x 1).
        """
        d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
        d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
        H_rot = self.get_Hrot_from_J(J)
        d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
        return _transpose_vectorized(d2fdx2)

    def get_d2Sidksij2(self, alpha, ecc):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
        barycentric coordinates
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities

        Returns
        -------
        Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions
        expressed in covariante coordinates in first apex basis.
        """
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        d2V = _to_matrix_vectorized([
            [     6.*x,      6.*x,      6.*x],
            [     6.*y,        0.,        0.],
            [       0.,      6.*z,        0.],
            [     2.*z, 2.*z-4.*x, 2.*z-2.*x],
            [2.*y-4.*x,      2.*y, 2.*y-2.*x],
            [2.*x-4.*y,        0.,     -2.*y],
            [     2.*z,        0.,      2.*y],
            [       0.,      2.*y,      2.*z],
            [       0., 2.*x-4.*z,     -2.*z],
            [    -2.*z,     -2.*y,     x-y-z]])
        # Puts back d2V in first apex basis
        d2V = _prod_vectorized(d2V, _extract_submatrices(
            self.rotate_d2V, subtri, block_size=3, axis=0))
        prod = _prod_vectorized(self.M, d2V)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, d2V))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, d2V))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, d2V))
        d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
        return d2sdksi2

    def get_bending_matrices(self, J, ecc):
        """
        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities

        Returns
        -------
        Returns the element K matrices for bending energy expressed in
        GLOBAL nodal coordinates.
        K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
        tri_J is needed to rotate dofs from local basis to global basis
        """
        n = np.size(ecc, 0)

        # 1) matrix to rotate dofs in global coordinates
        J1 = _prod_vectorized(self.J0_to_J1, J)
        J2 = _prod_vectorized(self.J0_to_J2, J)
        DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
        DOF_rot[:, 0, 0] = 1
        DOF_rot[:, 3, 3] = 1
        DOF_rot[:, 6, 6] = 1
        DOF_rot[:, 1:3, 1:3] = J
        DOF_rot[:, 4:6, 4:6] = J1
        DOF_rot[:, 7:9, 7:9] = J2

        # 2) matrix to rotate Hessian in global coordinates.
        H_rot, area = self.get_Hrot_from_J(J, return_area=True)

        # 3) Computes stiffness matrix
        # Gauss quadrature.
        K = np.zeros([n, 9, 9], dtype=np.float64)
        weights = self.gauss_w
        pts = self.gauss_pts
        for igauss in range(self.n_gauss):
            alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
            alpha = np.expand_dims(alpha, 3)
            weight = weights[igauss]
            d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
            d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
            K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
                                           _transpose_vectorized(d2Skdx2))

        # 4) With nodal (not elem) dofs
        K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
                                              K), DOF_rot)

        # 5) Need the area to compute total element energy
        return _scalar_vectorized(area, K)

    def get_Hrot_from_J(self, J, return_area=False):
        """
        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)

        Returns
        -------
        Returns H_rot used to rotate Hessian from local basis of first apex,
        to global coordinates.
        if *return_area* is True, returns also the triangle area (0.5*det(J))
        """
        # Here we try to deal with the simpliest colinear cases ; a null
        # energy and area is imposed.
        J_inv = _safe_inv22_vectorized(J)
        Ji00 = J_inv[:, 0, 0]
        Ji11 = J_inv[:, 1, 1]
        Ji10 = J_inv[:, 1, 0]
        Ji01 = J_inv[:, 0, 1]
        H_rot = _to_matrix_vectorized([
            [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
            [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
            [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
        if not return_area:
            return H_rot
        else:
            area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
            return H_rot, area

    def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
        """
        Builds K and F for the following elliptic formulation:
        minimization of curvature energy with value of function at node
        imposed and derivatives 'free'.

        Builds the global Kff matrix in coo format.
        Builds the full Ff vec Ff = - Kfc x Uc

        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities
        *triangles* is a (N x 3) array of nodes indexes.
        *Uc* is (N x 3) array of imposed displacements at nodes

        Returns
        -------
        (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
        (row, col) entries must be summed.
        Ff: force vector - dim npts * 3
        """
        ntri = np.size(ecc, 0)
        vec_range = np.arange(ntri, dtype=np.int32)
        c_indices = -np.ones(ntri, dtype=np.int32)  # for unused dofs, -1
        f_dof = [1, 2, 4, 5, 7, 8]
        c_dof = [0, 3, 6]

        # vals, rows and cols indices in global dof numbering
        f_dof_indices = _to_matrix_vectorized([[
            c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
            c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
            c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])

        expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
        f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
                                         _transpose_vectorized(expand_indices))
        f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
        K_elem = self.get_bending_matrices(J, ecc)

        # Extracting sub-matrices
        # Explanation & notations:
        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx)
        # * Subscript c denotes 'condensated' (imposed) degrees of freedom
        #    (i.e. z at all nodes)
        # * F = [Ff, Fc] is the force vector
        # * U = [Uf, Uc] is the imposed dof vector
        #        [ Kff Kfc ]
        # * K =  [         ]  is the laplacian stiffness matrix
        #        [ Kcf Kcc ]
        # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc

        # Computing Kff stiffness matrix in sparse coo format
        Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
        Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
        Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])

        # Computing Ff force vector in sparse coo format
        Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
        Uc_elem = np.expand_dims(Uc, axis=2)
        Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
        Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]

        # Extracting Ff force vector in dense format
        # We have to sum duplicate indices - using bincount
        Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
        return Kff_rows, Kff_cols, Kff_vals, Ff
# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
# _DOF_estimator_min_E
# Private classes used to compute the degree of freedom of each triangular
# element for the TriCubicInterpolator.
class _DOF_estimator():
    """
    Abstract base class for classes used to perform estimation of a function
    first derivatives, and deduce the dofs for a CubicTriInterpolator using a
    reduced HCT element formulation.

    Derived classes implement ``compute_dz(self, **kwargs)``, returning
    ``np.vstack([dfx, dfy]).T`` where dfx, dfy are the estimates of the two
    gradient coordinates.
    """
    def __init__(self, interpolator, **kwargs):
        if not isinstance(interpolator, CubicTriInterpolator):
            raise ValueError("Expected a CubicTriInterpolator object")
        # Grab the (already compressed/renumbered) geometry and values
        # from the interpolator.
        self._pts = interpolator._pts
        self._tris_pts = interpolator._tris_pts
        self.z = interpolator._z
        self._triangles = interpolator._triangles
        self._unit_x = interpolator._unit_x
        self._unit_y = interpolator._unit_y
        self.dz = self.compute_dz(**kwargs)
        # NOTE(review): the result of this call is discarded; kept for
        # compatibility with possible subclass side effects - confirm.
        self.compute_dof_from_df()

    def compute_dz(self, **kwargs):
        # Gradient estimation strategy; supplied by derived classes.
        raise NotImplementedError

    def compute_dof_from_df(self):
        """
        Compute reduced-HCT elements degrees of freedom, knowing the
        gradient.
        """
        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
        tri_z = self.z[self._triangles]
        tri_dz = self.dz[self._triangles]
        return self.get_dof_vec(tri_z, tri_dz, J)

    @staticmethod
    def get_dof_vec(tri_z, tri_dz, J):
        """
        Compute the dof vector of a triangle, knowing the value of f, df and
        of the local Jacobian at each node.

        *tri_z*: array of shape (3,) of f nodal values
        *tri_dz*: array of shape (3,2) of df/dx, df/dy nodal values
        *J*: Jacobian matrix in local basis of apex 0

        Returns dof array of shape (9,) so that for each apex iapex:
            dof[iapex*3+0] = f(Ai)
            dof[iapex*3+1] = df(Ai).(AiAi+)
            dof[iapex*3+2] = df(Ai).(AiAi-)]
        """
        n_tris = tri_z.shape[0]
        # Jacobians expressed in each apex's own local basis.
        apex_jacobians = (
            J,
            _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J),
            _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J))
        cols = [_prod_vectorized(rot,
                                 np.expand_dims(tri_dz[:, iapex, :], axis=3))
                for iapex, rot in enumerate(apex_jacobians)]
        dfdksi = _to_matrix_vectorized([
            [cols[0][:, 0, 0], cols[1][:, 0, 0], cols[2][:, 0, 0]],
            [cols[0][:, 1, 0], cols[1][:, 1, 0], cols[2][:, 1, 0]]])
        dof = np.zeros([n_tris, 9], dtype=np.float64)
        dof[:, 0:7:3] = tri_z
        dof[:, 1:8:3] = dfdksi[:, 0]
        dof[:, 2:9:3] = dfdksi[:, 1]
        return dof
class _DOF_estimator_user(_DOF_estimator):
    """ dz is imposed by user / Accounts for scaling if any """
    def compute_dz(self, dz):
        """
        Rescale the user-provided (dzdx, dzdy) gradient components by the
        interpolator's axis units and stack them as an (npts, 2) array.
        """
        dzdx, dzdy = dz
        scaled_dzdx = dzdx * self._unit_x
        scaled_dzdy = dzdy * self._unit_y
        return np.vstack([scaled_dzdx, scaled_dzdy]).T
class _DOF_estimator_geom(_DOF_estimator):
    """ Fast 'geometric' approximation, recommended for large arrays. """
    def compute_dz(self):
        """
        self.df is computed as weighted average of _triangles sharing a common
        node. On each triangle itri f is first assumed linear (= ~f), which
        allows to compute d~f[itri]
        Then the following approximation of df nodal values is then proposed:
            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
        The weighted coeff. w[itri] are proportional to the angle of the
        triangle itri at apex ipt
        """
        el_geom_w = self.compute_geom_weights()
        el_geom_grad = self.compute_geom_grads()
        # Sum of weights coeffs
        # bincount over the flattened triangle->node indices accumulates
        # per-node sums of the per-(triangle, apex) weights.
        w_node_sum = np.bincount(np.ravel(self._triangles),
                                 weights=np.ravel(el_geom_w))
        # Sum of weighted df = (dfx, dfy)
        dfx_el_w = np.empty_like(el_geom_w)
        dfy_el_w = np.empty_like(el_geom_w)
        for iapex in range(3):
            dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
            dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
        dfx_node_sum = np.bincount(np.ravel(self._triangles),
                                   weights=np.ravel(dfx_el_w))
        dfy_node_sum = np.bincount(np.ravel(self._triangles),
                                   weights=np.ravel(dfy_el_w))
        # Estimation of df
        dfx_estim = dfx_node_sum/w_node_sum
        dfy_estim = dfy_node_sum/w_node_sum
        return np.vstack([dfx_estim, dfy_estim]).T
    def compute_geom_weights(self):
        """
        Builds the (nelems x 3) weights coeffs of _triangles angles,
        renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
        """
        weights = np.zeros([np.size(self._triangles, 0), 3])
        tris_pts = self._tris_pts
        for ipt in range(3):
            # p0 is the current apex; p1, p2 its two neighbours.
            p0 = tris_pts[:, (ipt) % 3, :]
            p1 = tris_pts[:, (ipt+1) % 3, :]
            p2 = tris_pts[:, (ipt-1) % 3, :]
            alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
            alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
            # In the below formula we could take modulo 2. but
            # modulo 1. is safer regarding round-off errors (flat triangles).
            angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
            # Weight proportional to angle up np.pi/2 ; null weight for
            # degenerated cases 0. and np.pi (Note that `angle` is normalized
            # by np.pi)
            weights[:, ipt] = 0.5 - np.abs(angle-0.5)
        return weights
    def compute_geom_grads(self):
        """
        Compute the (global) gradient component of f assumed linear (~f).
        returns array df of shape (nelems,2)
        df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz
        """
        tris_pts = self._tris_pts
        tris_f = self.z[self._triangles]
        # Edge vectors from apex 0 to apexes 1 and 2.
        dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
        dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
        dM = np.dstack([dM1, dM2])
        # Here we try to deal with the simpliest colinear cases: a null
        # gradient is assumed in this case.
        dM_inv = _safe_inv22_vectorized(dM)
        dZ1 = tris_f[:, 1] - tris_f[:, 0]
        dZ2 = tris_f[:, 2] - tris_f[:, 0]
        dZ = np.vstack([dZ1, dZ2]).T
        df = np.empty_like(dZ)
        # With np.einsum : could be 'ej,eji->ei'
        df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
        df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
        return df
class _DOF_estimator_min_E(_DOF_estimator_geom):
    """
    The 'smoothest' approximation, df is computed through global minimization
    of the bending energy:
      E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
    """
    def __init__(self, Interpolator):
        # Needs the per-triangle eccentricities on top of the data used by
        # _DOF_estimator_geom.
        self._eccs = Interpolator._eccs
        _DOF_estimator_geom.__init__(self, Interpolator)
    def compute_dz(self):
        """
        Elliptic solver for bending energy minimization.
        Uses a dedicated 'toy' sparse Jacobi PCG solver.
        """
        # Initial guess for iterative PCG solver: the fast 'geom' estimate.
        dz_init = _DOF_estimator_geom.compute_dz(self)
        Uf0 = np.ravel(dz_init)
        reference_element = _ReducedHCT_Element()
        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
        eccs = self._eccs
        triangles = self._triangles
        Uc = self.z[self._triangles]
        # Building stiffness matrix and force vector in coo format
        Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
            J, eccs, triangles, Uc)
        # Building sparse matrix and solving minimization problem
        # We could use scipy.sparse direct solver ; however to avoid this
        # external dependency an implementation of a simple PCG solver with
        # a simple diagonal Jacobi preconditioner is implemented.
        tol = 1.e-10
        n_dof = Ff.shape[0]
        Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
                                     shape=(n_dof, n_dof))
        Kff_coo.compress_csc()
        Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
        # If the PCG did not converge, we return the best guess between Uf0
        # and Uf.
        err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
        if err0 < err:
            # Maybe a good occasion to raise a warning here ?
            warnings.warn("In TriCubicInterpolator initialization, PCG sparse"
                          " solver did not converge after 1000 iterations. "
                          "`geom` approximation is used instead of `min_E`")
            Uf = Uf0
        # Building dz from Uf
        # Uf interleaves the x- and y- gradient components per point.
        dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
        dz[:, 0] = Uf[::2]
        dz[:, 1] = Uf[1::2]
        return dz
# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
# a PCG sparse solver for (symmetric) elliptic problems.
class _Sparse_Matrix_coo:
def __init__(self, vals, rows, cols, shape):
"""
Creates a sparse matrix in coo format
*vals*: arrays of values of non-null entries of the matrix
*rows*: int arrays of rows of non-null entries of the matrix
*cols*: int arrays of cols of non-null entries of the matrix
*shape*: 2-tuple (n,m) of matrix shape
"""
self.n, self.m = shape
self.vals = np.asarray(vals, dtype=np.float64)
self.rows = np.asarray(rows, dtype=np.int32)
self.cols = np.asarray(cols, dtype=np.int32)
def dot(self, V):
"""
Dot product of self by a vector *V* in sparse-dense to dense format
*V* dense vector of shape (self.m,)
"""
assert V.shape == (self.m,)
# For a more generic implementation we could use below kw argument
# minlength=self.m of bincount ; however:
# - it is new in numpy 1.6
# - it is unecessary when each row have at least 1 entry in global
# matrix, which is the case here.
return np.bincount(self.rows, weights=self.vals*V[self.cols])
def compress_csc(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csc format.
"""
_, unique, indices = np.unique(
self.rows + self.n*self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def compress_csr(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csr format.
"""
_, unique, indices = np.unique(
self.m*self.rows + self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def to_dense(self):
"""
Returns a dense matrix representing self.
Mainly for debugging purposes.
"""
ret = np.zeros([self.n, self.m], dtype=np.float64)
nvals = self.vals.size
for i in range(nvals):
ret[self.rows[i], self.cols[i]] += self.vals[i]
return ret
def __str__(self):
return self.to_dense().__str__()
@property
def diag(self):
"""
Returns the (dense) vector of the diagonal elements.
"""
in_diag = (self.rows == self.cols)
diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
diag[self.rows[in_diag]] = self.vals[in_diag]
return diag
def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
    """
    Use Preconditioned Conjugate Gradient iteration to solve A x = b
    A simple Jacobi (diagonal) preconditionner is used.
    Parameters
    ----------
    A: _Sparse_Matrix_coo
        *A* must have been compressed before by compress_csc or
        compress_csr method.
    b: array
        Right hand side of the linear system.
    Returns
    ----------
    x: array.
        The converged solution.
    err: float
        The absolute error np.linalg.norm(A.dot(x) - b)
    Other parameters
    ----------
    x0: array.
        Starting guess for the solution.
    tol: float.
        Tolerance to achieve. The algorithm terminates when the relative
        residual is below tol.
    maxiter: integer.
        Maximum number of iterations. Iteration will stop
        after maxiter steps even if the specified tolerance has not
        been achieved.
    """
    n = b.size
    # The solver only handles square systems.
    assert A.n == n
    assert A.m == n
    b_norm = np.linalg.norm(b)
    # Jacobi pre-conditioner
    kvec = A.diag
    # For diag elem < 1e-6 we keep 1e-6.
    kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)
    # Initial guess
    if x0 is None:
        x = np.zeros(n)
    else:
        x = x0
    r = b - A.dot(x)
    w = r/kvec
    p = np.zeros(n)
    beta = 0.0
    # rho = r . (M^-1 r) is the preconditioned residual norm squared; the
    # loop stops when sqrt(rho) drops below tol relative to ||b||.
    rho = np.dot(r, w)
    k = 0
    # Following C. T. Kelley
    while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
        p = w + beta*p
        z = A.dot(p)
        alpha = rho/np.dot(p, z)
        r = r - alpha*z
        w = r/kvec
        rhoold = rho
        rho = np.dot(r, w)
        x = x + alpha*p
        beta = rho/rhoold
        #err = np.linalg.norm(A.dot(x) - b)  # absolute accuracy - not used
        k += 1
    # Report the true (unpreconditioned) residual of the final iterate.
    err = np.linalg.norm(A.dot(x) - b)
    return x, err
# The following private functions:
# :func:`_inv22_vectorized`
# :func:`_safe_inv22_vectorized`
# :func:`_pseudo_inv22sym_vectorized`
# :func:`_prod_vectorized`
# :func:`_scalar_vectorized`
# :func:`_transpose_vectorized`
# :func:`_roll_vectorized`
# :func:`_to_matrix_vectorized`
# :func:`_extract_submatrices`
# provide fast numpy implementation of some standard operations on arrays of
# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
def _inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices.
"""
assert (M.ndim == 3)
assert (M.shape[-2:] == (2, 2))
M_inv = np.empty_like(M)
delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
# Development note: Dealing with pathologic 'flat' triangles in the
# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions
# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
#
# Goals:
# 1) The CubicTriInterpolator should be able to handle flat or almost flat
# triangles without raising an error,
# 2) These degenerated triangles should have no impact on the automatic dof
# calculation (associated with null weight for the _DOF_estimator_geom and
# with null energy for the _DOF_estimator_min_E),
# 3) Linear patch test should be passed exactly on degenerated meshes,
# 4) Interpolation (with :meth:`_interpolate_single_key` or
# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
# the pathologic triangles, to interact correctly with a TriRefiner class.
#
# Difficulties:
# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
# *metric* (the metric tensor = J x J.T). Computation of the local
# tangent plane is also problematic.
#
# Implementation:
# Most of the time, when computing the inverse of a rank-deficient matrix it
# is safe to simply return the null matrix (which is the implementation in
# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
# enforced by:
# - null area hence null energy in :class:`_DOF_estimator_min_E`
# - angles close or equal to 0 or np.pi hence null weight in
# :class:`_DOF_estimator_geom`.
# Note that the function angle -> weight is continuous and maximum for an
# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
# The exception is the computation of barycentric coordinates, which is done
# by inversion of the *metric* matrix. In this case, we need to compute a set
# of valid coordinates (1 among numerous possibilities), to ensure point 4).
# We benefit here from the symmetry of metric = J x J.T, which makes it easier
# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient
matrices.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
# We set delta_inv to 0. in case of a rank deficient matrix ; a
# rank-deficient input matrix *M* will lead to a null matrix in output
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
delta_inv = 1./delta
else:
# 'Pathologic' flow.
delta_inv = np.zeros(M.shape[0])
delta_inv[rank2] = 1./delta[rank2]
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
def _pseudo_inv22sym_vectorized(M):
"""
Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the
(Moore-Penrose) pseudo-inverse for rank-deficient matrices.
In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
In case M is of rank 0, we return the null matrix.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
M_inv[:, 0, 0] = M[:, 1, 1] / delta
M_inv[:, 0, 1] = -M[:, 0, 1] / delta
M_inv[:, 1, 0] = -M[:, 1, 0] / delta
M_inv[:, 1, 1] = M[:, 0, 0] / delta
else:
# 'Pathologic' flow.
# Here we have to deal with 2 sub-cases
# 1) First sub-case: matrices of rank 2:
delta = delta[rank2]
M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
# 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
rank01 = ~rank2
tr = M[rank01, 0, 0] + M[rank01, 1, 1]
tr_zeros = (np.abs(tr) < 1.e-8)
sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
#sq_tr_inv = 1. / tr**2
M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
return M_inv
def _prod_vectorized(M1, M2):
"""
Matrix product between arrays of matrices, or a matrix and an array of
matrices (*M1* and *M2*)
"""
sh1 = M1.shape
sh2 = M2.shape
assert len(sh1) >= 2
assert len(sh2) >= 2
assert sh1[-1] == sh2[-2]
ndim1 = len(sh1)
t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]
return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
M2[..., np.newaxis, :], -3)
def _scalar_vectorized(scalar, M):
"""
Scalar product between scalars and matrices.
"""
return scalar[:, np.newaxis, np.newaxis]*M
def _transpose_vectorized(M):
"""
Transposition of an array of matrices *M*.
"""
ndim = M.ndim
assert ndim == 3
return np.transpose(M, [0, ndim-1, ndim-2])
def _roll_vectorized(M, roll_indices, axis):
"""
Rolls an array of matrices along an axis according to an array of indices
*roll_indices*
*axis* can be either 0 (rolls rows) or 1 (rolls columns).
"""
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
elif axis == 1:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
def _to_matrix_vectorized(M):
"""
Builds an array of matrices from individuals np.arrays of identical
shapes.
*M*: ncols-list of nrows-lists of shape sh.
Returns M_res np.array of shape (sh, nrow, ncols) so that:
M_res[...,i,j] = M[i][j]
"""
assert isinstance(M, (tuple, list))
assert all([isinstance(item, (tuple, list)) for item in M])
c_vec = np.asarray([len(item) for item in M])
assert np.all(c_vec-c_vec[0] == 0)
r = len(M)
c = c_vec[0]
M00 = np.asarray(M[0][0])
dt = M00.dtype
sh = [M00.shape[0], r, c]
M_ret = np.empty(sh, dtype=dt)
for irow in range(r):
for icol in range(c):
M_ret[:, irow, icol] = np.asarray(M[irow][icol])
return M_ret
def _extract_submatrices(M, block_indices, block_size, axis):
"""
Extracts selected blocks of a matrices *M* depending on parameters
*block_indices* and *block_size*.
Returns the array of extracted matrices *Mres* so that:
M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
"""
assert block_indices.ndim == 1
assert axis in [0, 1]
r, c = M.shape
if axis == 0:
sh = [block_indices.shape[0], block_size, c]
elif axis == 1:
sh = [block_indices.shape[0], r, block_size]
dt = M.dtype
M_res = np.empty(sh, dtype=dt)
if axis == 0:
for ir in range(block_size):
M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
elif axis == 1:
for ic in range(block_size):
M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
return M_res
| mit |
mwoc/pydna | run_m2.py | 1 | 3235 | import sys
import getopt
import collections
from numpy import linspace
import matplotlib.pyplot as plt
import m2_s_t
from dna.model import IterateModel
def round_down(num, divisor):
    """Round *num* down to the nearest multiple of *divisor*."""
    remainder = num % divisor
    return num - remainder
def round_up(num, divisor):
    """Round *num* up to the nearest multiple of *divisor*.

    Bug fix: the previous implementation returned ``num + (num % divisor)``,
    which is generally not a multiple of *divisor* (e.g. 23 -> 26 for
    divisor 10). ``(-num) % divisor`` is the distance from *num* up to the
    next multiple (0 if *num* already is one), mirroring round_down.
    """
    return num + (-num % divisor)
def iterable(obj):
    """Return True if *obj* is an iterable (list, tuple, dict, str, ...).

    Bug fix: the bare ``collections.Iterable`` alias is deprecated since
    Python 3.3 and removed in Python 3.10; use ``collections.abc.Iterable``
    instead, with a fallback for older interpreters.
    """
    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:
        from collections import Iterable  # legacy fallback
    return isinstance(obj, Iterable)
print('Loaded environment. Simulating...')
# Simulation conditions
# Plant/model boundary conditions passed to IterateModel; temperatures in
# the 't_*' keys, pressure 'p_hi', efficiencies 'nu_*', heat duties 'Q_*',
# pinch temperature differences 'pinch_*' -- units presumably degC / bar /
# kW as suggested by the plot labels below; TODO confirm against m2_s_t.
cond = {}
cond['t_steam'] = 450
cond['p_hi'] = 100
cond['t_con'] = 20
cond['molefrac_rcvr'] = 0.4 # Weak condition
cond['molefrac_stor'] = 0.6 # Weak condition
cond['nu_is'] = 0.8
cond['nu_mech'] = 0.98
cond['nu_pump'] = 0.90
cond['Q_rcvr'] = 0
cond['Q_stor'] = 25000
cond['dT_con'] = 15
cond['pinch_hex'] = 5
cond['pinch_con'] = 4
cond['pinch_stor'] = 5
cond['Nseg'] = 11
cond['Nseg_con'] = 1
# Handle command line options
# Long options override the defaults above, e.g. --pressure=120.
if len(sys.argv) > 1:
    print(sys.argv)
    _args = sys.argv.copy()
    _args.pop(0)
    optlist, args = getopt.getopt(_args, '', ['pressure=', 'y-rcvr=', 'y-stor=', 'y-lpp='])
    for i, opt in enumerate(optlist):
        if opt[0] == '--pressure':
            cond['p_hi'] = float(opt[1])
        elif opt[0] == '--y-rcvr':
            cond['molefrac_rcvr'] = float(opt[1])
        elif opt[0] == '--y-stor':
            cond['molefrac_stor'] = float(opt[1])
        elif opt[0] == '--y-lpp':
            cond['molefrac_lpp'] = float(opt[1])
# Simulation guesses (iterate!!):
# Initial guesses for the iterated mole fractions; molefrac_lpp keeps a
# user-supplied --y-lpp value if one was given above.
cond['molefrac_n15'] = cond['molefrac_stor']
cond['molefrac_n41'] = cond['molefrac_rcvr']
if not 'molefrac_lpp' in cond:
    cond['molefrac_lpp'] = (cond['molefrac_rcvr'] + cond['molefrac_stor'])/4
cond['h_node5'] = False # That means no start value is given
cond['t_node15.1'] = False
cond['t_node41.1'] = False
cond['t_node17.1'] = False
# Pass initial conditions to model and run/iterate it
# NOTE(review): if IterateModel(...) itself raises anything other than
# KeyboardInterrupt, 'runner' and 'model' are unbound and the finally
# block below raises NameError instead of the original error -- confirm
# whether that is acceptable for this driver script.
try:
    runner = IterateModel(m2_s_t.MyModel, cond)
    model = runner.run()
except KeyboardInterrupt:
    # If it takes too long, we can also just return the last iteration
    print('Halted execution..')
    model = runner.lastRun
finally:
    #eff = model.result['eff']
    # File stem encodes the key simulation parameters for traceability.
    simname = 'm2-p{0:.2f}-ys{1:.2f}-yb{2:.2f}'.format(cond['p_hi'], cond['molefrac_n15'], cond['molefrac_n41'])
    # Export result
    model.export('m2/'+simname)
    # Export log
    runner.export('m2/'+simname+'-log')
print('Plotting...')
com = model.result
# Plot the hot/cold temperature profiles of every heat-exchanger-like
# entry (dict-like results containing a 'Th' key).
for i in com:
    if iterable(com[i]) and 'Th' in com[i]:
        curr = com[i]
        # Efficiency calculation seems inaccurate. eff: {2:.2%},
        _title = '{0} - Pinch: {1:.2f}, Q: {3:.2f} [kW]'.format(i.capitalize(), curr['dTmin'], curr['eff'], curr['Q'])
        x = linspace(0,1,len(curr['Th']))
        # y-limits snapped to multiples of 10 with 1 degree of padding.
        miny = round_down(min(min(curr['Tc']),min(curr['Th']))-1,10)
        maxy = round_up(max(max(curr['Tc']),max(curr['Th']))+1,10)
        plt.plot(x, curr['Th'], 'r->',label='Hot')
        plt.plot(x, curr['Tc'], 'b-<',label='Cold')
        plt.xlabel('Location in HEX')
        plt.ylabel(r'Temperature [$^\circ$C]')
        plt.title(_title)
        plt.ylim(miny,maxy)
        plt.grid(True)
        plt.savefig('output/m2/m2-pinch_' + str(i) + '.png')
        plt.close()
    else:
        # Do nothing
        pass
# Done plotting all heat exchangers.
print('Finished execution') | bsd-3-clause |
matplotlib/mpl-probscale | setup.py | 1 | 1593 | # Setup script for the probscale package
#
# Usage: python setup.py install
import os
from setuptools import setup, find_packages
# Package metadata for mpl-probscale.
# Bug fix: corrected the user-facing typo "Probabily" -> "Probability" in
# the package description shown on PyPI.
DESCRIPTION = "mpl-probscale: Probability scales for matplotlib"
LONG_DESCRIPTION = DESCRIPTION
NAME = "probscale"
VERSION = "0.2.3"
AUTHOR = "Paul Hobson (Geosyntec Consultants)"
AUTHOR_EMAIL = "phobson@geosyntec.com"
URL = "https://github.com/matplotlib/mpl-probscale"
DOWNLOAD_URL = "https://github.com/matplotlib/mpl-probscale/archive/master.zip"
LICENSE = "BSD 3-clause"
PACKAGES = find_packages()
PLATFORMS = "Python 2.7, 3.4 and later."
CLASSIFIERS = [
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Intended Audience :: Science/Research",
    "Topic :: Software Development :: Libraries :: Python Modules",
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]
INSTALL_REQUIRES = ['numpy', 'matplotlib']
# Ship the baseline images used by the image-comparison test suites.
PACKAGE_DATA = {
    'probscale.tests.baseline_images.test_viz': ['*png'],
    'probscale.tests.baseline_images.test_probscale': ['*png'],
}
setup(
    name=NAME,
    version=VERSION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    download_url=DOWNLOAD_URL,
    license=LICENSE,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    platforms=PLATFORMS,
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIRES,
    zip_safe=False,
)
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
# Public API of sklearn.ensemble: the ensemble estimator classes, followed
# by the implementation submodules re-exported for convenience.
__all__ = ["BaseEnsemble",
           "RandomForestClassifier", "RandomForestRegressor",
           "RandomTreesEmbedding", "ExtraTreesClassifier",
           "ExtraTreesRegressor", "BaggingClassifier",
           "BaggingRegressor", "GradientBoostingClassifier",
           "GradientBoostingRegressor", "AdaBoostClassifier",
           "AdaBoostRegressor", "VotingClassifier",
           "bagging", "forest", "gradient_boosting",
           "partial_dependence", "weight_boosting"]
| bsd-3-clause |
DJArmstrong/autovet | Features/Centroiding/scripts/binning.py | 4 | 10376 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 21:18:27 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: mg719@cam.ac.uk
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy import stats #needed for stats.nanmean/median on ngtshead
######################################################################
# BINNING WITHOUT TIME GAPS
# !!! DO NOT USE FOR COMBINING DIFFERENT NIGHTS !!!
######################################################################
def binning1D(arr, bin_width, setting='mean', normalize=False):
    """Bin a 1D array into consecutive chunks of *bin_width* samples.

    WARNING: this does not respect boundaries between different nights;
    it will average data from different nights.
    Returns (binarr, binarr_err); the error is the NaN-aware std for
    setting='mean', or 1.48 * MAD for setting='median'.
    """
    n_samples = len(arr)
    n_bins = np.int64(np.ceil(1.*n_samples / bin_width))
    binarr, binarr_err = np.zeros((2, n_bins))
    width = int(bin_width)
    for ibin in range(n_bins):
        chunk = arr[ibin*width:(ibin+1)*width]
        if setting == 'mean':
            binarr[ibin] = np.nanmean(chunk)
            binarr_err[ibin] = np.nanstd(chunk)
        elif setting == 'median':
            binarr[ibin] = np.nanmedian(chunk)
            binarr_err[ibin] = 1.48 * np.nanmedian(abs(chunk - binarr[ibin]))
    if normalize == True:
        med = np.nanmedian(binarr)
        binarr /= med
        binarr_err /= med
    return binarr, binarr_err
def binning2D(arr, bin_width, setting='mean', normalize=False, axis=1):
    """Bin a 2D array (objects on axis 0, time stamps on axis 1).

    WARNING: this does not respect boundaries between different nights;
    it will average data from different nights.
    Returns (binarr, binarr_err), each of shape (N_objs, N_bins); the error
    is the NaN-aware std for setting='mean', or 1.48 * MAD for
    setting='median'.
    """
    N_time = arr.shape[1]
    N_objs = arr.shape[0]
    N_bins = np.int64(np.ceil(1.*N_time / bin_width))
    binarr, binarr_err = np.zeros((2, N_objs, N_bins))
    bin_width = int(bin_width)
    if setting == 'mean':
        for nn in range(N_bins):
            binarr[:, nn] = np.nanmean(arr[:, nn*bin_width:(nn+1)*bin_width], axis=axis)
            binarr_err[:, nn] = np.nanstd(arr[:, nn*bin_width:(nn+1)*bin_width], axis=axis)
    if setting == 'median':
        for nn in range(N_bins):
            binarr[:, nn] = np.nanmedian(arr[:, nn*bin_width:(nn+1)*bin_width], axis=axis)
            # Bug fix: subtract each object's own bin median (kept as a
            # trailing length-1 axis so it broadcasts over the time axis)
            # and reduce along *axis*. The previous code broadcast
            # (N_objs, w) - (N_objs,), which raises for w != N_objs, and
            # collapsed the median to a single scalar for all objects.
            binarr_err[:, nn] = 1.48 * np.nanmedian(
                np.abs(arr[:, nn*bin_width:(nn+1)*bin_width]
                       - binarr[:, nn:nn+1]),
                axis=axis)
    if normalize == True:
        med = np.nanmedian(binarr)
        binarr /= med
        binarr_err /= med
    return binarr, binarr_err
######################################################################
# BINNING WITH TIME GAPS
# !!! USE THIS FOR COMBINING DIFFERENT NIGHTS !!!
######################################################################
def bin_edge_indices(time1D, bin_width, timegap, N_time):
    """ DETERMINE ALL THE BIN-EDGE-INDICES (TO NOT BIN OVER DIFFERENT NIGHTS)"""
    """ this currently relies on the fact that timestamps for all are approximately the same
    (given for the case of a HJD array that represents MJD values with small corrections)"""
    # NOTE(review): the second string literal above is a free-standing
    # expression statement, not part of the docstring.
    # ind_start_of_night = np.append( 0 , np.where( np.diff(time) > timegap )[0] + 1 )
    # Last index of each night: positions where the time step to the next
    # sample exceeds *timegap*, plus a final sentinel for the last night.
    # NOTE(review): `len(np.diff(time1D)-1)` has a misplaced parenthesis --
    # it equals len(np.diff(time1D)) == N_time-1, which happens to be the
    # last valid index of time1D; presumably `len(np.diff(time1D))-1` was
    # intended. TODO confirm the off-by-one behaviour at the final night.
    ind_end_of_night = np.append( np.where( np.diff(time1D) > timegap )[0], len(np.diff(time1D)-1 ) )
    N_nights = len(ind_end_of_night)
    first_ind = [0]
    last_ind = []
    i = 0
    # j = 0
    while ((first_ind[-1] < N_time) & (i < N_nights) ):
        # A bin either spans a full *bin_width* samples, or is truncated at
        # the end of the current night (then advance to the next night).
        if (first_ind[-1]+bin_width) < ind_end_of_night[i]:
            last_ind.append( first_ind[-1] + bin_width )
        else:
            last_ind.append( ind_end_of_night[i] )
            i += 1
        # NOTE(review): callers slice arr[first_ind[nn]:last_ind[nn]]
        # (exclusive upper bound), so starting the next bin at
        # last_ind[-1] + 1 means the sample at index last_ind[-1] belongs
        # to no bin -- TODO confirm this is intended.
        first_ind.append( last_ind[-1] + 1 )
        # j += 1
    # The loop appends one start index past the end; drop it.
    del first_ind[-1]
    return first_ind, last_ind
def binning1D_per_night(time, arr, bin_width, timegap=3600, setting='mean', normalize=False):
    """ If time and arr are 1D arrays.

    Nightly-aware binning: bins never span a gap in *time* larger than
    *timegap* (same units as *time*; the default 3600 suggests seconds,
    though HJD/MJD timestamps are in days -- TODO confirm).
    Returns (bintime, binarr, binarr_err); bins with fewer than 2 points
    keep their NaN initialisation.
    """
    N_time = len(arr)
    bin_width = int(bin_width)
    first_ind, last_ind = bin_edge_indices(time, bin_width, timegap, N_time)
    N_bins = len(first_ind)
    # Initialise to NaN so skipped bins are clearly flagged.
    bintime, binarr, binarr_err = np.zeros((3,N_bins)) * np.nan
    if setting=='mean':
        for nn in range(N_bins):
            #skip no/single data points
            if last_ind[nn] > first_ind[nn]:
                bintime[nn] = np.nanmean( time[first_ind[nn]:last_ind[nn]] )
                #skip All-NAN slices (i.e. where all flux data is masked)
                # (bintime stays set even when the flux slice is all NaN)
                if ( np.isnan(arr[first_ind[nn]:last_ind[nn]]).all() == False ):
                    binarr[nn] = np.nanmean( arr[first_ind[nn]:last_ind[nn]] )
                    binarr_err[nn] = np.nanstd( arr[first_ind[nn]:last_ind[nn]] )
    elif setting=='median':
        for nn in range(N_bins):
            #skip no/single data points
            if (last_ind[nn] > first_ind[nn]):
                bintime[nn] = np.nanmedian( time[first_ind[nn]:last_ind[nn]] )
                #skip All-NAN slices (i.e. where all flux data is masked)
                if ( np.isnan(arr[first_ind[nn]:last_ind[nn]]).all() == False ):
                    binarr[nn] = np.nanmedian( arr[first_ind[nn]:last_ind[nn]] )
                    # 1.48 * MAD approximates the standard deviation.
                    binarr_err[nn] = 1.48 * np.nanmedian( abs(arr[first_ind[nn]:last_ind[nn]] - binarr[nn]) )
    if normalize==True:
        med = np.nanmedian(binarr)
        binarr /= med
        binarr_err /= med
    return bintime, binarr, binarr_err
def binning2D_per_night(time, arr, bin_width, timegap=3600, setting='mean', normalize=False, axis=1):
    """Nightly-aware binning for 2D inputs.

    *time* and *arr* are each 2D arrays, with different objects on axis 0
    and time stamps on axis 1. This currently relies on the fact that
    timestamps for all objects are approximately the same (given for the
    case of a HJD array that represents MJD values with small corrections);
    bin edges are therefore computed from the first object's timestamps
    only. Returns (bintime, binarr, binarr_err) of shape (N_objs, N_bins).
    """
    N_time = arr.shape[1]
    N_objs = arr.shape[0]
    bin_width = int(bin_width)
    first_ind, last_ind = bin_edge_indices(time[0,:], bin_width, timegap, N_time)
    N_bins = len(first_ind)
    bintime, binarr, binarr_err = np.zeros((3,N_objs,N_bins))
    if setting=='mean':
        for nn in range(N_bins):
            bintime[:,nn] = np.nanmean( time[:,first_ind[nn]:last_ind[nn]], axis=axis )
            binarr[:,nn] = np.nanmean( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
            binarr_err[:,nn] = np.nanstd( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
    elif setting=='median':
        for nn in range(N_bins):
            bintime[:,nn] = np.nanmedian( time[:,first_ind[nn]:last_ind[nn]], axis=axis )
            binarr[:,nn] = np.nanmedian( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
            # Bug fix: subtract each object's own bin median (kept as a
            # trailing length-1 axis so it broadcasts over the time axis)
            # and reduce along *axis*; the previous code broadcast
            # (N_objs, w) - (N_objs,), which raises for w != N_objs, and
            # collapsed the median to a single scalar for all objects.
            binarr_err[:,nn] = 1.48 * np.nanmedian(
                np.abs( arr[:,first_ind[nn]:last_ind[nn]] - binarr[:, nn:nn+1] ),
                axis=axis )
    if normalize==True:
        med = np.nanmedian(binarr)
        binarr /= med
        binarr_err /= med
    return bintime, binarr, binarr_err
def binning1D_per_night_list(time, arr, bin_width, timegap=3600, setting='mean', normalize=False):
    """ different style of program, same application.

    List-based variant of binning1D_per_night: bin edges are tracked with
    scalar indices while iterating, instead of being precomputed by
    bin_edge_indices.
    """
    N = len(time)
    bin_width = int(bin_width)
    bintime = []
    binarr = []
    binarr_err = []
    # ind_start_of_night = np.append( 0 , np.where( np.diff(time) > timegap )[0] + 1 )
    # NOTE(review): same misplaced parenthesis as in bin_edge_indices --
    # `len(np.diff(time)-1)` equals len(np.diff(time)) == N-1, which
    # happens to be the last valid index; presumably intended.
    ind_end_of_night = np.append( np.where( np.diff(time) > timegap )[0], len(np.diff(time)-1 ) )
    N_nights = len(ind_end_of_night)
    first_ind = 0
    i = 0
    if setting=='mean':
        while ((first_ind < N) & (i < N_nights) ):
            # Bin is either a full bin_width, or truncated at night end.
            if (first_ind+bin_width) < ind_end_of_night[i]:
                last_ind = first_ind+bin_width
            else:
                last_ind = ind_end_of_night[i]
                i += 1
            bintime.append( np.nanmean( time[first_ind:last_ind] ) )
            binarr.append( np.nanmean( arr[first_ind:last_ind] ) )
            binarr_err.append( np.nanstd(arr[first_ind:last_ind]) )
            first_ind = last_ind + 1
    elif setting=='median':
        # NOTE(review): unlike the mean branch, this loop has no
        # `(i < N_nights)` guard and advances with `first_ind = last_ind`
        # (no +1), so the two branches bin differently and the median
        # branch can index ind_end_of_night out of range -- TODO confirm
        # which behaviour is intended and align the branches.
        while first_ind < N:
            if (first_ind+bin_width) < ind_end_of_night[i]:
                last_ind = first_ind+bin_width
            else:
                last_ind = ind_end_of_night[i]
                i += 1
            bintime.append( np.nanmedian( time[first_ind:last_ind] ) )
            binarr.append( np.nanmedian( arr[first_ind:last_ind] ) )
            # 1.48 * MAD approximates the standard deviation.
            binarr_err.append( 1.48 * np.nanmedian(abs( arr[first_ind:last_ind] - binarr[-1])) )
            first_ind = last_ind
    bintime = np.array(bintime)
    binarr = np.array(binarr)
    binarr_err = np.array(binarr_err)
    if normalize==True:
        med = np.nanmedian(binarr)
        binarr /= med
        binarr_err /= med
    return bintime, binarr, binarr_err
######################################################################
# MAIN (FOR TESTING)
######################################################################
if __name__ == '__main__':
    # Smoke tests / visual checks: bin small synthetic data sets spanning
    # three "nights" (time gaps >> timegap) and overplot the binned points
    # (red) on the raw data (black).
    ######################################################################
    # TEST binning2D_per_night
    ######################################################################
    arr = np.array([[1,2,3,4,5,6, 67,68,64, -10,-11,-13], \
                    [1,2,3,4,5,6, 24,28,32, 10,11,13]])
    time = np.array([[1,2,3,4,5,6, 10001,10002,10003, 20001,20002,20003], \
                     [1,2,3,4,5,6.1, 10001,10002.1,10003.3, 20001,20002,20003]])
    bintime,binarr, _ = binning2D_per_night(time,arr,6)
    plt.figure()
    plt.plot(time,arr,'k.')
    plt.plot(bintime,binarr,'r.')
    ######################################################################
    # TEST binning1D_per_night
    ######################################################################
    arr = np.array([1,2,3,4,5,6, 67,68,64, -10,-11,-13])
    time = np.array([1,2,3,4,5,6, 10001,10002,10003, 20001,20002,20003])
    bintime,binarr, _ = binning1D_per_night(time,arr,6)
    plt.figure()
    plt.plot(time,arr,'k.')
plt.plot(bintime,binarr,'r.') | gpl-3.0 |
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================

This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.

The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.

At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)

# Authors: Clay Woolam <clay@woolam.org>
# License: BSD

import numpy as np
import matplotlib.pyplot as plt

from scipy import stats

from sklearn import datasets
from sklearn.semi_supervised import label_propagation
# Fixed: ``sklearn.metrics.metrics`` was a deprecated alias that has been
# removed; import the public names from ``sklearn.metrics`` directly.
from sklearn.metrics import classification_report, confusion_matrix

digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)

# Use a 330-sample subset of the shuffled digits.
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]

n_total_samples = len(y)
n_labeled_points = 30

indices = np.arange(n_total_samples)

unlabeled_set = indices[n_labeled_points:]

# shuffle everything around; -1 marks a sample as unlabeled for LabelSpreading
y_train = np.copy(y)
y_train[unlabeled_set] = -1

###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]

cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)

print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
      (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))

print(classification_report(true_labels, predicted_labels))

print("Confusion matrix")
print(cm)

# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)

# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]

###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
    image = images[image_index]

    sub = f.add_subplot(2, 5, index + 1)
    sub.imshow(image, cmap=plt.cm.gray_r)
    plt.xticks([])
    plt.yticks([])
    sub.set_title('predict: %i\ntrue: %i' % (
        lp_model.transduction_[image_index], y[image_index]))

f.suptitle('Learning with small amount of labeled data')
plt.show()
wy36101299/technical-analysis | technical-analysis.py | 2 | 12671 |
# coding: utf-8
# In[1]:
# 啟動互動式繪圖環境
get_ipython().magic(u'pylab inline')
# In[2]:
# 引入相依套件
import numpy as np
import pandas as pd
from numpy import random
import matplotlib.pyplot as plt
# 引入檔案
# 資料來源 3008 大立光 2012-8/1 ~ 2014-12/09
# http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/genpage/Report201412/201412_F3_1_8_3008.php?STK_NO=3008&myear=2014&mmon=12
datapath = '/Users/wy/Desktop/3008.txt'
data = pd.read_csv(datapath)
# In[3]:
# 看檔案前N筆
data.head(5)
# In[4]:
# data詳細資料 總數,平均數,標準差...
data.describe()
# In[5]:
# 技術分析資料來源
# http://hymar.myweb.hinet.net/study/stock/theory/
# In[6]:
# Rise Ratio 漲幅比
def RR(data):
    """Rise Ratio (漲幅比): daily percentage change of the closing price.

    Rows are ordered newest-first (index 0 is the latest trading day), so
    row ``i + 1`` holds the previous day's data.  For each row this computes
    ``(today's close - yesterday's close) / yesterday's close * 100`` and
    stores it in a new ``RR`` column; the oldest row has no predecessor and
    is left as NaN.

    :param data: DataFrame with ``Date`` and ``Close`` columns; modified in place.
    """
    n = data['Date'].size
    tmpList = []
    # Walk from the oldest row (n-1) towards the newest; `reversed(range(n))`
    # replaces the py2-only `range(...).reverse()` idiom.
    for item in reversed(range(n)):
        if item - 1 >= 0:
            tmp = (data['Close'][item-1] - data['Close'][item]) / data['Close'][item] * 100
            tmpList.append(tmp)
    tmpList.reverse()
    # One value shorter than the frame -> the oldest row becomes NaN.
    data['RR'] = pd.Series(tmpList)
# In[7]:
# 威廉指標(WMS%R或%R)
def WMS(data, day):
    """Williams %R (威廉指標) over a ``day``-bar window.

    Rows are newest-first.  For each full window:
    ``%R = (window high - newest close) / (window high - window low) * 100``.
    Results are stored in a new ``WMS`` column; rows without ``day`` bars of
    history remain NaN.

    :param data: DataFrame with ``Date``, ``High``, ``Low``, ``Close``; modified in place.
    :param day: window length in trading days (9 is customary).
    """
    n = data['Date'].size
    tmpList = []
    # Oldest row first (py2/py3-safe replacement for range(...).reverse()).
    for item in reversed(range(n)):
        if item - day + 1 >= 0:
            lo, hi = item - day + 1, item + 1
            window_high = data['High'][lo:hi].max()   # hoisted: used twice below
            tmp = (window_high - data['Close'][lo]) / (window_high - data['Low'][lo:hi].min()) * 100
            tmpList.append(tmp)
    tmpList.reverse()
    data['WMS'] = pd.Series(tmpList)
# In[8]:
# 買賣意願指標 day 建議26
def BR(data, day):
    """Buying/selling willingness indicator (買賣意願指標), window ``day`` (26 customary).

    ``BR = sum(high - previous close) / sum(previous close - low)`` over the
    window.  Rows are newest-first, so shifting a slice by -1 pairs each
    day's high/low with the *previous* day's close.  Stored in a new ``BR``
    column; rows without enough history remain NaN.

    :param data: DataFrame with ``Date``, ``High``, ``Low``, ``Close``; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day >= 0:
            # [item-day:item] = today's window; [item-day+1:item+1] = same
            # window shifted one row older, i.e. the previous day's closes.
            high_sum = data['High'][item-day:item].sum()
            low_sum = data['Low'][item-day:item].sum()
            prev_close_sum = data['Close'][item-day+1:item+1].sum()
            tmp = (high_sum - prev_close_sum) / (prev_close_sum - low_sum)
            tmpList.append(tmp)
    tmpList.reverse()
    data['BR'] = pd.Series(tmpList)
# In[9]:
# 買賣氣勢指標 day建議26
def AR(data, day):
    """Buying/selling momentum indicator (買賣氣勢指標), window ``day`` (26 customary).

    ``AR = sum(high - open) / sum(open - low)`` over the window.  Stored in a
    new ``AR`` column; rows without ``day`` bars of history remain NaN.

    :param data: DataFrame with ``Date``, ``High``, ``Low``, ``Open``; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day + 1 >= 0:
            lo, hi = item - day + 1, item + 1
            open_sum = data['Open'][lo:hi].sum()   # hoisted: used twice below
            tmp = (data['High'][lo:hi].sum() - open_sum) / (open_sum - data['Low'][lo:hi].sum())
            tmpList.append(tmp)
    tmpList.reverse()
    data['AR'] = pd.Series(tmpList)
# In[10]:
# 平均成交量 mean volumn day建議12
def MV(data, day):
    """Mean trading volume (平均成交量) over a ``day``-bar window (12 customary).

    ``MV = sum of the window's volumes / day``.  Stored in a new ``MV``
    column; rows without ``day`` bars of history remain NaN.

    :param data: DataFrame with ``Date`` and ``Volume`` columns; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day + 1 >= 0:
            tmpList.append(data['Volume'][item-day+1:item+1].mean())
    tmpList.reverse()
    data['MV'] = pd.Series(tmpList)
# In[11]:
# 移動平均線(MA,Moving Average) 建議12
def MA(data, day):
    """Moving average of the closing price over a ``day``-bar window.

    ``MA = sum of the window's closes / day``.  Stored in a new column named
    ``'MA' + str(day)`` (e.g. ``MA12``); rows without ``day`` bars of history
    remain NaN.

    :param data: DataFrame with ``Date`` and ``Close`` columns; modified in place.
    :param day: window length in trading days (12 customary).
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day + 1 >= 0:
            tmpList.append(data['Close'][item-day+1:item+1].mean())
    tmpList.reverse()
    data['MA' + str(day)] = pd.Series(tmpList)
# In[12]:
# 心理線(PSY) 建議13
def PSY(data, day):
    """Psychological line (心理線) over a ``day``-bar window (13 customary).

    ``PSY = (number of up days in the window / day) * 100``, where an up day
    is a row with positive ``RR``.  Requires :func:`RR` to have been run
    first.  Stored in a new ``PSY`` column; rows without enough history
    remain NaN.

    :param data: DataFrame with ``Date`` and ``RR`` columns; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day >= 0:
            # Window of the previous `day` rows that already have an RR value.
            rr = data['RR'][item-day:item]
            count = (rr > 0).sum()
            # Fixed: the denominator was hard-coded to 13 regardless of `day`;
            # the formula divides by the window length.
            tmp = float(count) / float(day) * 100
            tmpList.append(tmp)
    tmpList.reverse()
    data['PSY'] = pd.Series(tmpList)
# In[13]:
# 能量潮(OBV) 建議12
def OBV(data, day):
    """On-balance volume (能量潮) over a ``day``-bar window (12 customary).

    ``OBV = volume summed over up days - volume summed over down days``
    within the window, where up/down is the sign of ``RR``.  Requires
    :func:`RR` to have been run first.  Stored in a new ``OBV`` column; rows
    without enough history remain NaN.

    :param data: DataFrame with ``Date``, ``RR`` and ``Volume``; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day >= 0:
            # Window of the previous `day` rows that already have an RR value.
            rr = data['RR'][item-day:item]
            vol = data['Volume'][item-day:item]
            # Rising-day volume minus falling-day volume.
            # (The original's commented-out variant divided this by `day`.)
            tmp = vol[rr > 0].sum() - vol[rr < 0].sum()
            tmpList.append(tmp)
    tmpList.reverse()
    data['OBV'] = pd.Series(tmpList)
# In[14]:
# 數量指標(VR) 建議12
def VR(data, day):
    """Volume ratio (數量指標) over a ``day``-bar window (12 customary).

    ``VR = (up-day volume + flat-day volume / 2)
         / (down-day volume + flat-day volume / 2) * 100``
    within the window, where up/flat/down is the sign of ``RR``.  Requires
    :func:`RR` to have been run first.  Stored in a new ``VR`` column; rows
    without enough history remain NaN.

    :param data: DataFrame with ``Date``, ``RR`` and ``Volume``; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day >= 0:
            # Window of the previous `day` rows that already have an RR value.
            rr = data['RR'][item-day:item]
            vol = data['Volume'][item-day:item]
            up = vol[rr > 0].sum()
            flat = vol[rr == 0].sum()
            down = vol[rr < 0].sum()
            tmp = (up + 0.5 * flat) / (down + 0.5 * flat) * 100
            tmpList.append(tmp)
    tmpList.reverse()
    data['VR'] = pd.Series(tmpList)
# In[15]:
# 相對強弱指標(RSI) 建議6
def RSI(data, day):
    """Relative strength index (相對強弱指標) over a ``day``-bar window (6 customary).

    ``RSI = 100 * mean(up-day RR) / (mean(up-day RR) - mean(down-day RR))``
    within the window.  Requires :func:`RR` to have been run first.  Stored
    in a new ``RSI`` column; rows without enough history remain NaN.

    :param data: DataFrame with ``Date`` and ``RR`` columns; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day >= 0:
            # Window of the previous `day` rows that already have an RR value.
            rr = data['RR'][item-day:item]
            gain = rr[rr > 0].mean()
            loss = rr[rr < 0].mean()   # negative, so the denominator expands the range
            tmp = 100 * gain / (gain - loss)
            tmpList.append(tmp)
    tmpList.reverse()
    data['RSI'] = pd.Series(tmpList)
# In[16]:
# 乖離率(BIAS)
def BIAS(data, day):
    """Bias ratio (乖離率): deviation of the close from its ``day``-bar moving average.

    ``BIAS = (close - MA_day) / MA_day * 100`` evaluated at the newest row of
    each window.  Requires :func:`MA` to have been run with the same ``day``
    (it creates the ``'MA' + str(day)`` column read here).  Stored in a new
    ``BIAS`` column; rows without enough history remain NaN.

    :param data: DataFrame with ``Date``, ``Close`` and ``MA<day>``; modified in place.
    :param day: window length in trading days.
    """
    n = data['Date'].size
    ma_col = 'MA' + str(day)
    tmpList = []
    # py2/py3-safe replacement for range(...).reverse().
    for item in reversed(range(n)):
        if item - day + 1 >= 0:
            i = item - day + 1   # newest row of this window
            tmp = (data['Close'][i] - data[ma_col][i]) / data[ma_col][i] * 100
            tmpList.append(tmp)
    tmpList.reverse()
    data['BIAS'] = pd.Series(tmpList)
# In[17]:
# RR must be computed first: PSY/OBV/VR/RSI below all read the 'RR' column.
RR(data)

# Windowed indicators with their customary window lengths.
for indicator, span in [(WMS, 9), (BR, 26), (AR, 26), (MV, 12), (MA, 12)]:
    indicator(data, span)

# BIAS reads the 'MA12' column created by MA(data, 12) above.
BIAS(data, 12)

for indicator, span in [(PSY, 13), (OBV, 12), (VR, 12), (RSI, 6)]:
    indicator(data, span)


# In[18]:

# Longer moving averages for the golden/death-cross comparison plot.
MA(data, 20)
MA(data, 60)


# In[19]:

data
# In[20]:
# One figure per indicator column. Index 0 is the most recent sample, so the
# newest data sits at the left edge of each plot.
for column in ['RR', 'WMS', 'BR', 'AR', 'MV', 'MA12', 'BIAS',
               'PSY', 'OBV', 'VR', 'RSI']:
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(data[column])
    ax.set_title(column)


# In[32]:

# MA20 vs MA60 on one axis: crossings mark golden/death crosses.
# Index 0 is the most recent sample (newest data at the left).
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['MA20'], color='#9D442F', label='MA20')
ax.plot(data['MA60'], color='#5D947E', label='MA60')
ax.legend(loc='best')
# In[ ]:
| mit |
w2naf/davitpy | models/tsyganenko/__init__.py | 3 | 19327 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""
*********************
**Module**: models.tsyganenko
*********************
This modules containes the following object(s):
* :class:`models.tsyganenko.tsygTrace`: Wraps fortran subroutines in one convenient class
This module contains the following module(s):
* :mod:`models.tsyganenko.tsygFort`: Fortran subroutines
*******************************
"""
import tsygFort
class tsygTrace(object):
    """Trace magnetic field line(s) with the Tsyganenko (T96) model.

    Thin object-oriented wrapper around the fortran routines exposed by
    :mod:`tsygFort`.  See :meth:`__init__` for parameters and usage examples.
    NOTE(review): this is Python-2-era code (``xrange``, ``cPickle``); the
    fixes below are limited to two clear defects and do not modernize it.
    """
    # NOTE(review): `vswgse` below is a mutable default argument — callers
    # must not mutate it in place.
    def __init__(self, lat=None, lon=None, rho=None, filename=None,
        coords='geo', datetime=None,
        vswgse=[-400.,0.,0.], pdyn=2., dst=-5., byimf=0., bzimf=-5.,
        lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
        """
        | **PACKAGE**: models.tsyganenko.trace
        | **FUNCTION**: trace(lat, lon, rho, coords='geo', datetime=None,
        |    vswgse=[-400.,0.,0.], Pdyn=2., Dst=-5., ByIMF=0., BzIMF=-5.
        |    lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001)
        | **PURPOSE**: trace magnetic field line(s) from point(s)
        |
        | **INPUTS**:
        |    **lat**: latitude [degrees]
        |    **lon**: longitude [degrees]
        |    **rho**: distance from center of the Earth [km]
        |    **filename**: load a trace object directly from a file
        |    **[coords]**: coordinates used for start point ['geo']
        |    **[datetime]**: a python datetime object
        |    **[vswgse]**: solar wind velocity in GSE coordinates [m/s, m/s, m/s]
        |    **[pdyn]**: solar wind dynamic pressure [nPa]
        |    **[dst]**: Dst index [nT]
        |    **[byimf]**: IMF By [nT]
        |    **[bzimf]**: IMF Bz [nT]
        |    **[lmax]**: maximum number of points to trace
        |    **[rmax]**: upper trace boundary in Re
        |    **[rmin]**: lower trace boundary in Re
        |    **[dsmax]**: maximum tracing step size
        |    **[err]**: tracing step tolerance
        |
        | **OUTPUTS**:
        |    Elements of this object:
        |    **.lat[N/S]H**: latitude of the trace footpoint in Northern/Southern hemispher
        |    **.lon[N/S]H**: longitude of the trace footpoint in Northern/Southern hemispher
        |    **.rho[N/S]H**: distance of the trace footpoint in Northern/Southern hemispher
        |
        | **EXAMPLES**:
            from numpy import arange, zeros, ones
            import tsyganenko
            # trace a series of points
            lats = arange(10, 90, 10)
            lons = zeros(len(lats))
            rhos = 6372.*ones(len(lats))
            trace = tsyganenko.tsygTrace(lats, lons, rhos)
            # Print the results nicely
            print trace
            # Plot the traced field lines
            ax = trace.plot()
            # Or generate a 3d view of the traced field lines
            ax = trace.plot3d()
            # Save your trace to a file for later use
            trace.save('trace.dat')
            # And when you want to re-use the saved trace
            trace = tsyganenko.tsygTrace(filename='trace.dat')
        |
        | Written by Sebastien 2012-10
        """
        from datetime import datetime as pydt

        assert (None not in [lat, lon, rho]) or filename, 'You must provide either (lat, lon, rho) or a filename to read from'

        if None not in [lat, lon, rho]:
            self.lat = lat
            self.lon = lon
            self.rho = rho
            self.coords = coords
            self.vswgse = vswgse
            self.pdyn = pdyn
            self.dst = dst
            self.byimf = byimf
            self.bzimf = bzimf
            # If no datetime is provided, defaults to today
            if datetime==None: datetime = pydt.utcnow()
            self.datetime = datetime

            # NOTE(review): __test_valid__ asserts on invalid input, so iTest
            # is always True here; __del__ is not defined on this class and
            # this branch looks unreachable — confirm before relying on it.
            iTest = self.__test_valid__()
            if not iTest: self.__del__()

            self.trace()

        elif filename:
            self.load(filename)

    def __test_valid__(self):
        """
        | Test the validity of input arguments to the tsygTrace class and trace method
        |
        | Written by Sebastien 2012-10
        """
        assert (len(self.vswgse) == 3), 'vswgse must have 3 elements'
        assert (self.coords.lower() == 'geo'), '{}: this coordinae system is not supported'.format(self.coords.lower())
        # A provision for those who want to batch trace:
        # scalars are promoted to single-element lists.
        try:
            [l for l in self.lat]
        except:
            self.lat = [self.lat]
        try:
            [l for l in self.lon]
        except:
            self.lon = [self.lon]
        try:
            [r for r in self.rho]
        except:
            self.rho = [self.rho]
        try:
            [d for d in self.datetime]
        except:
            self.datetime = [self.datetime for l in self.lat]
        # Make sure they're all the sam elength
        assert (len(self.lat) == len(self.lon) == len(self.rho) == len(self.datetime)), \
            'lat, lon, rho and datetime must me the same length'

        return True

    def trace(self, lat=None, lon=None, rho=None, coords=None, datetime=None,
        vswgse=None, pdyn=None, dst=None, byimf=None, bzimf=None,
        lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
        """
        | See tsygTrace for a description of each parameter
        | Any unspecified parameter default to the one stored in the object
        | Unspecified lmax, rmax, rmin, dsmax, err has a set default value
        |
        | Written by Sebastien 2012-10
        """
        from numpy import radians, degrees, zeros

        # Store existing values of class attributes in case something is wrong
        # and we need to revert back to them
        if lat: _lat = self.lat
        if lon: _lon = self.lon
        if rho: _rho = self.rho
        if coords: _coords = self.coords
        if vswgse: _vswgse = self.vswgse
        if not datetime==None: _datetime = self.datetime

        # Pass position if new
        if lat: self.lat = lat
        lat = self.lat
        if lon: self.lon = lon
        lon = self.lon
        if rho: self.rho = rho
        rho = self.rho
        if not datetime==None: self.datetime = datetime
        datetime = self.datetime

        # Set necessary parameters if new
        if coords: self.coords = coords
        coords = self.coords
        if not datetime==None: self.datetime = datetime
        datetime = self.datetime
        if vswgse: self.vswgse = vswgse
        vswgse = self.vswgse
        if pdyn: self.pdyn = pdyn
        pdyn = self.pdyn
        if dst: self.dst = dst
        dst = self.dst
        if byimf: self.byimf = byimf
        byimf = self.byimf
        if bzimf: self.bzimf = bzimf
        bzimf = self.bzimf

        # Test that everything is in order, if not revert to existing values
        iTest = self.__test_valid__()
        if not iTest:
            if lat: self.lat = _lat
            # Fixed: this line read `_self.lon = lon` (NameError on `_self`);
            # the intent is to restore the saved value, like its siblings.
            if lon: self.lon = _lon
            if rho: self.rho = _rho
            if coords: self.coords = _coords
            if vswgse: self.vswgse = _vswgse
            if not datetime==None: self.datetime = _datetime

        # Declare the same Re as used in Tsyganenko models [km]
        Re = 6371.2

        # Initialize trace arrays (one row per start point; 2*lmax columns
        # leave room for both the north- and south-going halves of a line).
        self.l = zeros(len(lat))
        self.xTrace = zeros((len(lat),2*lmax))
        self.yTrace = self.xTrace.copy()
        self.zTrace = self.xTrace.copy()
        self.xGsw = self.l.copy()
        self.yGsw = self.l.copy()
        self.zGsw = self.l.copy()
        self.latNH = self.l.copy()
        self.lonNH = self.l.copy()
        self.rhoNH = self.l.copy()
        self.latSH = self.l.copy()
        self.lonSH = self.l.copy()
        self.rhoSH = self.l.copy()

        # And now iterate through the desired points
        for ip in xrange(len(lat)):
            # This has to be called first
            tsygFort.recalc_08(datetime[ip].year,datetime[ip].timetuple().tm_yday,
                datetime[ip].hour,datetime[ip].minute,datetime[ip].second,
                vswgse[0],vswgse[1],vswgse[2])

            # Convert lat,lon to geographic cartesian and then gsw
            r, theta, phi, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
                rho[ip]/Re, radians(90.-lat[ip]), radians(lon[ip]),
                0., 0., 0.,
                1)
            if coords.lower() == 'geo':
                xgeo, ygeo, zgeo, xgsw, ygsw, zgsw = tsygFort.geogsw_08(
                    xgeo, ygeo, zgeo,
                    0. ,0. ,0. ,
                    1)
            self.xGsw[ip] = xgsw
            self.yGsw[ip] = ygsw
            self.zGsw[ip] = zgsw

            # Trace field line
            inmod = 'IGRF_GSW_08'
            exmod = 'T96_01'
            parmod = [pdyn, dst, byimf, bzimf, 0, 0, 0, 0, 0, 0]
            # First towards southern hemisphere (mapto=-1), then northern.
            maptoL = [-1, 1]
            for mapto in maptoL:
                xfgsw, yfgsw, zfgsw, xarr, yarr, zarr, l = tsygFort.trace_08( xgsw, ygsw, zgsw,
                    mapto, dsmax, err, rmax, rmin, 0,
                    parmod, exmod, inmod,
                    lmax )

                # Convert back to spherical geographic coords
                xfgeo, yfgeo, zfgeo, xfgsw, yfgsw, zfgsw = tsygFort.geogsw_08(
                    0. ,0. ,0. ,
                    xfgsw, yfgsw, zfgsw,
                    -1)
                geoR, geoColat, geoLon, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
                    0., 0., 0.,
                    xfgeo, yfgeo, zfgeo,
                    -1)

                # Get coordinates of traced point
                if mapto == 1:
                    self.latSH[ip] = 90. - degrees(geoColat)
                    self.lonSH[ip] = degrees(geoLon)
                    self.rhoSH[ip] = geoR*Re
                elif mapto == -1:
                    self.latNH[ip] = 90. - degrees(geoColat)
                    self.lonNH[ip] = degrees(geoLon)
                    self.rhoNH[ip] = geoR*Re

                # Store trace: the south-going half is reversed so the full
                # line runs continuously from the NH to the SH footpoint.
                if mapto == -1:
                    self.xTrace[ip,0:l] = xarr[l-1::-1]
                    self.yTrace[ip,0:l] = yarr[l-1::-1]
                    self.zTrace[ip,0:l] = zarr[l-1::-1]
                elif mapto == 1:
                    self.xTrace[ip,self.l[ip]:self.l[ip]+l] = xarr[0:l]
                    self.yTrace[ip,self.l[ip]:self.l[ip]+l] = yarr[0:l]
                    self.zTrace[ip,self.l[ip]:self.l[ip]+l] = zarr[0:l]
                self.l[ip] += l

        # Resize trace output to more minimum possible length
        self.xTrace = self.xTrace[:,0:self.l.max()]
        self.yTrace = self.yTrace[:,0:self.l.max()]
        self.zTrace = self.zTrace[:,0:self.l.max()]

    def __str__(self):
        """
        | Print object information in a nice way
        |
        | Written by Sebastien 2012-10
        """
        # Declare print format
        outstr = '''
vswgse=[{:6.0f},{:6.0f},{:6.0f}] [m/s]
pdyn={:3.0f} [nPa]
dst={:3.0f} [nT]
byimf={:3.0f} [nT]
bzimf={:3.0f} [nT]
'''.format(self.vswgse[0],
           self.vswgse[1],
           self.vswgse[2],
           self.pdyn,
           self.dst,
           self.byimf,
           self.bzimf)
        outstr += '\nCoords: {}\n'.format(self.coords)
        outstr += '(latitude [degrees], longitude [degrees], distance from center of the Earth [km])\n'
        # Print stuff
        for ip in xrange(len(self.lat)):
            outstr += '''
({:6.3f}, {:6.3f}, {:6.3f}) @ {}
    --> NH({:6.3f}, {:6.3f}, {:6.3f})
    --> SH({:6.3f}, {:6.3f}, {:6.3f})
'''.format(self.lat[ip], self.lon[ip], self.rho[ip],
           self.datetime[ip].strftime('%H:%M UT (%d-%b-%y)'),
           self.latNH[ip], self.lonNH[ip], self.rhoNH[ip],
           self.latSH[ip], self.lonSH[ip], self.rhoSH[ip])

        return outstr

    def save(self, filename):
        """
        | Save trace information to a file
        |
        | Written by Sebastien 2012-10
        """
        import cPickle as pickle

        with open( filename, "wb" ) as fileObj:
            pickle.dump(self, fileObj)

    def load(self, filename):
        """
        | load trace information from a file
        |
        | Written by Sebastien 2012-10
        """
        import cPickle as pickle

        with open( filename, "rb" ) as fileObj:
            obj = pickle.load(fileObj)
            # Copy every pickled attribute onto this (already constructed) object.
            for k, v in obj.__dict__.items():
                self.__dict__[k] = v

    def plot(self, proj='xz', color='b', onlyPts=None, showPts=False,
        showEarth=True, disp=True, **kwargs):
        """
        | Generate a 2D plot of the trace projected onto a given plane
        | Graphic keywords apply to the plot method for the field lines
        |
        | **INPUTS**:
        |    **plane**: the projection plane in GSW coordinates
        |    **onlyPts**: if the trace countains multiple point, only show the specified indices (list)
        |    **showEarth**: Toggle Earth disk visibility on/off
        |    **showPts**: Toggle start points visibility on/off
        |    **disp**: invoke pylab.show()
        |    **color**: field line color
        |    **kwargs**: see matplotlib.axes.Axes.plot
        |
        | **OUTPUTS**:
        |    **ax**: matplotlib axes object
        |
        | Written by Sebastien 2012-10
        """
        from pylab import gcf, gca, show
        from matplotlib.patches import Circle
        from numpy import pi, linspace, outer, ones, size, cos, sin, radians, cross
        from numpy.ma import masked_array

        # Fixed: the original chained these conditions with `or`, which made
        # the assert pass for any 2-character string (e.g. proj='xx'); the
        # error message shows a conjunction was intended.
        assert (len(proj) == 2) and \
            (proj[0] in ['x','y','z'] and proj[1] in ['x','y','z']) and \
            (proj[0] != proj[1]), 'Invalid projection plane'

        fig = gcf()
        ax = fig.gca()
        ax.set_aspect('equal')

        # First plot a nice disk for the Earth
        if showEarth:
            circ = Circle(xy=(0,0), radius=1, facecolor='0.8', edgecolor='k', alpha=.5, zorder=0)
            ax.add_patch(circ)

        # Select indices to show
        if onlyPts is None:
            inds = xrange(len(self.lat))
        else:
            try:
                inds = [ip for ip in onlyPts]
            except:
                inds = [onlyPts]

        # Then plot the traced field line
        for ip in inds:
            # Select projection plane
            if proj[0] == 'x':
                xx = self.xTrace[ip,0:self.l[ip]]
                xpt = self.xGsw[ip]
                ax.set_xlabel(r'$X_{GSW}$')
                xdir = [1,0,0]
            elif proj[0] == 'y':
                xx = self.yTrace[ip,0:self.l[ip]]
                xpt = self.yGsw[ip]
                ax.set_xlabel(r'$Y_{GSW}$')
                xdir = [0,1,0]
            elif proj[0] == 'z':
                xx = self.zTrace[ip,0:self.l[ip]]
                xpt = self.zGsw[ip]
                ax.set_xlabel(r'$Z_{GSW}$')
                xdir = [0,0,1]
            if proj[1] == 'x':
                yy = self.xTrace[ip,0:self.l[ip]]
                ypt = self.xGsw[ip]
                ax.set_ylabel(r'$X_{GSW}$')
                ydir = [1,0,0]
            elif proj[1] == 'y':
                yy = self.yTrace[ip,0:self.l[ip]]
                ypt = self.yGsw[ip]
                ax.set_ylabel(r'$Y_{GSW}$')
                ydir = [0,1,0]
            elif proj[1] == 'z':
                yy = self.zTrace[ip,0:self.l[ip]]
                ypt = self.zGsw[ip]
                ax.set_ylabel(r'$Z_{GSW}$')
                ydir = [0,0,1]
            # The cross product of the two plot axes gives the out-of-page
            # direction; its sign decides which half of the line is "behind".
            sign = 1 if -1 not in cross(xdir,ydir) else -1
            if 'x' not in proj:
                zz = sign*self.xGsw[ip]
                indMask = sign*self.xTrace[ip,0:self.l[ip]] < 0
            if 'y' not in proj:
                zz = sign*self.yGsw[ip]
                indMask = sign*self.yTrace[ip,0:self.l[ip]] < 0
            if 'z' not in proj:
                zz = sign*self.zGsw[ip]
                indMask = sign*self.zTrace[ip,0:self.l[ip]] < 0
            # Plot the segment behind the Earth disk below it (zorder=-1)
            # and the segment in front of it above (zorder=1).
            ax.plot(masked_array(xx, mask=~indMask),
                masked_array(yy, mask=~indMask),
                zorder=-1, color=color, **kwargs)
            ax.plot(masked_array(xx, mask=indMask),
                masked_array(yy, mask=indMask),
                zorder=1, color=color, **kwargs)
            if showPts:
                ax.scatter(xpt, ypt, c='k', s=40, zorder=zz)

        if disp: show()

        return ax

    def plot3d(self, onlyPts=None, showEarth=True, showPts=False, disp=True,
        xyzlim=None, zorder=1, linewidth=2, color='b', **kwargs):
        """
        | Generate a 3D plot of the trace
        | Graphic keywords apply to the plot3d method for the field lines
        |
        | **INPUTS**:
        |    **onlyPts**: if the trace countains multiple point, only show the specified indices (list)
        |    **showEarth**: Toggle Earth sphere visibility on/off
        |    **showPts**: Toggle start points visibility on/off
        |    **disp**: invoke pylab.show()
        |    **xyzlim**: 3D axis limits
        |    **zorder**: 3D layers ordering
        |    **linewidth**: field line width
        |    **color**: field line color
        |    **kwargs**: see mpl_toolkits.mplot3d.axes3d.Axes3D.plot3D
        |
        | **OUTPUTS**:
        |    **ax**: matplotlib axes object
        |
        | Written by Sebastien 2012-10
        """
        from mpl_toolkits.mplot3d import proj3d
        from numpy import pi, linspace, outer, ones, size, cos, sin, radians
        from pylab import gca, gcf, show

        fig = gcf()
        ax = fig.gca(projection='3d')

        # First plot a nice sphere for the Earth
        if showEarth:
            u = linspace(0, 2 * pi, 179)
            v = linspace(0, pi, 179)
            tx = outer(cos(u), sin(v))
            ty = outer(sin(u), sin(v))
            tz = outer(ones(size(u)), cos(v))
            ax.plot_surface(tx,ty,tz,rstride=10, cstride=10, color='grey', alpha=.5, zorder=0, linewidth=0.5)

        # Select indices to show
        if onlyPts is None:
            inds = xrange(len(self.lat))
        else:
            try:
                inds = [ip for ip in onlyPts]
            except:
                inds = [onlyPts]

        # Then plot the traced field line
        for ip in inds:
            ax.plot3D( self.xTrace[ip,0:self.l[ip]],
                self.yTrace[ip,0:self.l[ip]],
                self.zTrace[ip,0:self.l[ip]],
                zorder=zorder, linewidth=linewidth, color=color, **kwargs)
            if showPts:
                ax.scatter3D(self.xGsw[ip], self.yGsw[ip], self.zGsw[ip], c='k')

        # Set plot limits (cube centred on the origin unless overridden)
        if not xyzlim:
            xyzlim = max( [ ax.get_xlim3d().max(),
                ax.get_ylim3d().max(),
                ax.get_zlim3d().max(), ] )
        ax.set_xlim3d([-xyzlim,xyzlim])
        ax.set_ylim3d([-xyzlim,xyzlim])
        ax.set_zlim3d([-xyzlim,xyzlim])

        if disp: show()

        return ax
| gpl-3.0 |
import sys
import time

import numpy as np
import tensorflow as tf
import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
    """Return a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32))
def init_bias_variable(shape):
    """Return a trainable bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape, dtype=tf.float32))
def conv2d(x, W):
    """2-D convolution of `x` with filter `W`, unit strides, no ('VALID') padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def loadData(filepath):
    """Load train/validation features and labels from an HDF5 (.mat v7.3) file.

    :param filepath: path to a file holding the datasets 'trainingFeatures',
        'trainingLabels', 'validationFeatures' and 'validationLabels'.
    :returns: [X_train, y_train, X_val, y_val] as numpy arrays.
    """
    # NOTE(review): banner says 2_0g but this file is exp2_0b — confirm.
    print('==> Experiment 2_0g')
    print('==> Loading data from {}'.format(filepath))
    # benchmark
    t_start = time.time()

    # Fixed: open read-only and let the context manager close the HDF5 handle
    # (the original only did `del f`, leaving the file open).
    with h5py.File(filepath, 'r') as f:
        X_train = np.array(f.get('trainingFeatures'))
        y_train = np.array(f.get('trainingLabels'))
        X_val = np.array(f.get('validationFeatures'))
        y_val = np.array(f.get('validationLabels'))

    t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} \
        seconds'.format(t = t_end - t_start))
    print('-- Number of training samples: {}'.format(X_train.shape[0]))
    print('-- Number of validation samples: {}'.format(X_val.shape[0]))
    print('Shape of X_train: %s'%str(X_train.shape))
    print('Shape of y_train: %s'%str(y_train.shape))
    print('Shape of X_val: %s'%str(X_val.shape))
    print('Shape of y_val: %s'%str(y_val.shape))

    return [X_train, y_train, X_val, y_val]
def runNeuralNet(num_freq, X_train, y_train, X_val, y_val, k1, k2, learningRate, batch_size, num_epochs, pooling_strategy):
    """Build and train a single-1x1-conv-layer softmax classifier in TensorFlow.

    Inputs are flattened spectrogram frames (`num_freq` bins per frame);
    labels are integer class ids.  Trains with Adam on mini-batches and
    records accuracy/error on both sets once per epoch.

    :param num_freq: number of frequency bins per frame (columns = num_freq * num_frames).
    :param X_train, y_train: training features (2-D) and integer labels.
    :param X_val, y_val: validation features and integer labels.
    :param k1: number of 1x1 convolution filters in the single conv layer.
    :param k2: unused by the active architecture (kept for the commented-out
        second-layer variant below).
    :param learningRate: Adam learning rate.
    :param batch_size: mini-batch size.
    :param num_epochs: number of passes over the training set.
    :param pooling_strategy: unused by the active architecture.
    :returns: [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
    """
    # Neural-network model set-up
    num_training_vec, total_features = X_train.shape
    num_frames = int(total_features / num_freq)
    print('-- Num frames: {}'.format(num_frames))
    num_classes = int(max(y_train.max(), y_val.max()) + 1)
    l = num_frames  # kept for the commented-out second-layer variant
    print("Num Classes: %g"%(num_classes))
    print_freq = 1  # record/print metrics every `print_freq` epochs

    # Transform labels into on-hot encoding form
    y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
    y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)

    # Set-up input and output label
    x = tf.placeholder(tf.float32, [None, total_features])
    y_ = tf.placeholder(tf.float32, [None, num_classes])

    # go straight from input to output, densely connected to SM layer
    '''
    W_sm = init_weight_variable([total_features, num_classes])
    b_sm = init_bias_variable([num_classes])

    y_conv = tf.matmul(x, W_sm) + b_sm
    '''
    print("Running single convolutional layer with %g 1x1 filters"%(k1))
    # single convolutional layer
    W_conv1 = init_weight_variable([1, 1, 1, k1]) # Old: [num_freq, 1, 1, k1]
    b_conv1 = init_bias_variable([k1])

    x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
    h_conv1 = conv2d(x_image, W_conv1) + b_conv1 # tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1), no non-linearity
    h_conv1_flat = tf.reshape(h_conv1, [-1, k1 * num_freq * num_frames]) #tf.reshape(h_conv1, [-1, num_frames * k1]) --- use this type of thing to make multiple scaled versions of data? enhance dataset?

    W_sm = init_weight_variable([k1 * num_freq * num_frames, num_classes])
    b_sm = init_bias_variable([num_classes])

    y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
    '''
    # One hidden layer then softmax
    numHiddenUnits = 100
    W_1 = init_weight_variable([total_features, numHiddenUnits])
    b_1 = init_bias_variable([numHiddenUnits])
    W_sm = init_weight_variable([numHiddenUnits, num_classes])
    b_sm = init_bias_variable([num_classes])
    hiddenActivation = tf.nn.relu(tf.matmul(x, W_1) + b_1)
    y_conv = tf.matmul(hiddenActivation, W_sm) + b_sm
    '''
    # second layer
    #W_conv2 = init_weight_variable([1, l, k1, k2])
    #b_conv2 = init_bias_variable([k2])
    #h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
    #h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])
    #h_pool2 = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # softmax layer
    #W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])
    #b_sm = init_bias_variable([num_classes])

    #y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm

    # evaluations
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # session
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # Materialize the one-hot labels; [:, 0, :] drops the singleton axis
    # tf.one_hot adds for column-vector label inputs.
    y_train = sess.run(y_train_OHEnc)[:, 0, :]
    y_val = sess.run(y_val_OHEnc)[:, 0, :]

    # print("h_conv1 %s"%str(h_conv1.eval(feed_dict={x:X_train, y_:y_train})))
    # print("W_sm is: %s"%str(W_sm.eval()))
    # print("h_conv1_flat is: %s"%str(h_conv1_flat.eval(feed_dict={x:X_train, y_:y_train})))
    # print("y_conv: %s"%str(y_conv.eval(feed_dict={x: X_train, y_: y_train})))
    # print("y_ is : %s"%str(y_.eval(feed_dict={x:X_train, y_:y_train})))

    train_acc_list = []
    val_acc_list = []
    train_err_list = []
    val_err_list = []
    epoch_numbers = []

    # benchmark
    t_start = time.time()
    for epoch in range(num_epochs):
        epochStart = time.time()
        # Mini-batch pass over the training set (last batch may be short).
        for i in range(0, num_training_vec, batch_size):
            batch_end_point = min(i + batch_size, num_training_vec)
            train_batch_data = X_train[i : batch_end_point]
            train_batch_label = y_train[i : batch_end_point]
            train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
        epochEnd = time.time()
        # printing and recording data
        if (epoch + 1) % print_freq == 0:
            train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
            train_acc_list.append(train_acc)
            val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
            val_acc_list.append(val_acc)
            train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
            train_err_list.append(train_err)
            val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
            val_err_list.append(val_err)
            epoch_numbers += [epoch]
            #print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
            print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))

    t_end = time.time()
    print('--Time elapsed for training: {t:.2f} \
        seconds'.format(t = t_end - t_start))

    return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
'''
Our Main
Command Line Arguments: (1) Length of horizontal window
'''
# ============================== Main script ==============================
# Loads the dataset, parses the command-line hyperparameters, trains the
# network, prints a report, and saves an error-vs-epoch plot.
# Command line arguments: (1) k1  (2) k2  (3) learning rate  (4) output image file

# load the data
[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp2/exp2_d15_1s_2.mat')
batchSize = 1000
numEpochs = 300
poolingStrategy = 'MAX'

# load in parameters - k1, k2, learning rate, and the filename to put the output image
try:
    k1 = int(sys.argv[1])
    k2 = int(sys.argv[2])
    # BUG FIX: argv[3] is the learning rate. It was previously read into an
    # unused variable ``k3``, leaving ``learningRate`` undefined and making
    # the runNeuralNet() call below fail with a NameError.
    learningRate = float(sys.argv[3])
    # BUG FIX: the output image name is a path string; coercing it with
    # int() raised ValueError, and plt.savefig() needs the string anyway.
    imageFile = sys.argv[4]
except Exception as e:  # ``except Exception, e`` is Python-2-only syntax
    print('-- {}'.format(e))

[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, k1, k2, learningRate, batchSize, numEpochs, poolingStrategy)

# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))

print('==> Generating error plot...')
x_list = epoch_numbers
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs')
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig(imageFile, format='png')
plt.close()
print('==> Done.')
'''
y_ = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
y_ = np.array([[0], [1], [2], [3], [3]])
x = np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29]])
x_val = np.array([[5, 6, 7, 8, 9, 10], [9, 10, 11, 12, 13, 14], [11, 12, 13, 14, 15, 16]])
y_val = np.array([[1], [3], [2]])
runNeuralNet(2, x, y_, x_val, y_val, 1, 300, 'MAX')
'''
'''
K1 = 10
--Time elapsed for training: 3057.72 seconds
-- Training accuracy: 0.2608
-- Validation accuracy: 0.8906
-- Training error: 8.5545E+00
-- Validation error: 4.2435E-01
==> Generating error plot...
''' | mit |
pkruskal/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)

    Z : array-like, shape (n_samples, n_neighbors, n_dim)

    reg : float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)

    Notes
    -----
    See developers note for more information.
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)

    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)

    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        C = A.T - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        # Tikhonov regularization: scale by the trace when it is positive so
        # the conditioning fix is invariant to the data's overall scale.
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        G.flat[::Z.shape[1] + 1] += R
        # FIX: the ``sym_pos=True`` keyword was deprecated and then removed
        # from scipy.linalg.solve (SciPy 1.11); ``assume_a='pos'`` is the
        # equivalent spelling and is available since SciPy 0.19.
        w = solve(G, v, assume_a='pos')
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
    """Computes the barycenter weighted graph of k-Neighbors for points in X

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : int
        Number of neighbors for each sample.

    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    # Ask for one extra neighbor because each point's nearest neighbor is
    # itself; that self-match is dropped below.
    knn = NearestNeighbors(n_neighbors + 1).fit(X)
    # Use the estimator's stored training data so the weights are computed
    # on exactly the array the tree indexed (handles tree/estimator inputs).
    fit_data = knn._fit_X
    n_samples = fit_data.shape[0]
    neighbor_ind = knn.kneighbors(fit_data, return_distance=False)[:, 1:]
    weights = barycenter_weights(fit_data, fit_data[neighbor_ind], reg=reg)
    # Each row holds exactly ``n_neighbors`` entries, so the CSR row pointer
    # is a simple arithmetic progression.
    row_ptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((weights.ravel(), neighbor_ind.ravel(), row_ptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : integer
        Number of eigenvalues/vectors to return

    k_skip : integer, optional
        Number of low eigenvalues to skip.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Returns
    -------
    (eigen_vectors, error) : the ``k`` null-space eigenvectors (the lowest
        ``k_skip`` are discarded) and the sum of the retained eigenvalues.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK only pays off on large matrices
        # when few eigenvectors are requested; otherwise dense is faster.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # Deterministic (given the seed) starting vector for the Arnoldi
        # iteration, so results are reproducible.
        v0 = random_state.rand(M.shape[0])
        try:
            # sigma=0.0: shift-invert around zero to target the null space.
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved.  method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            # Densify sparse input: eigh needs a plain array.
            M = M.toarray()
        # NOTE(review): the ``eigvals`` keyword was replaced by
        # ``subset_by_index`` in newer SciPy releases — confirm the SciPy
        # version this ships against.
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold.

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)

    # One extra neighbor is requested because each point's nearest neighbor
    # is itself; the self-match is dropped in every branch below.
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    # The dense solver needs M as an array; otherwise keep M sparse.
    M_sparse = (eigen_solver != 'dense')

    # NOTE(review): ``np.float`` (used below) was removed in NumPy 1.24 —
    # confirm the NumPy version this is pinned against.
    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg)

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            # dense expansion of (W-I)'(W-I) = W'W - W' - W + I
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # adds the identity in-place

    elif method == 'hessian':
        # dp = number of distinct entries of a symmetric Hessian estimate
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=np.float)

        # SVD of the (n_neighbors x d_in) block is cheaper than an eigh of
        # the (n_neighbors x n_neighbors) Gram matrix only when
        # n_neighbors > d_in.
        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1:1 + n_components] = U[:, :n_components]

            # fill in the upper-triangular products U_k * U_j (k <= j)
            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
                                                 * U[:, k:n_components])
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1:]
            S = w.sum(0)

            # guard against division by (near-)zero column sums
            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)
        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            evals **= 2  # singular values squared = covariance eigenvalues
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                # eigh returns ascending order; flip to descending
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)

        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = np.cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
                  + (1 - alpha_i) * w_reg[i, :, None])

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            # Gi: constant column plus the local tangent-space coordinates
            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    # The embedding is the null space of M (skipping the trivial constant
    # eigenvector, hence k_skip=1).
    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.

    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        standard : use the standard locally linear embedding algorithm.  see
                   reference [1]
        hessian  : use the Hessian eigenmap method. This method requires
                   ``n_neighbors > n_components * (1 + (n_components + 1) / 2``
                   see reference [2]
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``

    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Attributes
    ----------
    embedding_vectors_ : array-like, shape [n_components, n_samples]
        Stores the embedding vectors

    reconstruction_error_ : float
        Reconstruction error associated with `embedding_vectors_`

    nbrs_ : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """

    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None):
        # Plain parameter storage only, per scikit-learn estimator contract
        # (no validation or work in __init__).
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm

    def _fit_transform(self, X):
        # Build and remember the neighbors index so transform() can later
        # embed new points relative to the training set.
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm)

        random_state = check_random_state(self.random_state)
        X = check_array(X)
        self.nbrs_.fit(X)
        # Pass the fitted NearestNeighbors object (not X) so the tree is
        # reused by locally_linear_embedding.
        self.embedding_, self.reconstruction_error_ = \
            locally_linear_embedding(
                self.nbrs_, self.n_neighbors, self.n_components,
                eigen_solver=self.eigen_solver, tol=self.tol,
                max_iter=self.max_iter, method=self.method,
                hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
                random_state=random_state, reg=self.reg)

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """
        Transform new points into embedding space.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        check_is_fitted(self, "nbrs_")

        X = check_array(X)
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        # Reconstruct each new point as a barycenter of its training-set
        # neighbors, then map it with the same weights in embedding space.
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        for i in range(X.shape[0]):
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new
| bsd-3-clause |
JosmanPS/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
    """Return ``"auto"`` unchanged; otherwise coerce *val* to ``float``."""
    return "auto" if val == "auto" else float(val)
def type_auto_or_int(val):
    """Return ``"auto"`` unchanged; otherwise coerce *val* to ``int``."""
    return "auto" if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Return the duration represented by *delta* in seconds, as a float.

    Parameters
    ----------
    t_start : datetime
        Unused; kept so existing call sites remain valid.

    delta : datetime.timedelta
        Elapsed time to convert.

    Returns
    -------
    float
        Duration in seconds, including fractional microseconds.
    """
    # BUG FIX: the original ``delta.seconds + delta.microseconds / 1e6``
    # silently dropped ``delta.days``, under-reporting any run lasting a
    # day or more; total_seconds() accounts for all components.
    return delta.total_seconds()
def bench_scikit_transformer(X, transfomer):
    """Fit then transform ``X`` with a clone of *transfomer*, timing each step.

    Returns
    -------
    (time_to_fit, time_to_transform) : tuple of float
        Wall-clock durations in seconds for ``fit`` and ``transform``.
    """
    # Collect garbage first so leftover allocations don't skew the timing.
    gc.collect()
    estimator = clone(transfomer)

    def _timed(action):
        # Run *action* once and return its wall-clock duration in seconds.
        started = datetime.now()
        action()
        return compute_time(started, datetime.now() - started)

    fit_seconds = _timed(lambda: estimator.fit(X))
    transform_seconds = _timed(lambda: estimator.transform(X))
    return fit_seconds, transform_seconds
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
                            random_state=None):
    """Return a (dense ndarray, CSR matrix) pair with the same random data.

    ``n_nonzeros`` entries are drawn from a standard normal and scattered at
    uniformly random (row, col) positions; duplicate positions are summed by
    the COO construction.
    """
    rng = np.random.RandomState(random_state)
    # Draw in the same order as before (values, rows, cols) so that a given
    # seed keeps producing the identical data set.
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    coo = sp.coo_matrix((values, (rows, cols)),
                        shape=(n_samples, n_features))
    return coo.toarray(), coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one aligned row of the benchmark results table."""
    name_col = clf_type.ljust(30)
    fit_col = ("%.4fs" % time_fit).center(12)
    transform_col = ("%.4fs" % time_transform).center(12)
    print("%s | %s | %s" % (name_col, fit_col, transform_col))
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")

    op.add_option("--n-features",
                  dest="n_features", default=10 ** 4, type=int,
                  help="Number of features in the benchmarks")

    op.add_option("--n-components",
                  dest="n_components", default="auto",
                  help="Size of the random subspace."
                       " ('auto' or int > 0)")

    op.add_option("--ratio-nonzeros",
                  dest="ratio_nonzeros", default=10 ** -3, type=float,
                  help="Number of features in the benchmarks")

    op.add_option("--n-samples",
                  dest="n_samples", default=500, type=int,
                  help="Number of samples in the benchmarks")

    op.add_option("--random-seed",
                  dest="random_seed", default=13, type=int,
                  help="Seed used by the random number generators.")

    op.add_option("--density",
                  dest="density", default=1 / 3,
                  help="Density used by the sparse random projection."
                       " ('auto' or float (0.0, 1.0]")

    op.add_option("--eps",
                  dest="eps", default=0.5, type=float,
                  help="See the documentation of the underlying transformers.")

    op.add_option("--transformers",
                  dest="selected_transformers",
                  default='GaussianRandomProjection,SparseRandomProjection',
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. Available: "
                       "GaussianRandomProjection,SparseRandomProjection")

    op.add_option("--dense",
                  dest="dense",
                  default=False,
                  action="store_true",
                  help="Set input space as a dense matrix.")

    (opts, args) = op.parse_args()
    if len(args) > 0:
        op.error("this script takes no arguments.")
        sys.exit(1)

    # "auto" passes through unchanged; anything else becomes int/float.
    opts.n_components = type_auto_or_int(opts.n_components)
    opts.density = type_auto_or_float(opts.density)
    selected_transformers = opts.selected_transformers.split(',')

    ###########################################################################
    # Generate dataset
    ###########################################################################
    n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)

    print('Dataset statics')
    print("===========================")
    print('n_samples \t= %s' % opts.n_samples)
    print('n_features \t= %s' % opts.n_features)
    if opts.n_components == "auto":
        print('n_components \t= %s (auto)' %
              johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
                                            eps=opts.eps))
    else:
        print('n_components \t= %s' % opts.n_components)
    print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
    print('n_nonzeros \t= %s per feature' % n_nonzeros)
    print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
    print('')

    ###########################################################################
    # Set transformer input
    ###########################################################################
    transformers = {}

    ###########################################################################
    # Set GaussianRandomProjection input
    gaussian_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed
    }
    transformers["GaussianRandomProjection"] = \
        GaussianRandomProjection(**gaussian_matrix_params)

    ###########################################################################
    # Set SparseRandomProjection input
    sparse_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed,
        "density": opts.density,
        "eps": opts.eps,
    }

    transformers["SparseRandomProjection"] = \
        SparseRandomProjection(**sparse_matrix_params)

    ###########################################################################
    # Perform benchmark
    ###########################################################################
    time_fit = collections.defaultdict(list)
    time_transform = collections.defaultdict(list)

    print('Benchmarks')
    print("===========================")
    print("Generate dataset benchmarks... ", end="")
    X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
                                                opts.n_features,
                                                n_nonzeros,
                                                random_state=opts.random_seed)
    X = X_dense if opts.dense else X_sparse
    print("done")

    for name in selected_transformers:
        print("Perform benchmarks for %s..." % name)

        for iteration in xrange(opts.n_times):
            print("\titer %s..." % iteration, end="")
            # NOTE(review): X_dense is always benchmarked here, so the
            # --dense flag (and the X selected above) has no effect on the
            # timings — confirm whether X was intended instead.
            time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
                transformers[name])
            time_fit[name].append(time_to_fit)
            time_transform[name].append(time_to_transform)
            print("done")

    print("")

    ###########################################################################
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))
    print("")

    print("Transformer performance:")
    print("===========================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    print("%s | %s | %s" % ("Transformer".ljust(30),
                            "fit".center(12),
                            "transform".center(12)))
    print(31 * "-" + ("|" + "-" * 14) * 2)

    for name in sorted(selected_transformers):
        print_row(name,
                  np.mean(time_fit[name]),
                  np.mean(time_transform[name]))

    print("")
    print("")
DaveL17/matplotlib | matplotlib.indigoPlugin/Contents/Server Plugin/chart_calendar.py | 1 | 4390 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates the calendar charts
Given the unique nature of calendar charts, we use a separate method to
construct them.
-----
"""
# Built-in Modules
import calendar
import datetime as dt
import pickle
import sys
import traceback
# Third-party Modules
# Note the order and structure of matplotlib imports is intentional.
import matplotlib
matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done.
import matplotlib.pyplot as plt
# My modules
import chart_tools
# Shared helpers and the chart payload handed over by the plugin host.
log = chart_tools.log
payload = chart_tools.payload

# Unpack the pieces of the payload used throughout this script.
props = payload['props']      # per-device chart properties
chart_name = props['name']
p_dict = payload['p_dict']    # plot settings dictionary
plug_dict = payload['prefs']  # plugin-level preferences

log['Threaddebug'].append(u"chart_calendar.py called.")
if plug_dict['verboseLogging']:
    # Dump the full payload only when verbose logging is enabled.
    chart_tools.log['Threaddebug'].append(u"{0}".format(payload))
try:
def __init__():
pass
fmt = {'short': {0: ["M", "T", "W", "T", "F", "S", "S"],
6: ["S", "M", "T", "W", "T", "F", "S"]},
'mid': {0: ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
6: ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]},
'long': {0: ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
6: ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]}
}
first_day = int(props.get('firstDayOfWeek', 6))
day_format = props.get('dayOfWeekFormat', 'mid')
days_labels = fmt[day_format][first_day]
my_cal = calendar.Calendar(first_day) # first day is Sunday = 6, Monday = 0
today = dt.datetime.today()
cal = my_cal.monthdatescalendar(today.year, today.month)
try:
height = int(props.get('customSizeHeight', 300)) / int(plt.rcParams['savefig.dpi'])
except ValueError:
height = 3
try:
width = int(props.get('customSizeWidth', 500)) / int(plt.rcParams['savefig.dpi'])
except ValueError:
width = 5
fig = plt.figure(figsize=(width, height))
ax = fig.add_subplot(111)
ax.axis('off')
# ============================= Plot Months Row =============================
month_row = ax.table(cellText=[" "],
colLabels=[dt.datetime.strftime(today, "%B")],
loc='top',
bbox=[0, 0.5, 1, .5] # bbox = [left, bottom, width, height]
)
chart_tools.format_axis(ax_obj=month_row)
# ============================= Plot Days Rows ==============================
# final_cal contains just the date value from the date object
final_cal = [[_.day if _.month == today.month else "" for _ in thing] for thing in cal]
days_rows = ax.table(cellText=final_cal,
colLabels=days_labels,
loc='top',
cellLoc=props.get('dayOfWeekAlignment', 'right'),
bbox=[0, -0.5, 1, 1.25]
)
chart_tools.format_axis(ax_obj=days_rows)
# ========================= Highlight Today's Date ==========================
t = dt.datetime.now().day # today's date
all_cal = [days_labels] + final_cal # days rows plus dates rows
# Find the index of today's date (t) in all_cal
highlight_date = [(i, all_cal.index(t)) for i, all_cal in enumerate(all_cal) if t in all_cal][0]
# Set the cell facecolor
highlight_color = p_dict.get('todayHighlight', '#555555')
days_rows.get_celld()[highlight_date].set_facecolor(highlight_color)
# ============================= Plot the Chart ==============================
# Note that subplots_adjust affects the space surrounding the subplots and not
# the fig.
plt.subplots_adjust(top=0.97,
bottom=0.34,
left=0.02,
right=0.98,
hspace=None,
wspace=None
)
chart_tools.save(logger=log)
except (KeyError, IndexError, ValueError, UnicodeEncodeError) as sub_error:
tb = traceback.format_exc()
chart_tools.log['Critical'].append(u"[{n}] {s}".format(n=chart_name, s=tb))
# ============================== Housekeeping ===============================
pickle.dump(chart_tools.log, sys.stdout)
| mit |
ian-r-rose/burnman | examples/example_composition.py | 1 | 7197 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_composition
-------------------
This example shows how to create different minerals, how to compute seismic
velocities, and how to compare them to a seismic reference model.
There are many different ways in BurnMan to combine minerals into a
composition. Here we present a couple of examples:
1. Two minerals mixed in simple mole fractions. Can be chosen from the BurnMan
libraries or from user defined minerals (see example_user_input_material)
2. Example with three minerals
3. Using preset solid solutions
4. Defining your own solid solution
To turn a method of mineral creation "on" the first if statement above the
method must be set to True, with all others set to False.
Note: These minerals can include a spin transition in (Mg,Fe)O, see
example_spintransition.py for explanation of how to implement this
*Uses:*
* :doc:`mineral_database`
* :class:`burnman.composite.Composite`
* :class:`burnman.mineral.Mineral`
* :class:`burnman.solidsolution.SolidSolution`
*Demonstrates:*
* Different ways to define a composite
* Using minerals and solid solutions
* Compare computations to seismic models
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
    # To compute seismic velocities and other properties, we need to supply
    # burnman with a list of minerals (phases) and their molar abundances.
    # Minerals are classes found in burnman.minerals and are derived from
    # burnman.minerals.material.
    # Here are a few ways to define phases and molar_abundances
    # (toggle exactly one of the if-blocks below to True):

    # Example 1: two simple fixed minerals
    if True:
        amount_perovskite = 0.95
        rock = burnman.Composite([minerals.SLB_2011.mg_perovskite(),
                                  minerals.SLB_2011.periclase()],
                                 [amount_perovskite, 1 - amount_perovskite])

    # Example 2: three materials
    if False:
        rock = burnman.Composite([minerals.SLB_2011.fe_perovskite(),
                                  minerals.SLB_2011.periclase(),
                                  minerals.SLB_2011.stishovite()],
                                 [0.7, 0.2, 0.1])

    # Example 3: Mixing solid solutions
    if False:
        # Defining a rock using a predefined solid solution from the mineral
        # library database.
        preset_solidsolution = minerals.SLB_2011.mg_fe_perovskite()
        # The line below is optional to see which endmembers (and in which order) are in the solid solution
        # print preset_solidsolution.endmembers
        # Set molar_fraction of mg_perovskite, fe_perovskite and al_perovskite
        preset_solidsolution.set_composition(
            [0.9, 0.1, 0.])  # Set molar_fraction of mg_perovskite, fe_perovskite and al_perovskite
        rock = burnman.Composite(
            [preset_solidsolution, minerals.SLB_2011.periclase()], [0.8, 0.2])

    # Example 4: Defining your own solid solution
    if False:
        # Define a new SolidSolution with mg and fe perovskite endmembers
        new_solidsolution = burnman.SolidSolution(name = 'New Mg-Fe bridgmanite',
                                                  endmembers = [[minerals.SLB_2011.mg_perovskite(),
                                                                 '[Mg]SiO3'],
                                                                [minerals.SLB_2011.fe_perovskite(),
                                                                 '[Fe]SiO3']],
                                                  solution_type = 'ideal')
        # Set molar fraction of endmembers
        new_solidsolution.set_composition([0.9, 0.1])
        rock = burnman.Composite(
            [new_solidsolution, minerals.SLB_2011.periclase()], [0.8, 0.2])

    # seismic model for comparison:
    # pick from .prem() .slow() .fast() (see burnman/seismic.py)
    seismic_model = burnman.seismic.PREM()
    number_of_points = 20  # set on how many depth slices the computations should be done
    # we will do our computation and comparison at the following depth values:
    depths = np.linspace(700e3, 2800e3, number_of_points)
    # alternatively, we could use the values where prem is defined:
    # depths = seismic_model.internal_depth_list(mindepth=700.e3,
    # maxdepth=2800.e3)
    seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
        ['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)

    temperature = burnman.geotherm.brown_shankland(seis_p)

    print("Calculations are done for:")
    rock.debug_print()

    mat_rho, mat_vp, mat_vphi, mat_vs, mat_K, mat_G = rock.evaluate(
        ['density', 'v_p', 'v_phi', 'v_s', 'K_S', 'G'], seis_p, temperature)

    # Misfit of the computed profiles against the seismic reference model.
    [vs_err, vphi_err, rho_err] = burnman.compare_chifactor(
        [mat_vs, mat_vphi, mat_rho], [seis_vs, seis_vphi, seis_rho])

    # PLOTTING

    # plot vs
    plt.subplot(2, 2, 1)
    plt.plot(
        seis_p / 1.e9, mat_vs / 1.e3, color='b', linestyle='-', marker='o',
        markerfacecolor='b', markersize=4, label='computation')
    plt.plot(
        seis_p / 1.e9, seis_vs / 1.e3, color='k', linestyle='-', marker='o',
        markerfacecolor='k', markersize=4, label='reference')
    plt.title("Vs (km/s)")
    plt.xlim(min(seis_p) / 1.e9, max(seis_p) / 1.e9)
    plt.ylim(5.1, 7.6)
    plt.legend(loc='lower right')
    plt.text(40, 7.3, "misfit= %3.3f" % vs_err)

    # plot Vphi
    plt.subplot(2, 2, 2)
    plt.plot(
        seis_p / 1.e9, mat_vphi / 1.e3, color='b', linestyle='-', marker='o',
        markerfacecolor='b', markersize=4)
    plt.plot(
        seis_p / 1.e9, seis_vphi / 1.e3, color='k', linestyle='-', marker='o',
        markerfacecolor='k', markersize=4)
    plt.title("Vphi (km/s)")
    plt.xlim(min(seis_p) / 1.e9, max(seis_p) / 1.e9)
    plt.ylim(7, 12)
    plt.text(40, 11.5, "misfit= %3.3f" % vphi_err)

    # plot density
    plt.subplot(2, 2, 3)
    plt.plot(
        seis_p / 1.e9, mat_rho / 1.e3, color='b', linestyle='-', marker='o',
        markerfacecolor='b', markersize=4)
    plt.plot(
        seis_p / 1.e9, seis_rho / 1.e3, color='k', linestyle='-', marker='o',
        markerfacecolor='k', markersize=4)
    # FIX: raw string -- "\c" in a plain string is an invalid escape sequence
    # (SyntaxWarning on modern Python); the rendered TeX label is unchanged.
    plt.title(r"density ($\cdot 10^3$ kg/m$^3$)")
    plt.xlim(min(seis_p) / 1.e9, max(seis_p) / 1.e9)
    plt.text(40, 4.3, "misfit= %3.3f" % rho_err)
    plt.xlabel("Pressure (GPa)")

    # plot geotherm
    plt.subplot(2, 2, 4)
    plt.plot(seis_p / 1e9, temperature, color='r', linestyle='-', marker='o',
             markerfacecolor='r', markersize=4)
    plt.title("Geotherm (K)")
    plt.xlim(min(seis_p) / 1.e9, max(seis_p) / 1.e9)
    plt.xlabel("Pressure (GPa)")

    plt.savefig("output_figures/example_composition.png")
    plt.show()
| gpl-2.0 |
adamrvfisher/TechnicalAnalysisLibrary | CSVtoDF.py | 1 | 1762 | # -*- coding: utf-8 -*-
"""
Created on Sun May 21 11:47:12 2017
@author: AmatVictoriaCuramIII
"""
from pandas import read_csv
import pandas as pd
import os
# Pass 1: read each raw CSV, index it by date, de-duplicate, and pickle it to
# Database\<name>\<name> (a per-file subfolder, created on first run).
# Pass 2: reload each pickle, coerce every column to numeric, and re-save.
CSVfiles = os.listdir('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV')
ranger = range(0, len(CSVfiles))
for i in ranger:
    try:
        temp = read_csv('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV\\' +
                        (CSVfiles[i]), sep=',')
        temp = temp.set_index('Date')
        temp.index = pd.to_datetime(temp.index, format="%Y/%m/%d")
        # Drop duplicate columns, then duplicate dates (keep the first row).
        temp = temp.loc[:, ~temp.columns.duplicated()]
        temp = temp[~temp.index.duplicated(keep='first')]
        if not os.path.exists('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                              CSVfiles[i][:-4]):
            os.makedirs('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                        CSVfiles[i][:-4])
        pd.to_pickle(temp, 'F:\\Users\\AmatVictoriaCuram\\Database\\' +
                     CSVfiles[i][:-4] + '\\' + CSVfiles[i][:-4])
    except OSError:
        continue
for i in ranger:
    try:
        # BUG FIX: pass 1 writes to Database\<name>\<name>, but this loop
        # previously read/wrote Database\<name> (a directory), so every
        # iteration raised and was silently skipped by `except OSError`.
        glaze = pd.read_pickle('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                               CSVfiles[i][:-4] + '\\' + CSVfiles[i][:-4])
        for x in glaze.columns:
            glaze[x] = pd.to_numeric(glaze[x], errors='coerce')
        pd.to_pickle(glaze, 'F:\\Users\\AmatVictoriaCuram\\Database\\' +
                     CSVfiles[i][:-4] + '\\' + CSVfiles[i][:-4])
    except OSError:
        continue
#this is for testing individual CSVs
#tester = read_csv('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV\\' +
# (df['CSVname'][0]), sep = ',')
#tester = tester.set_index('Date')
#pd.to_pickle(tester, 'F:\\Users\\AmatVictoriaCuram\\Database\\' + df['CSVname'][0][:-4]) | apache-2.0 |
chunweiyuan/xarray | asv_bench/benchmarks/interp.py | 1 | 1644 | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
# Benchmark problem sizes.
nx = 3000           # x-axis length of the 2-D arrays
long_nx = 30000000  # length of the long 1-D array
ny = 2000           # y-axis length
nt = 1000           # time-axis length
window = 20         # NOTE(review): unused in this chunk; presumably shared by rolling benchmarks
# Synthetic inputs built by the local randn helper (frac_nan sprinkles NaNs).
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt, ))
randn_long = randn((long_nx, ), frac_nan=0.1)
# Interpolation targets: short/long x grids inside the data range, plus a y grid.
new_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100)
new_x_long = np.linspace(0.3 * nx, 0.7 * nx, 1000)
new_y_long = np.linspace(0.1, 0.9, 1000)
class Interpolation(object):
    """ASV benchmarks for xarray interpolation on a synthetic dataset."""

    def setup(self, *args, **kwargs):
        """Build the dataset interpolated against by the timed methods."""
        data_vars = {
            'var1': (('x', 'y'), randn_xy),
            'var2': (('x', 't'), randn_xt),
            'var3': (('t', ), randn_t),
        }
        coords = {
            'x': np.arange(nx),
            'y': np.linspace(0, 1, ny),
            't': pd.date_range('1970-01-01', periods=nt, freq='D'),
            'x_coords': ('x', np.linspace(1.1, 2.1, nx)),
        }
        self.ds = xr.Dataset(data_vars, coords=coords)

    @parameterized(['method', 'is_short'],
                   (['linear', 'cubic'], [True, False]))
    def time_interpolation(self, method, is_short):
        """Time 1-D interpolation along x for short/long target grids."""
        target = new_x_short if is_short else new_x_long
        self.ds.interp(x=target, method=method).load()

    @parameterized(['method'],
                   (['linear', 'nearest']))
    def time_interpolation_2d(self, method):
        """Time 2-D interpolation along x and y."""
        self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()
class InterpolationDask(Interpolation):
    # Same benchmarks as Interpolation, but with the dataset chunked along the
    # time axis so interpolation runs through dask.
    def setup(self, *args, **kwargs):
        # Skip the benchmark entirely when dask is not installed.
        requires_dask()
        # py2-compatible super call; matches the file's __future__ imports.
        super(InterpolationDask, self).setup(**kwargs)
        self.ds = self.ds.chunk({'t': 50})
| apache-2.0 |
taknevski/tensorflow-xsmm | tensorflow/tools/dist_test/python/census_widendeep.py | 54 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
  """Configuration for the census Wide & Deep model.

  Returns:
    columns: Column names to retrieve from the data source
    label_column: Name of the label column
    wide_columns: List of wide columns
    deep_columns: List of deep columns
    categorical_column_names: Names of the categorical columns
    continuous_column_names: Names of the continuous columns
  """
  # 1. Categorical base columns.
  gender = tf.contrib.layers.sparse_column_with_keys(
      column_name="gender", keys=["female", "male"])
  race = tf.contrib.layers.sparse_column_with_keys(
      column_name="race",
      keys=["Amer-Indian-Eskimo",
            "Asian-Pac-Islander",
            "Black",
            "Other",
            "White"])
  # Hash buckets bound the feature space for high-cardinality string columns
  # (at the cost of possible hash collisions).
  education = tf.contrib.layers.sparse_column_with_hash_bucket(
      "education", hash_bucket_size=1000)
  marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
      "marital_status", hash_bucket_size=100)
  relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
      "relationship", hash_bucket_size=100)
  workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
      "workclass", hash_bucket_size=100)
  occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
      "occupation", hash_bucket_size=1000)
  native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
      "native_country", hash_bucket_size=1000)
  # 2. Continuous base columns.
  age = tf.contrib.layers.real_valued_column("age")
  age_buckets = tf.contrib.layers.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
  education_num = tf.contrib.layers.real_valued_column("education_num")
  capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
  capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
  hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
  # Wide (linear) part: sparse columns plus crossed columns that capture
  # feature interactions.
  wide_columns = [
      gender, native_country, education, occupation, workclass,
      marital_status, relationship, age_buckets,
      tf.contrib.layers.crossed_column([education, occupation],
                                       hash_bucket_size=int(1e4)),
      tf.contrib.layers.crossed_column([native_country, occupation],
                                       hash_bucket_size=int(1e4)),
      tf.contrib.layers.crossed_column([age_buckets, race, occupation],
                                       hash_bucket_size=int(1e6))]
  # Deep (DNN) part: embeddings of the categorical columns plus the raw
  # continuous columns.
  deep_columns = [
      tf.contrib.layers.embedding_column(workclass, dimension=8),
      tf.contrib.layers.embedding_column(education, dimension=8),
      tf.contrib.layers.embedding_column(marital_status, dimension=8),
      tf.contrib.layers.embedding_column(gender, dimension=8),
      tf.contrib.layers.embedding_column(relationship, dimension=8),
      tf.contrib.layers.embedding_column(race, dimension=8),
      tf.contrib.layers.embedding_column(native_country, dimension=8),
      tf.contrib.layers.embedding_column(occupation, dimension=8),
      age, education_num, capital_gain, capital_loss, hours_per_week]
  # Define the column names for the data sets.
  columns = ["age", "workclass", "fnlwgt", "education", "education_num",
             "marital_status", "occupation", "relationship", "race", "gender",
             "capital_gain", "capital_loss", "hours_per_week",
             "native_country", "income_bracket"]
  label_column = "label"
  categorical_columns = ["workclass", "education", "marital_status",
                         "occupation", "relationship", "race", "gender",
                         "native_country"]
  continuous_columns = ["age", "education_num", "capital_gain",
                        "capital_loss", "hours_per_week"]
  return (columns, label_column, wide_columns, deep_columns,
          categorical_columns, continuous_columns)
class CensusDataSource(object):
  """Source of census data.

  Downloads (or loads from disk) the adult train/test data sets, applies the
  income label threshold, and exposes `input_fn`s for training and testing.
  """

  def __init__(self, data_dir, train_data_url, test_data_url,
               columns, label_column,
               categorical_columns, continuous_columns):
    """Constructor of CensusDataSource.

    Args:
      data_dir: Directory to save/load the data files
      train_data_url: URL from which the training data can be downloaded
      test_data_url: URL from which the test data can be downloaded
      columns: Columns to retrieve from the data files (A list of strings)
      label_column: Name of the label column
      categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
    """
    # Retrieve data from disk (if available) or download from the web.
    train_file_path = os.path.join(data_dir, "adult.data")
    if os.path.isfile(train_file_path):
      print("Loading training data from file: %s" % train_file_path)
    else:
      urllib.urlretrieve(train_data_url, train_file_path)
    # BUG FIX: the file is now always opened after it is guaranteed to exist;
    # previously the download branch never bound train_file (NameError below).
    train_file = open(train_file_path)

    test_file_path = os.path.join(data_dir, "adult.test")
    if os.path.isfile(test_file_path):
      print("Loading test data from file: %s" % test_file_path)
    else:
      urllib.urlretrieve(test_data_url, test_file_path)
    # BUG FIX: previously the test file was opened *before* it was downloaded,
    # which raised IOError on a fresh data_dir.
    test_file = open(test_file_path)

    # Read the training and testing data sets into Pandas DataFrame.
    import pandas  # pylint: disable=g-import-not-at-top
    self._df_train = pandas.read_csv(train_file, names=columns,
                                     skipinitialspace=True)
    self._df_test = pandas.read_csv(test_file, names=columns,
                                    skipinitialspace=True, skiprows=1)

    # Remove the NaN values in the last rows of the tables
    self._df_train = self._df_train[:-1]
    self._df_test = self._df_test[:-1]

    # Apply the threshold to get the labels (1 iff income bracket is ">50K").
    income_thresh = lambda x: ">50K" in x
    self._df_train[label_column] = (
        self._df_train["income_bracket"].apply(income_thresh)).astype(int)
    self._df_test[label_column] = (
        self._df_test["income_bracket"].apply(income_thresh)).astype(int)

    self.label_column = label_column
    self.categorical_columns = categorical_columns
    self.continuous_columns = continuous_columns

  def input_train_fn(self):
    """Input function over the training DataFrame."""
    return self._input_fn(self._df_train)

  def input_test_fn(self):
    """Input function over the test DataFrame."""
    return self._input_fn(self._df_test)

  # TODO(cais): Turn into minibatch feeder
  def _input_fn(self, df):
    """Input data function.

    Creates a dictionary mapping from each continuous feature column name
    (k) to the values of that column stored in a constant Tensor.

    Args:
      df: data feed

    Returns:
      feature columns and labels
    """
    continuous_cols = {k: tf.constant(df[k].values)
                       for k in self.continuous_columns}
    # Creates a dictionary mapping from each categorical feature column name (k)
    # to the values of that column stored in a tf.SparseTensor.
    categorical_cols = {
        k: tf.SparseTensor(
            indices=[[i, 0] for i in range(df[k].size)],
            values=df[k].values,
            dense_shape=[df[k].size, 1])
        for k in self.categorical_columns}
    # Merges the two dictionaries into one.
    # NOTE(review): dict_a.items() + dict_b.items() only works on Python 2;
    # kept as-is because this script targets py2-era TensorFlow.
    feature_cols = dict(continuous_cols.items() + categorical_cols.items())
    # Converts the label column into a constant Tensor.
    label = tf.constant(df[self.label_column].values)
    # Returns the feature columns and the label.
    return feature_cols, label
def _create_experiment_fn(output_dir):  # pylint: disable=unused-argument
  """Experiment creation function.

  Builds the census data source and a Wide & Deep estimator, and wraps them
  in a tf.contrib.learn Experiment driven by the global FLAGS.
  """
  (columns, label_column, wide_columns, deep_columns, categorical_columns,
   continuous_columns) = census_model_config()

  census_data_source = CensusDataSource(FLAGS.data_dir,
                                        TRAIN_DATA_URL, TEST_DATA_URL,
                                        columns, label_column,
                                        categorical_columns,
                                        continuous_columns)

  # Fabricate a TF_CONFIG cluster spec with placeholder PS entries so the
  # run_config picks up the desired number of parameter servers and this
  # worker's index.
  os.environ["TF_CONFIG"] = json.dumps({
      "cluster": {
          tf.contrib.learn.TaskType.PS: ["fake_ps"] *
                                        FLAGS.num_parameter_servers
      },
      "task": {
          "index": FLAGS.worker_index
      }
  })
  config = run_config.RunConfig(master=FLAGS.master_grpc_url)

  estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
      model_dir=FLAGS.model_dir,
      linear_feature_columns=wide_columns,
      dnn_feature_columns=deep_columns,
      dnn_hidden_units=[5],
      config=config)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=census_data_source.input_train_fn,
      eval_input_fn=census_data_source.input_test_fn,
      train_steps=FLAGS.train_steps,
      eval_steps=FLAGS.eval_steps
  )
def main(unused_argv):
  """Entry point: run the experiment built by _create_experiment_fn via learn_runner."""
  print("Worker index: %d" % FLAGS.worker_index)
  learn_runner.run(experiment_fn=_create_experiment_fn,
                   output_dir=FLAGS.output_dir,
                   schedule=FLAGS.schedule)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # Allow "--flag=true/false"-style boolean arguments.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--data_dir",
      type=str,
      default="/tmp/census-data",
      # FIX: corrected "cesnsus" typo in the user-facing help text.
      help="Directory for storing the census data"
  )
  parser.add_argument(
      "--model_dir",
      type=str,
      default="/tmp/census_wide_and_deep_model",
      help="Directory for storing the model"
  )
  parser.add_argument(
      "--output_dir",
      type=str,
      default="",
      help="Base output directory."
  )
  parser.add_argument(
      "--schedule",
      type=str,
      default="local_run",
      help="Schedule to run for this experiment."
  )
  parser.add_argument(
      "--master_grpc_url",
      type=str,
      default="",
      help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
  )
  parser.add_argument(
      "--num_parameter_servers",
      type=int,
      default=0,
      help="Number of parameter servers"
  )
  parser.add_argument(
      "--worker_index",
      type=int,
      default=0,
      help="Worker index (>=0)"
  )
  parser.add_argument(
      "--train_steps",
      type=int,
      default=1000,
      help="Number of training steps"
  )
  parser.add_argument(
      "--eval_steps",
      type=int,
      default=1,
      help="Number of evaluation steps"
  )
  global FLAGS  # pylint:disable=global-at-module-level
  FLAGS, unparsed = parser.parse_known_args()
  # Forward unparsed args so tf.app.run re-parses them consistently.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
LedaLima/incubator-spot | spot-oa/oa/flow/flow_oa.py | 6 | 19125 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import shutil
import os
import sys
import json
import numpy as np
import linecache, bisect
import csv, math
import pandas as pd
import subprocess
import numbers
import api.resources.hdfs_client as HDFSClient
import api.resources.impala_engine as impala
from collections import OrderedDict
from multiprocessing import Process
from utils import Util, ProgressBar
from components.data.data import Data
from components.geoloc.geoloc import GeoLocalization
from components.reputation.gti import gti
from impala.util import as_pandas
import time
class OA(object):
    def __init__(self,date,limit=500,logger=None):
        """Create a flow OA instance for `date` (YYYYMMDD), keeping at most `limit` ML results."""
        self._initialize_members(date,limit,logger)
    def _initialize_members(self,date,limit,logger):
        """Initialize logging, paths, configuration and result containers for the run."""
        # get logger if exists. if not, create new instance.
        # NOTE(review): when a logger is supplied only the 'OA.Flow' name is
        # reused via logging.getLogger; the passed instance itself is not stored.
        self._logger = logging.getLogger('OA.Flow') if logger else Util.get_logger('OA.Flow',create_file=False)
        # initialize required parameters.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date  # day to analyze, formatted YYYYMMDD
        self._table_name = "flow"
        self._flow_results = []
        self._limit = limit  # max number of scored rows to process
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._flow_scores = []
        self._results_delimiter = '\t'
        # get app configuration.
        self._spot_conf = Util.get_spot_conf()
        # # get scores fields conf
        conf_file = "{0}/flow_conf.json".format(self._scrtip_path)
        # OrderedDict preserves the column order declared in flow_conf.json.
        self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
def start(self):
####################
start = time.time()
####################
self._create_folder_structure()
self._clear_previous_executions()
self._add_ipynb()
self._get_flow_results()
self._add_network_context()
self._add_geo_localization()
self._add_reputation()
self._create_flow_scores()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
    def _clear_previous_executions(self):
        """Delete HDFS output and feedback generated by a previous run for the same day."""
        self._logger.info("Cleaning data from previous executions for the day")
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]
        # NOTE(review): the empty-list assignment below is immediately
        # overwritten and is redundant.
        table_schema = []
        HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
        table_schema=['suspicious', 'edge','chords','threat_investigation', 'timeline', 'storyboard', 'summary' ]
        for path in table_schema:
            # month/day partition folders are written without zero padding, hence int().
            HDFSClient.delete_folder("{0}/{1}/hive/oa/{2}/y={3}/m={4}/d={5}".format(HUSER,self._table_name,path,yr,int(mn),int(dy)),user="impala")
        # refresh Impala's view of the (now pruned) table metadata
        impala.execute_query("invalidate metadata")
        #removes Feedback file
        HDFSClient.delete_folder("{0}/{1}/scored_results/{2}{3}{4}/feedback/ml_feedback.csv".format(HUSER,self._table_name,yr,mn,dy))
        #removes json files from the storyboard
        HDFSClient.delete_folder("{0}/{1}/oa/{2}/{3}/{4}/{5}".format(HUSER,self._table_name,"storyboard",yr,mn,dy))
    def _create_folder_structure(self):
        """Create the per-day OA output folders and remember their paths."""
        self._logger.info("Creating folder structure for OA (data and ipynb)")
        self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("flow",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding the advanced mode IPython Notebook")
shutil.copy("{0}/ipynb_templates/Advanced_Mode_master.ipynb".format(self._scrtip_path),"{0}/Advanced_Mode.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
    def _get_flow_results(self):
        """Fetch the day's ML scores from HDFS and keep the top `limit` rows in _flow_scores."""
        self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
        flow_results = "{0}/flow_results.csv".format(self._data_path)
        # get hdfs path from conf file
        HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
        hdfs_path = "{0}/flow/scored_results/{1}/scores/flow_results.csv".format(HUSER,self._date)
        # get results file from hdfs
        get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
        self._logger.info("{0}".format(get_command))
        # validate file exists; a missing or empty result set aborts the whole
        # OA run via sys.exit(1).
        if os.path.isfile(flow_results):
            # read number of results based in the limit specified.
            self._logger.info("Reading {0} flow results file: {1}".format(self._date,flow_results))
            self._flow_results = Util.read_results(flow_results,self._limit,self._results_delimiter)
            if len(self._flow_results) == 0: self._logger.error("There are not flow results.");sys.exit(1)
        else:
            self._logger.error("There was an error getting ML results from HDFS")
            sys.exit(1)
        # keep only the configured columns and append the rank (original row
        # position) as the last element of each row.
        self._logger.info("Filtering required columns based on configuration")
        self._flow_scores.extend([ [ conn[i] for i in self._conf['column_indexes_filter'] ] + [n] for n, conn in enumerate(self._flow_results) ])
def _create_flow_scores(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
value_string = ""
for row in self._flow_scores:
value_string += str(tuple(Util.cast_val(item) for item in row)) + ","
load_into_impala = ("""
INSERT INTO {0}.flow_scores partition(y={2}, m={3}, d={4}) VALUES {1}
""").format(self._db, value_string[:-1], yr, mn, dy)
impala.execute_query(load_into_impala)
    def _add_network_context(self):
        """Append srcIpInternal/destIpInternal flags to each scored connection."""
        # use ipranges to see if the IPs are internals.
        ip_ranges_file = "{0}/context/ipranges.csv".format(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
        # add values to srcIpInternal and destIpInternal.
        flow_scores = iter(self._flow_scores)
        if os.path.isfile(ip_ranges_file):
            self._logger.info("Start adding network context...")
            # get ranges from configuration file.
            self._logger.info("Reading network context file: {0}".format(ip_ranges_file))
            # NOTE(review): map()/filter() are used as list producers here,
            # which only holds on Python 2 (this module targets py2).
            with open(ip_ranges_file, 'rb') as f:
                nc_ranges = [ map(Util.ip_to_int,line.strip('\n').split(',')) for line in f ]
            # get src and dst IPs
            src_ip_index = self._conf["flow_score_fields"]["srcIP"]
            dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
            # add networkcontext per connection.
            ip_internal_ranges = filter(None,nc_ranges)
            self._logger.info("Adding networkcontext to suspicious connections.")
            self._flow_scores = [ conn + [ self._is_ip_internal(conn[src_ip_index],ip_internal_ranges)]+[ self._is_ip_internal(conn[dst_ip_index],ip_internal_ranges)] for conn in flow_scores]
        else:
            # no context file: mark both endpoints as external (0).
            self._flow_scores = [ conn + [0,0] for conn in flow_scores ]
            self._logger.info("WARNING: Network context was not added because the file ipranges.csv does not exist.")
def _is_ip_internal(self,ip, ranges):
result = 0
for row in ranges:
if Util.ip_to_int(ip) >= row[0] and Util.ip_to_int(ip) <= row[1]:
result = 1
break
return result
    def _add_geo_localization(self):
        """Append geo-location and domain columns for the src and dst IPs of each connection."""
        # geo data comes from the shared context/iploc.csv catalog.
        iploc_file = "{0}/context/iploc.csv".format(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
        self._logger.info("Adding geo localization headers")
        flow_scores = iter(self._flow_scores)
        if os.path.isfile(iploc_file):
            self._logger.info("Initializing geo localization component")
            geo = GeoLocalization(iploc_file,self._logger)
            src_ip_index = self._conf["flow_score_fields"]["srcIP"]
            dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
            self._logger.info("Adding geo localization...")
            self._flow_scores = []
            for conn in flow_scores:
                # get geo localization for src ip
                self._logger.debug("Searching geo for src ip {0}".format(conn[src_ip_index]))
                src_geo_dict = geo.get_ip_geo_localization(conn[src_ip_index])
                # get geo localization for dst ip.
                self._logger.debug("Searching geo for dst ip {0}".format(conn[dst_ip_index]))
                dst_geo_dict = geo.get_ip_geo_localization(conn[dst_ip_index])
                # adding columns to the current connection list.
                conn.extend([src_geo_dict["geo_loc"],dst_geo_dict["geo_loc"],src_geo_dict["domain"],dst_geo_dict["domain"]])
                self._flow_scores.extend([conn])
        else:
            # no catalog: pad with empty geo/domain columns so row width stays fixed.
            self._flow_scores = [ conn + ["","","",""] for conn in flow_scores ]
            self._logger.info("WARNING: IP location was not added because the file {0} does not exist.".format(iploc_file))
def _add_reputation(self):
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# read configuration.
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
if "gti" in rep_conf and os.path.isfile(rep_conf['gti']['refclient']):
rep_conf = rep_conf['gti']
# initialize gti module.
self._logger.info("Initializing GTI component")
flow_gti = gti.Reputation(rep_conf,self._logger)
# get all src ips.
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
self._logger.info("Getting GTI reputation for src IPs")
flow_scores_src = iter(self._flow_scores)
# getting reputation for src IPs
src_ips = [ conn[src_ip_index] for conn in flow_scores_src ]
src_rep_results = flow_gti.check(src_ips)
self._logger.info("Getting GTI reputation for dst IPs")
flow_scores_dst = iter(self._flow_scores)
# getting reputation for dst IPs
dst_ips = [ conn[dst_ip_index] for conn in flow_scores_dst ]
dst_rep_results = flow_gti.check(dst_ips)
flow_scores_final = iter(self._flow_scores)
self._flow_scores = []
flow_scores = [conn + [src_rep_results[conn[src_ip_index]]] + [dst_rep_results[conn[dst_ip_index]]] for conn in flow_scores_final ]
self._flow_scores = flow_scores
else:
# add values to gtiSrcRep and gtiDstRep.
flow_scores = iter(self._flow_scores)
self._flow_scores = [ conn + ["",""] for conn in flow_scores ]
self._logger.info("WARNING: IP reputation was not added. No refclient configured")
def _get_oa_details(self):
self._logger.info("Getting OA Flow suspicious details/chord diagram")
# start suspicious connects details process.
p_sp = Process(target=self._get_suspicious_details)
p_sp.start()
# start chord diagram process.
p_ch = Process(target=self._get_chord_details)
p_ch.start()
p_sp.join()
p_ch.join()
def _get_suspicious_details(self,bar=None):
# skip header
sp_connections = iter(self._flow_scores)
# loop connections.
connections_added = []
for conn in sp_connections:
# validate if the connection's details are not already extracted.
if conn in connections_added:
continue
else:
connections_added.append(conn)
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
# get src ip
sip = conn[src_ip_index]
# get dst ip
dip = conn[dst_ip_index]
# get hour and date (i.e. 2014-07-08 10:10:40)
date_array = conn[0].split(' ')
date_array_1 = date_array[0].split('-')
date_array_2 = date_array[1].split(':')
yr = date_array_1[0]
dy = date_array_1[2]
mh = date_array_1[1]
hr = date_array_2[0]
mm = date_array_2[1]
query_to_load = ("""
INSERT INTO TABLE {0}.flow_edge PARTITION (y={2}, m={3}, d={4})
SELECT treceived as tstart,sip as srcip,dip as dstip,sport as sport,dport as dport,proto as proto,flag as flags,
stos as tos,ibyt as ibyt,ipkt as ipkt, input as input, output as output,rip as rip, obyt as obyt,
opkt as opkt, h as hh, trminute as mn from {0}.{1} where ((sip='{7}' AND dip='{8}') or (sip='{8}' AND dip='{7}'))
AND y={2} AND m={3} AND d={4} AND h={5} AND trminute={6};
""").format(self._db,self._table_name,yr, mh, dy, hr, mm, sip,dip)
impala.execute_query(query_to_load)
    def _get_chord_details(self,bar=None):
        """Build chord-diagram data for IPs involved in multiple connections.

        For every IP that appears more than once across the scored
        connections, aggregates the raw flows between that IP and all of its
        peers into the <db>.flow_chords partition for the run date.
        """
        # skip header
        sp_connections = iter(self._flow_scores)
        # column positions of the source/destination IPs in a score row.
        src_ip_index = self._conf["flow_score_fields"]["srcIP"]
        dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
        # get date parameters from the YYYYMMDD run date.
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]
        # get number of times each IP appears (as source or destination).
        srcdict = {}
        for conn in sp_connections:
            if conn[src_ip_index] in srcdict:srcdict[conn[src_ip_index]] += 1
            else:srcdict[conn[src_ip_index]] = 1
            if conn[dst_ip_index] in srcdict:srcdict[conn[dst_ip_index]] += 1
            else:srcdict[conn[dst_ip_index]] = 1
        for (ip,n) in srcdict.items():
            if n > 1:
                # Collect every distinct peer this IP talked to.
                # NOTE(review): peers are read from fixed columns 1 and 2
                # here rather than src_ip_index/dst_ip_index -- confirm the
                # score-row layout guarantees these columns are the IPs.
                ip_list = []
                sp_connections = iter(self._flow_scores)
                for row in sp_connections:
                    if ip == row[1] : ip_list.append(row[2])
                    if ip == row[2] :ip_list.append(row[1])
                ips = list(set(ip_list))
                if len(ips) > 1:
                    # quoted, comma-separated peer list for the SQL IN clause.
                    ips_filter = (",".join(str("'{0}'".format(ip)) for ip in ips))
                    query_to_load = ("""
                        INSERT INTO TABLE {0}.flow_chords PARTITION (y={2}, m={3}, d={4})
                        SELECT '{5}' as ip_threat, sip as srcip, dip as dstip, SUM(ibyt) as ibyt, SUM(ipkt) as ipkt from {0}.{1} where y={2} and m={3}
                        and d={4} and ((sip='{5}' and dip IN({6})) or (sip IN({6}) and dip='{5}')) group by sip,dip,m,d;
                        """).format(self._db,self._table_name,yr,mn,dy,ip,ips_filter)
                    impala.execute_query(query_to_load)
    def _ingest_summary(self):
        """Compute per-minute ingest totals for the run date and insert them
        into the <db>.flow_ingest_summary partition."""
        # get date parameters from the YYYYMMDD run date.
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]
        self._logger.info("Getting ingest summary data for the day")
        ingest_summary_cols = ["date","total"]
        # NOTE(review): result_rows and value_string below are never used.
        result_rows = []
        df_filtered = pd.DataFrame()
        # get ingest summary: per-minute flow counts, skipping rows with
        # missing key fields.
        query_to_load=("""
            SELECT tryear, trmonth, trday, trhour, trminute, COUNT(*) as total
            FROM {0}.{1} WHERE y={2} AND m={3} AND d={4}
            AND unix_tstamp IS NOT NULL
            AND sip IS NOT NULL
            AND sport IS NOT NULL
            AND dip IS NOT NULL
            AND dport IS NOT NULL
            AND ibyt IS NOT NULL
            AND ipkt IS NOT NULL
            AND tryear={2}
            AND cast(treceived as timestamp) IS NOT NULL
            GROUP BY tryear, trmonth, trday, trhour, trminute;
            """).format(self._db,self._table_name, yr, mn, dy)
        results = impala.execute_query(query_to_load)
        if results:
            df_results = as_pandas(results)
            #Forms a new dataframe splitting the minutes from the time column
            df_new = pd.DataFrame([["{0}-{1}-{2} {3}:{4}".format(val['tryear'],val['trmonth'],val['trday'], val['trhour'], val['trminute']), int(val['total']) if not math.isnan(val['total']) else 0 ] for key,val in df_results.iterrows()],columns = ingest_summary_cols)
            value_string = ''
            #Groups the data by minute
            sf = df_new.groupby(by=['date'])['total'].sum()
            df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
            # NOTE(review): df_filtered is always empty, so this append only
            # converts df_per_min into a plain record array.
            df_final = df_filtered.append(df_per_min, ignore_index=True).to_records(False,False)
            if len(df_final) > 0:
                # NOTE(review): tuple(df_final) renders the whole record set
                # as one tuple literal for the VALUES clause -- verify Impala
                # accepts this formatting for multi-row inserts.
                query_to_insert=("""
                    INSERT INTO {0}.flow_ingest_summary PARTITION (y={1}, m={2}, d={3}) VALUES {4};
                    """).format(self._db, yr, mn, dy, tuple(df_final))
                impala.execute_query(query_to_insert)
        else:
            self._logger.info("No data found for the ingest summary")
| apache-2.0 |
anne-urai/serialDDM | old_code/old-hddm/JW_runHDDM.py | 2 | 14138 | #!/usr/bin/env python
# encoding: utf-8
"""
Created by Jan Willem de Gee on 2011-02-16.
Adapted by Anne Urai, 2016
"""
import os, sys, pickle, time
import datetime
import math
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import glob
import itertools
import pp
from IPython import embed as shell
import hddm
import kabuki
import scipy.io # for writing to mat file
#import mne
#import statsmodels.formula.api as sm
#sys.path.append(os.environ['ANALYSIS_HOME'])
#from Tools.other_scripts import functions_jw as myfuncs
# params:
version = 0
run = True
# standard params:
model_base_name = '2ifc_MEGdata_'
model_names = ['1']
nr_samples = 20000 # 50.000 for good results
nr_models = 1 # to test
parallel = False # parallel python not working on MBP
accuracy_coding = False
# -----------------
# drift diffusion -
# -----------------
def run_model(trace_id, data, model_dir, model_name, samples=10000, accuracy_coding=False):
    """Fit a single HDDM stimulus-coding chain and persist its trace DB.

    Every main DDM parameter (t, v, a, dc, z) may vary with the drug
    condition and session number; 'sv' is estimated at the group level only.
    Returns the fitted model.
    """
    import hddm
    condition_factors = ['drug', 'sessionnr']
    model = hddm.HDDMStimCoding(
        data,
        stim_col='stimulus',
        split_param='v',
        drift_criterion=True,
        bias=True,
        include=('sv'),
        group_only_nodes=['sv'],
        depends_on=dict((param, condition_factors) for param in ('t', 'v', 'a', 'dc', 'z')),
        p_outlier=.05)
    model.find_starting_values()
    trace_db = os.path.join(model_dir, model_name + '_db{}'.format(trace_id))
    # Burn in the first 10% of samples, keep every 3rd one afterwards.
    model.sample(samples, burn=samples/10, thin=3, dbname=trace_db, db='pickle')
    return model
def drift_diffusion_hddm(data, samples=10000, n_jobs=6, run=True, parallel=True, model_name='model', model_dir='.', accuracy_coding=False):
import hddm
import os
# run the model:
if run:
if parallel:
job_server = pp.Server(ppservers=(), ncpus=n_jobs)
start_time = time.time()
jobs = [(trace_id, job_server.submit(run_model,(trace_id, data, model_dir, model_name, samples, accuracy_coding), (), ('hddm',))) for trace_id in range(n_jobs)]
results = []
shell()
for s, job in jobs:
results.append(job())
print "Time elapsed: ", time.time() - start_time, "s"
job_server.print_stats()
# save:
for i in range(n_jobs):
model = results[i]
model.save(os.path.join(model_dir, '{}_{}'.format(model_name,i)))
else:
start_time = time.time()
model = run_model(3, data, model_dir, model_name, samples, accuracy_coding)
model.save(os.path.join(model_dir, '{}_md{}'.format(model_name, 3)))
# print point estimates
results = model.gen_stats()
results.to_csv(os.path.join(fig_dir, 'diagnostics', 'results3.csv'))
# dic:
text_file = open(os.path.join(fig_dir, 'diagnostics', 'DIC3.txt'), 'w')
text_file.write("Model {}: {}\n".format(m, model.dic))
text_file.close()
print "Time elapsed: ", time.time() - start_time, "s"
# load the models:
else:
print 'loading existing model(s)'
if parallel:
model = []
for i in range(n_jobs):
model.append(hddm.load(os.path.join(model_dir, '{}_{}'.format(model_name,i))))
else:
model = hddm.load(os.path.join(model_dir, '{}_md{}'.format(model_name, 1)))
return model
# ---------------------------------------------------------------------------
# Script settings: pick the model variant, load the data, prepare the output
# folders, then either fit (run=True) or fall through to the analysis branch.
# ---------------------------------------------------------------------------
# settings:
# ---------
# model_name:
model_name = model_names[version]
# data:
# put in 1 folder
data_path1 = os.path.join('/Users/anne/Data/projects/0/neurodec/Data/MEG-PL/Data/HDDM/', '2ifc_data_hddm.csv')
data = pd.read_csv(data_path1)
# model dir:
model_dir = '/Users/anne/Data/projects/0/neurodec/Data/MEG-PL/Data/HDDM/'
# figures dir:
fig_dir = os.path.join(model_dir, model_base_name + model_name)
print(fig_dir)
try:
    # NOTE(review): os.system does not raise on a failing command, so this
    # bare except never fires for mkdir failures; consider os.makedirs.
    os.system('mkdir {}'.format(fig_dir))
    os.system('mkdir {}'.format(os.path.join(fig_dir, 'diagnostics')))
except:
    pass
# subjects:
subjects = np.unique(data.subj_idx)
nr_subjects = np.unique(data.subj_idx).shape[0]
print '# subjects = {}'.format(nr_subjects)
if run:
    # Fit the model(s) and save the traces to disk.
    print 'running {}'.format(model_base_name+model_name)
    model = drift_diffusion_hddm(data=data, samples=nr_samples, n_jobs=nr_models, run=run, parallel=parallel, model_name=model_base_name+model_name, model_dir=model_dir, accuracy_coding=accuracy_coding)
else:
    # -----------------
    # write to file
    # -----------------
    # Load previously fitted model(s) and export stats, .mat dumps and
    # diagnostics.
    model_nr = 0
    model = drift_diffusion_hddm(data=data, samples=nr_samples, n_jobs=nr_models, run=run,
                                 parallel=parallel, model_name=model_base_name+model_name, model_dir=model_dir, accuracy_coding=accuracy_coding)
    # Node names of interest, split by pupil condition (0 = low, 1 = high);
    # *_subj lists address the per-subject nodes.
    params_of_interest_0 = ['z(0)', 'a(0)', 'v(0)', 'dc(0)', 't(0)', 'sv']
    params_of_interest_1 = ['z(1)', 'a(1)', 'v(1)', 'dc(1)', 't(1)', 'sv']
    params_of_interest_0s = ['z_subj(0)', 'a_subj(0)', 'v_subj(0)', 'dc_subj(0)', 't_subj(0)']
    params_of_interest_1s = ['z_subj(1)', 'a_subj(1)', 'v_subj(1)', 'dc_subj(1)', 't_subj(1)']
    titles = ['Starting point', 'Boundary sep.', 'Drift rate', 'Drift criterion', 'Non-dec. time', 'Drift rate var']
    # NOTE(review): leftover interactive IPython breakpoint.
    shell()
    # point estimates:
    if parallel:
        results = model[model_nr].print_stats()
    else:
        results = model.gen_stats()
    results.to_csv(os.path.join(fig_dir, 'diagnostics', 'results.csv'))
    # Dump each model to a .mat file for MATLAB-side analyses.
    for i in range(nr_models):
        md = model[i]
        # remove fields that scipy io cant handle
        unwanted = [None]
        # NOTE(review): the comprehension variable `i` below leaks in
        # Python 2 and clobbers the outer loop index (it becomes None),
        # which breaks the savemat filename and the loop -- confirm/fix.
        unwanted_keys = [k for k, v in md.items() if any([v is i for i in unwanted])]
        for k in unwanted_keys: del md[k]
        scipy.io.savemat(os.path.join(model_dir, '{}_{}_mat'.format(model_name,i)), md)
    # NOTE(review): second leftover interactive breakpoint.
    shell()
    # gelman rubic:
    # only make sense when several models were run
    gr = hddm.analyze.gelman_rubin(model)
    text_file = open(os.path.join(fig_dir, 'diagnostics', 'gelman_rubic.txt'), 'w')
    for p in gr.items():
        text_file.write("%s:%s\n" % p)
    text_file.close()
    # dic:
    text_file = open(os.path.join(fig_dir, 'diagnostics', 'DIC.txt'), 'w')
    for m in range(nr_models):
        text_file.write("Model {}: {}\n".format(m, model[m].dic))
    text_file.close()
    # # analytic plots:
    size_plot = nr_subjects / 3.0 * 1.5
    model[model_nr].plot_posterior_predictive(samples=10, bins=100, figsize=(6,size_plot), save=True, path=os.path.join(fig_dir, 'diagnostics'), format='pdf')
    model[model_nr].plot_posteriors(save=True, path=os.path.join(fig_dir, 'diagnostics'), format='pdf')
    # posteriors:
    # ----------
    # Collect the group-level traces for both pupil conditions.
    traces_0 = []
    traces_1 = []
    for p in range(len(params_of_interest_0)):
        traces_0.append(model[model_nr].nodes_db.node[params_of_interest_0[p]].trace.gettrace())
        traces_1.append(model[model_nr].nodes_db.node[params_of_interest_1[p]].trace.gettrace())
    # fix starting point: rescale relative z by the mean boundary separation.
    traces_0[0] = traces_0[0] * np.mean((traces_0[1].mean(),traces_1[1].mean()))
    traces_1[0] = traces_1[0] * np.mean((traces_0[1].mean(),traces_1[1].mean()))
    # # make absolute posteriors:
    # traces_0[4] = abs(traces_0[4])
    # traces_1[4] = abs(traces_1[4])
    # traces_0[5] = abs(traces_0[5])
    # traces_1[5] = abs(traces_1[5])
    # -----------------
    # plot
    # -----------------
    # Global seaborn/matplotlib style shared by all figures below.
    sns.set(style='ticks', font='Arial', font_scale=1, rc={
        'axes.linewidth': 0.25,
        'axes.labelsize': 7,
        'axes.titlesize': 7,
        'xtick.labelsize': 6,
        'ytick.labelsize': 6,
        'legend.fontsize': 6,
        'xtick.major.width': 0.25,
        'ytick.major.width': 0.25,
        'text.color': 'Black',
        'axes.labelcolor':'Black',
        'xtick.color':'Black',
        'ytick.color':'Black',} )
    sns.plotting_context()
    # Two-sided posterior probability that condition 0 exceeds condition 1,
    # per parameter.
    stats = []
    for p in range(len(params_of_interest_0)):
        data = [traces_0[p], traces_1[p]]
        stat = np.mean(data[0] > data[1])
        stats.append(min(stat, 1-stat))
    stats = np.array(stats)
    # stats_corrected = mne.stats.fdr_correction(stats, 0.05)[1]
    # NOTE(review): no FDR correction is applied -- p-values are raw.
    stats_corrected = stats
    # One posterior-density panel per parameter, low vs high condition.
    fig, axes = plt.subplots(nrows=1, ncols=len(params_of_interest_0), figsize=(len(params_of_interest_0)*1.5,2.5))
    ax_nr = 0
    for p in range(len(params_of_interest_0)):
        data = [traces_0[p], traces_1[p]]
        ax = axes[ax_nr]
        for d, label, c in zip(data, ['low', 'high'], ['blue', 'red']):
            sns.kdeplot(d, vertical=True, shade=True, color=c, label=label, ax=ax)
            # sns.distplot(d, vertical=True, hist=False, kde_kws={"shade": True}, norm_hist=True, color=c, label=label, ax=ax)
        ax.set_xlabel('Posterior probability')
        ax.set_title(titles[p]+'\np={}'.format(round(stats_corrected[p],4)))
        ax.set_xlim(xmin=0)
        # ax.set_ylim(-1,2)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(0.5)
        ax.tick_params(width=0.5)
        ax_nr+=1
    sns.despine(offset=10, trim=True)
    axes[0].set_ylabel('Parameter estimate (a.u.)')
    plt.tight_layout()
    fig.savefig(os.path.join(fig_dir, 'posteriors.pdf'))
    # import corner
    # # fig = plt.figure()
    # fig = corner.corner(np.array(traces_0).T, color='b', labels=titles, **{'lw':1})
    # corner.corner(np.array(traces_1).T, color='r', labels=titles, fig=fig, **{'lw':1})
    # fig.savefig(os.path.join(fig_dir, 'corner.pdf'))
    # #######
    # p = 5
    # data = [traces_0[p], t0[p]]
    # fig = plt.figure(figsize=(3,3))
    # ax = fig.add_subplot(111)
    # for d, label, c in zip(data, ['All trials', 'TPR fit'], ['black', 'red']):
    #     sns.kdeplot(d, vertical=True, shade=True, color=c, label=label, ax=ax)
    #     # sns.distplot(d, vertical=True, hist=False, kde_kws={"shade": True}, norm_hist=True, color=c, label=label, ax=ax)
    # ax.set_xlabel('Posterior probability')
    # ax.set_ylabel('Drift rate var')
    # ax.set_title(titles[p]+'\np={}'.format(round(np.mean(data[0] > data[1]),4)))
    # plt.tight_layout()
    # sns.despine(offset=10, trim=True)
    # fig.savefig(os.path.join(fig_dir, 'posteriors_sv.pdf'))
    #
#
# barplot:
# --------
# all:
parameters_h = []
parameters_l = []
p_value = []
ind = np.ones(nr_subjects, dtype=bool)
for p in range(len(params_of_interest_0s)):
parameters_h.append(np.array([model[model_nr].values.get('{}.'.format(params_of_interest_1s[p]) + str(s)) for s in subjects])[ind])
parameters_l.append(np.array([model[model_nr].values.get('{}.'.format(params_of_interest_0s[p]) + str(s)) for s in subjects])[ind])
param_names = ['z', 'a', 'v', 'dc', 't']
# param_names = ['z', 'a', 'v1', 'v2', 'dc1', 'dc2', 't']
parameters = pd.concat((pd.DataFrame(np.vstack(parameters_h).T, columns=param_names), pd.DataFrame(np.vstack(parameters_l).T, columns=param_names)))
parameters['pupil'] = np.concatenate((np.ones(len(subjects)), np.zeros(len(subjects))))
parameters['subject'] = np.concatenate((subjects, subjects))
k = parameters.groupby(['subject', 'pupil']).mean()
k_s = k.stack().reset_index()
k_s.columns = ['subject', 'pupil', 'param', 'value']
parameters.to_csv(os.path.join(fig_dir, 'params.csv'))
# plot:
locs = np.arange(0,len(param_names))
bar_width = 0.2
fig = plt.figure(figsize=( (1+(len(params_of_interest_1s)*0.3)),2))
ax = fig.add_subplot(111)
sns.barplot(x='param', y='value', units='subject', hue='pupil', hue_order=[1,0], data=k_s, palette=['r', 'b'], ci=None, linewidth=0, alpha=0.5, ax=ax)
sns.stripplot(x="param", y="value", hue='pupil', hue_order=[1,0], data=k_s, jitter=False, size=2, palette=['r', 'b'], edgecolor='black', linewidth=0.25, ax=ax, split=True, alpha=1)
for r in range(len(param_names)):
values = np.vstack((k_s[(k_s['param'] == param_names[r]) & (k_s['pupil'] == 1)].value, k_s[(k_s['param'] == param_names[r]) & (k_s['pupil'] == 0)].value))
x = np.array([locs[r]-bar_width, locs[r]+bar_width])
ax.plot(x, values, color='black', lw=0.5, alpha=0.5)
# # add p-values:
for r in range(len(param_names)):
p1 = myfuncs.permutationTest(k_s[(k_s['pupil']==1) & (k_s['param']==param_names[r])].value, k_s[(k_s['pupil']==0) & (k_s['param']==param_names[r])].value, paired=True)[1]
if p1 < 0.05:
plt.text(s='{}'.format(round(p1, 3)), x=locs[r], y=plt.gca().get_ylim()[1]-((plt.gca().get_ylim()[1] - plt.gca().get_ylim()[0]) / 10.0), size=5, horizontalalignment='center',)
ax.legend_.remove()
plt.xticks(locs, param_names, rotation=45)
sns.despine(offset=10, trim=True)
plt.tight_layout()
fig.savefig(os.path.join(fig_dir, 'bars_all.pdf'))
k_s = parameters.groupby(['subject', 'pupil']).mean()
k_s = k.stack().reset_index()
k_s.columns = ['subject', 'pupil', 'param', 'value']
k_s = k_s[(k_s['param']=='dc')]
param_names = ['dc']
k_s['value'] = abs(k_s['value'])
# plot:
locs = np.arange(0,len(param_names))
bar_width = 0.2
fig = plt.figure(figsize=(1.5,2))
ax = fig.add_subplot(111)
sns.barplot(x='param', y='value', units='subject', hue='pupil', hue_order=[1,0], data=k_s, palette=['r', 'b'], ci=None, linewidth=0, alpha=0.5, ax=ax)
sns.stripplot(x="param", y="value", hue='pupil', hue_order=[1,0], data=k_s, jitter=False, size=2, palette=['r', 'b'], edgecolor='black', linewidth=0.25, ax=ax, split=True, alpha=1)
for r in range(len(param_names)):
values = np.vstack((k_s[(k_s['param'] == param_names[r]) & (k_s['pupil'] == 1)].value, k_s[(k_s['param'] == param_names[r]) & (k_s['pupil'] == 0)].value))
x = np.array([locs[r]-bar_width, locs[r]+bar_width])
ax.plot(x, values, color='black', lw=0.5, alpha=0.5)
# # add p-values:
for r in range(len(param_names)):
p1 = myfuncs.permutationTest(k_s[(k_s['pupil']==1) & (k_s['param']==param_names[r])].value, k_s[(k_s['pupil']==0) & (k_s['param']==param_names[r])].value, paired=True)[1]
if p1 < 0.05:
plt.text(s='{}'.format(round(p1, 3)), x=locs[r], y=plt.gca().get_ylim()[1]-((plt.gca().get_ylim()[1] - plt.gca().get_ylim()[0]) / 10.0), size=5, horizontalalignment='center',)
ax.legend_.remove()
plt.xticks(locs, param_names, rotation=45)
sns.despine(offset=10, trim=True)
plt.tight_layout()
fig.savefig(os.path.join(fig_dir, 'bars_all2.pdf'))
| mit |
miaecle/deepchem | deepchem/trans/tests/test_transformers.py | 1 | 25602 | """
Tests for transformer objects.
"""
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
from deepchem.trans.transformers import DataTransforms
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
import tensorflow as tf
import scipy.ndimage
def load_solubility_data():
  """Load the example solubility dataset.

  One regression task ("log-solubility"), featurized with 1024-bit
  circular fingerprints.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = ["log-solubility"]
  # Removed unused local `task_type`.
  input_file = os.path.join(current_dir, "../../models/tests/example.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  return loader.create_dataset(input_file)
def load_feat_multitask_data():
  """Load example with numerical features, tasks."""
  current_dir = os.path.dirname(os.path.abspath(__file__))
  features = ["feat0", "feat1", "feat2", "feat3", "feat4", "feat5"]
  featurizer = dc.feat.UserDefinedFeaturizer(features)
  tasks = ["task0", "task1", "task2", "task3", "task4", "task5"]
  input_file = os.path.join(current_dir,
                            "../../models/tests/feat_multitask_example.csv")
  loader = dc.data.UserCSVLoader(
      tasks=tasks, featurizer=featurizer, id_field="id")
  # Use create_dataset for consistency with load_solubility_data;
  # featurize() is the deprecated alias of the same method.
  return loader.create_dataset(input_file)
def load_gaussian_cdf_data():
  """Load example with numbers sampled from Gaussian normal distribution.
  Each feature and task is a column of values that is sampled
  from a normal distribution of mean 0, stdev 1."""
  current_dir = os.path.dirname(os.path.abspath(__file__))
  features = ["feat0", "feat1"]
  featurizer = dc.feat.UserDefinedFeaturizer(features)
  tasks = ["task0", "task1"]
  input_file = os.path.join(current_dir,
                            "../../models/tests/gaussian_cdf_example.csv")
  loader = dc.data.UserCSVLoader(
      tasks=tasks, featurizer=featurizer, id_field="id")
  # Use create_dataset for consistency with load_solubility_data;
  # featurize() is the deprecated alias of the same method.
  return loader.create_dataset(input_file)
def load_unlabelled_data():
  """Load an example dataset that has features but no task labels."""
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = []
  input_file = os.path.join(current_dir, "../../data/tests/no_labels.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  # Use create_dataset for consistency with load_solubility_data;
  # featurize() is the deprecated alias of the same method.
  return loader.create_dataset(input_file)
class TestTransformers(unittest.TestCase):
"""
Test top-level API for transformer objects.
"""
  def setUp(self):
    """Cache the test directory and one 28x28 MNIST image for the
    DataTransforms tests."""
    super(TestTransformers, self).setUp()
    self.current_dir = os.path.dirname(os.path.abspath(__file__))
    '''
    init to load the MNIST data for DataTransforms Tests
    '''
    # NOTE(review): downloads MNIST on first run -- requires network access.
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    train = dc.data.NumpyDataset(x_train, y_train)
    # extract only the images (no need of the labels)
    data = (train.X)[0]
    # reshaping the vector to image
    data = np.reshape(data, (28, 28))
    self.d = data
def test_y_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_transform_unlabelled(self):
ul_dataset = load_unlabelled_data()
# transforming y should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(
transform_X=True, dataset=ul_dataset).transform(ul_dataset)
def test_X_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
  def test_y_log_transformer_select(self):
    """Tests logarithmic data transformer applied to a subset of tasks."""
    multitask_dataset = load_feat_multitask_data()
    dfe = pd.read_csv(
        os.path.join(self.current_dir,
                     "../../models/tests/feat_multitask_example.csv"))
    tid = []
    tasklist = ["task0", "task3", "task4", "task5"]
    first_task = "task0"
    # Convert task names into 0-based task indices, relative to the first
    # task column in the CSV.
    for task in tasklist:
      tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
      tid = np.concatenate((tid, np.array([tiid])))
    tasks = tid.astype(int)
    log_transformer = dc.trans.LogTransformer(
        transform_y=True, tasks=tasks, dataset=multitask_dataset)
    X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
                    multitask_dataset.w, multitask_dataset.ids)
    multitask_dataset = log_transformer.transform(multitask_dataset)
    X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
                            multitask_dataset.w, multitask_dataset.ids)
    # Check ids are unchanged.
    for id_elt, id_t_elt in zip(ids, ids_t):
      assert id_elt == id_t_elt
    # Check X is unchanged since this is a y transformer
    np.testing.assert_allclose(X, X_t)
    # Check w is unchanged since this is a y transformer
    np.testing.assert_allclose(w, w_t)
    # Check the selected task columns of y are now logarithmic.
    np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
    # Check that untransform does the right thing.
    np.testing.assert_allclose(log_transformer.untransform(y_t), y)
  def test_X_log_transformer_select(self):
    # Tests logarithmic data transformer applied to a subset of features.
    multitask_dataset = load_feat_multitask_data()
    dfe = pd.read_csv(
        os.path.join(self.current_dir,
                     "../../models/tests/feat_multitask_example.csv"))
    fid = []
    featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
    first_feature = "feat0"
    # Convert feature names into 0-based feature indices, relative to the
    # first feature column in the CSV.
    for feature in featurelist:
      fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
      fid = np.concatenate((fid, np.array([fiid])))
    features = fid.astype(int)
    log_transformer = dc.trans.LogTransformer(
        transform_X=True, features=features, dataset=multitask_dataset)
    X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
                    multitask_dataset.w, multitask_dataset.ids)
    multitask_dataset = log_transformer.transform(multitask_dataset)
    X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
                            multitask_dataset.w, multitask_dataset.ids)
    # Check ids are unchanged.
    for id_elt, id_t_elt in zip(ids, ids_t):
      assert id_elt == id_t_elt
    # Check y is unchanged since this is a X transformer
    np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is a X transformer
    np.testing.assert_allclose(w, w_t)
    # Check the selected feature columns of X are now logarithmic.
    np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
    # Check that untransform does the right thing.
    np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
  def test_X_normalization_transformer(self):
    """Tests normalization transformer applied to the feature matrix."""
    solubility_dataset = load_solubility_data()
    normalization_transformer = dc.trans.NormalizationTransformer(
        transform_X=True, dataset=solubility_dataset)
    X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
                    solubility_dataset.w, solubility_dataset.ids)
    solubility_dataset = normalization_transformer.transform(solubility_dataset)
    X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
                            solubility_dataset.w, solubility_dataset.ids)
    # Check ids are unchanged.
    for id_elt, id_t_elt in zip(ids, ids_t):
      assert id_elt == id_t_elt
    # Check y is unchanged since this is a X transformer
    np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is a y transformer
    np.testing.assert_allclose(w, w_t)
    # Check that X_t has zero mean, unit std.
    # np.set_printoptions(threshold='nan')
    mean = X_t.mean(axis=0)
    assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
    orig_std_array = X.std(axis=0)
    std_array = X_t.std(axis=0)
    # Entries with zero std are not normalized, so only check columns that
    # had non-zero variance to begin with.
    for orig_std, std in zip(orig_std_array, std_array):
      if not np.isclose(orig_std, 0):
        assert np.isclose(std, 1)
    # TODO(rbharath): Untransform doesn't work properly for binary feature
    # vectors. Need to figure out what's wrong here. (low priority)
    ## Check that untransform does the right thing.
    # np.testing.assert_allclose(normalization_transformer.untransform(X_t), X)
def test_cdf_X_transformer(self):
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
sorted = np.sort(X_t, axis=0)
np.testing.assert_allclose(sorted, target)
def test_cdf_y_transformer(self):
  """Test CDFTransformer on y of a Gaussian normal dataset.

  Verifies that X/w/ids pass through untouched, that the transformed y
  values form a uniform grid on [0, 1] once sorted, and that untransform
  approximately inverts the transform.
  """
  # Expected sorted CDF values: uniform grid on [0, 1], one column per task.
  target = np.array(np.transpose(np.linspace(0., 1., 1001)))
  target = np.transpose(np.array(np.append([target], [target], axis=0)))
  gaussian_dataset = load_gaussian_cdf_data()
  bins = 1001
  cdf_transformer = dc.trans.CDFTransformer(
      transform_y=True, dataset=gaussian_dataset, bins=bins)
  X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
                  gaussian_dataset.ids)
  gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
  X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
                          gaussian_dataset.w, gaussian_dataset.ids)
  # Check ids are unchanged.
  for id_elt, id_t_elt in zip(ids, ids_t):
    assert id_elt == id_t_elt
  # Check X is unchanged since this is a y transformer.
  np.testing.assert_allclose(X, X_t)
  # Check w is unchanged since this is a y transformer.
  np.testing.assert_allclose(w, w_t)
  # Check y is now holding the proper values when sorted.
  # Renamed from `sorted` to avoid shadowing the builtin.
  y_sorted = np.sort(y_t, axis=0)
  np.testing.assert_allclose(y_sorted, target)
  # Check that untransform approximately inverts the transform.
  y_restored = cdf_transformer.untransform(y_t)
  assert np.max(y_restored - y) < 1e-5
def test_clipping_X_transformer(self):
  """Test clipping transformer on X of singletask dataset."""
  n_samples, n_features, n_tasks = 10, 3, 1
  ids = np.arange(n_samples)
  # All features start at 6 and must be clipped down to x_max == 5.
  X = 6. * np.ones((n_samples, n_features))
  target = 5. * np.ones((n_samples, n_features))
  y = np.zeros((n_samples, n_tasks))
  w = np.ones((n_samples, n_tasks))
  dataset = dc.data.NumpyDataset(X, y, w, ids)
  transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
  clipped = transformer.transform(dataset)
  # ids must pass through untouched.
  for id_elt, id_t_elt in zip(ids, clipped.ids):
    assert id_elt == id_t_elt
  # y and w are untouched by an X transformer.
  np.testing.assert_allclose(y, clipped.y)
  np.testing.assert_allclose(w, clipped.w)
  # X is clipped at x_max.
  np.testing.assert_allclose(clipped.X, target)
def test_clipping_y_transformer(self):
  """Test clipping transformer on y of singletask dataset."""
  n_samples, n_features, n_tasks = 10, 3, 1
  ids = np.arange(n_samples)
  X = np.zeros((n_samples, n_features))
  # All labels start at 6 and must be clipped down to y_max == 5.
  y = 6. * np.ones((n_samples, n_tasks))
  target = 5. * np.ones((n_samples, n_tasks))
  w = np.ones((n_samples, n_tasks))
  dataset = dc.data.NumpyDataset(X, y, w, ids)
  transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
  clipped = transformer.transform(dataset)
  # ids must pass through untouched.
  for id_elt, id_t_elt in zip(ids, clipped.ids):
    assert id_elt == id_t_elt
  # X and w are untouched by a y transformer.
  np.testing.assert_allclose(X, clipped.X)
  np.testing.assert_allclose(w, clipped.w)
  # y is clipped at y_max.
  np.testing.assert_allclose(clipped.y, target)
def test_power_X_transformer(self):
  """Test PowerTransformer on X of a Gaussian normal dataset.

  The transformer appends one block of columns per requested power.
  """
  gaussian_dataset = load_gaussian_cdf_data()
  powers = [1, 2, 0.5]
  power_transformer = dc.trans.PowerTransformer(
      transform_X=True, powers=powers)
  X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
                  gaussian_dataset.ids)
  gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
  X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
                          gaussian_dataset2.w, gaussian_dataset2.ids)
  # Check ids are unchanged.
  for id_elt, id_t_elt in zip(ids, ids_t):
    assert id_elt == id_t_elt
  # Check y is unchanged since this is an X transformer.
  np.testing.assert_allclose(y, y_t)
  # Check w is unchanged since this is an X transformer.
  np.testing.assert_allclose(w, w_t)
  # Check X now holds one column block per power.
  # Shapes are integers: compare exactly instead of with assert_allclose.
  assert X_t.shape[1] == len(powers) * X.shape[1]
  np.testing.assert_allclose(X, X_t[:, :2])
  np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
  np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
def test_power_y_transformer(self):
  """Test PowerTransformer on y of a Gaussian normal dataset.

  The transformer appends one block of columns per requested power and
  untransform must invert the mapping.
  """
  gaussian_dataset = load_gaussian_cdf_data()
  powers = [1, 2, 0.5]
  power_transformer = dc.trans.PowerTransformer(
      transform_y=True, powers=powers)
  X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
                  gaussian_dataset.ids)
  gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
  X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
                          gaussian_dataset2.w, gaussian_dataset2.ids)
  # Check ids are unchanged.
  for id_elt, id_t_elt in zip(ids, ids_t):
    assert id_elt == id_t_elt
  # Check X is unchanged since this is a y transformer.
  # (Comments previously said "X transformer" -- fixed.)
  np.testing.assert_allclose(X, X_t)
  # Check w is unchanged since this is a y transformer.
  np.testing.assert_allclose(w, w_t)
  # Check y now holds one column block per power.
  # Shapes are integers: compare exactly instead of with assert_allclose.
  assert y_t.shape[1] == len(powers) * y.shape[1]
  np.testing.assert_allclose(y, y_t[:, :2])
  np.testing.assert_allclose(np.power(y, 2), y_t[:, 2:4])
  np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 4:])
  # Check that untransform does the right thing.
  np.testing.assert_allclose(power_transformer.untransform(y_t), y)
def test_coulomb_fit_transformer(self):
  """CoulombFitTransformer must flatten random square matrices to 2-D."""
  n_samples, n_features, n_tasks = 10, 3, 1
  ids = np.arange(n_samples)
  # One random n_features x n_features "Coulomb matrix" per sample.
  X = np.random.rand(n_samples, n_features, n_features)
  y = np.zeros((n_samples, n_tasks))
  w = np.ones((n_samples, n_tasks))
  dataset = dc.data.NumpyDataset(X, y, w, ids)
  fit_transformer = dc.trans.CoulombFitTransformer(dataset)
  transformed = fit_transformer.X_transform(dataset.X)
  assert transformed.ndim == 2
def test_IRV_transformer(self):
  """IRVTransformer features must contain sorted similarities to train set."""
  n_features, n_samples, test_samples, n_tasks = 128, 20, 5, 2
  X = np.random.randint(2, size=(n_samples, n_features))
  dataset = dc.data.NumpyDataset(X,
                                 np.zeros((n_samples, n_tasks)),
                                 np.ones((n_samples, n_tasks)),
                                 ids=None)
  X_test = np.random.randint(2, size=(test_samples, n_features))
  test_dataset = dc.data.NumpyDataset(X_test,
                                      np.zeros((test_samples, n_tasks)),
                                      np.ones((test_samples, n_tasks)),
                                      ids=None)
  # Jaccard-like similarity of the first test sample to every train sample.
  intersect = np.sum(X_test[0, :] * X, axis=1, dtype=float)
  union = np.sum(np.sign(X_test[0, :] + X), axis=1, dtype=float)
  sims = sorted(intersect / union, reverse=True)
  IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
  test_dataset_trans = IRV_transformer.transform(test_dataset)
  dataset_trans = IRV_transformer.transform(dataset)
  # 10 similarity slots per task.
  assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
  assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
  assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
  # A train sample must not be matched to itself with similarity 1.
  assert not np.isclose(dataset_trans.X[0, 0], 1.)
def test_featurization_transformer(self):
  """Re-featurizing raw delaney data must produce circular fingerprints."""
  fp_size = 2048
  tasks, all_dataset, transformers = load_delaney('Raw')
  train = all_dataset[0]
  featurizer = dc.feat.CircularFingerprint(size=fp_size)
  transformer = FeaturizationTransformer(
      transform_X=True, dataset=train, featurizer=featurizer)
  new_train = transformer.transform(train)
  # Labels are untouched; X becomes a fingerprint vector of length fp_size.
  self.assertEqual(new_train.y.shape, train.y.shape)
  self.assertEqual(new_train.X.shape[-1], fp_size)
def test_blurring(self):
  """Gaussian blur must match scipy.ndimage.gaussian_filter."""
  transformer = DataTransforms(self.d)
  blurred = transformer.gaussian_blur(sigma=1.5)
  expected = scipy.ndimage.gaussian_filter(self.d, 1.5)
  assert np.allclose(expected, blurred)
def test_center_crop(self):
  """Center crop must match manual slicing around the image centre."""
  transformer = DataTransforms(self.d)
  x_crop = y_crop = 50
  cropped = transformer.center_crop(x_crop, y_crop)
  height, width = self.d.shape[0], self.d.shape[1]
  x_start = width // 2 - x_crop // 2
  y_start = height // 2 - y_crop // 2
  expected = self.d[y_start:y_start + y_crop, x_start:x_start + x_crop]
  assert np.allclose(expected, cropped)
def test_crop(self):
  """Crop with per-edge margins must match manual slicing."""
  transformer = DataTransforms(self.d)
  cropped = transformer.crop(0, 10, 0, 10)
  height, width = self.d.shape[0], self.d.shape[1]
  # Presumably crop(left, top, right, bottom): 10 rows off top and bottom,
  # nothing off the sides -- TODO confirm against DataTransforms.crop.
  expected = self.d[10:height - 10, 0:width - 0]
  assert np.allclose(cropped, expected)
def test_convert2gray(self):
  """Grayscale conversion must match the standard luminance weights."""
  transformer = DataTransforms(self.d)
  gray = transformer.convert2gray()
  # ITU-R 601 luma coefficients applied to the first three channels.
  expected = np.dot(self.d[..., :3], [0.2989, 0.5870, 0.1140])
  assert np.allclose(expected, gray)
def test_rotation(self):
  """Rotation must match scipy.ndimage.rotate for several angles."""
  transformer = DataTransforms(self.d)
  for angle in (0, 5, 10, 90):
    assert np.allclose(transformer.rotate(angle),
                       scipy.ndimage.rotate(self.d, angle))
  # A rotation by -90 degrees is equivalent to one by 270 degrees.
  assert np.allclose(transformer.rotate(-90),
                     scipy.ndimage.rotate(self.d, 270))
def test_flipping(self):
  """Left-right and up-down flips must match numpy's fliplr/flipud."""
  transformer = DataTransforms(self.d)
  assert np.allclose(transformer.flip(direction="ud"), np.flipud(self.d))
  assert np.allclose(transformer.flip(direction="lr"), np.fliplr(self.d))
def test_scaling(self):
  """Scaling must match PIL's resize."""
  from PIL import Image
  # Check Scales
  dt = DataTransforms(self.d)
  h = 150
  w = 150
  scale = Image.fromarray(self.d).resize((h, w))
  check_scale = dt.scale(h, w)
  # BUG FIX: the result of np.allclose was previously discarded, so this
  # test could never fail; assert it.
  assert np.allclose(scale, check_scale)
def test_shift(self):
  """Shift must match scipy.ndimage.shift for 2-D and 3-D images."""
  transformer = DataTransforms(self.d)
  height = width = 5
  if len(self.d.shape) == 2:
    expected = scipy.ndimage.shift(self.d, [height, width])
  elif len(self.d.shape) == 3:
    # Do not shift along the channel axis.
    expected = scipy.ndimage.shift(self.d, [height, width, 0])
  shifted = transformer.shift(width, height)
  assert np.allclose(expected, shifted)
def test_gaussian_noise(self):
  """Gaussian noise must be reproducible with a fixed numpy seed."""
  transformer = DataTransforms(self.d)
  np.random.seed(0)
  expected = self.d + np.random.normal(loc=0, scale=25.5, size=self.d.shape)
  # Re-seed so the transformer draws the identical noise sample.
  np.random.seed(0)
  noised = transformer.gaussian_noise(mean=0, std=25.5)
  assert np.allclose(expected, noised)
def test_salt_pepper_noise(self):
  """Salt-and-pepper noise must be reproducible with a fixed numpy seed."""
  dt = DataTransforms(self.d)
  np.random.seed(0)
  prob = 0.05
  # BUG FIX: operate on a copy. The original code aliased self.d, so the
  # in-place assignments below corrupted the shared test image for every
  # test running afterwards.
  random_noise = self.d.copy()
  noise = np.random.random(size=self.d.shape)
  random_noise[noise < (prob / 2)] = 0
  random_noise[noise > (1 - prob / 2)] = 255
  # Re-seed so the transformer draws the identical noise sample.
  np.random.seed(0)
  check_random_noise = dt.salt_pepper_noise(prob, salt=255, pepper=0)
  assert np.allclose(random_noise, check_random_noise)
def test_DAG_transformer(self):
  """Tests the DAG transformer."""
  np.random.seed(123)
  tf.random.set_seed(123)
  n_tasks = 1
  # Load mini log-solubility dataset.
  featurizer = dc.feat.ConvMolFeaturizer()
  tasks = ["outcome"]
  input_file = os.path.join(self.current_dir,
                            "../../models/tests/example_regression.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  dataset = loader.create_dataset(input_file)
  transformer = dc.trans.DAGTransformer(max_atoms=50)
  dataset = transformer.transform(dataset)
  # The transformer generates n DAGs for a molecule with n atoms; these
  # are denoted the "parents". Use the loop variable directly instead of
  # re-indexing dataset.X (the enumerate index was previously unused).
  for mol in dataset.X:
    assert mol.get_num_atoms() == len(mol.parents)
def test_median_filter(self):
  """Median filtering must match PIL's MedianFilter."""
  from PIL import Image, ImageFilter
  transformer = DataTransforms(self.d)
  filtered = transformer.median_filter(size=3)
  reference = Image.fromarray(self.d).filter(ImageFilter.MedianFilter(size=3))
  expected = np.array(reference)
  assert np.allclose(expected, filtered)
| mit |
rkwitt/pyfsa | core/fsa.py | 1 | 15682 | """fsa.py
This modules implements fine-structure analysis of undirected
graphs with (numeric) vertex attributes. It further contains
functionality to estimate the feature distribution using Gaussian
mixture models, or to build a Bag-of-Words representation from
a collection of feature vectors.
The idea of fine-structure analysis was recently proposed in
[1] Macindoe, O. and W. Richards, "Graph Comparison Using Fine
Structure Analysis". In: Social Computing '10
Note: We do not implement the LBG features of [1]; Our graph
features include a subset of the features proposed in [2]
[2] Li. G. et al., "Graph Classification via Topological and Label
Attributes". In: MLG '11
as well as some additional generic features available in networkx.
"""
__license__ = "Apache License, Version 2.0"
__author__ = "Roland Kwitt, Kitware Inc., University of Salzburg, 2013"
__email__ = "E-Mail: roland.kwitt@kitware.com"
__status__ = "Development"
# Graph handling
import networkx as nx
from networkx.algorithms import bipartite
# Machine learning
import sklearn.mixture.gmm as gm
from sklearn.cluster import KMeans
from collections import defaultdict
# Misc.
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
# Per-(sub)graph feature functions: each entry maps a networkx graph to one
# scalar; together they form the fine-structure feature vector (see [1], [2]).
# Commented-out entries are features that have been disabled.
attr_list = [# Average degree
             lambda g : np.mean([e for e in g.degree().values()]),
             # Average eccentricity
             #lambda g : np.mean([i for i in nx.eccentricity(g).values()]),
             # Average closeness centrality
             #lambda g : np.mean([e for e in nx.closeness_centrality(g).values()]),
             # Percentage of isolated points (i.e., degree(v) = 1)
             lambda g : float(len(np.where(np.array(nx.degree(g).values())==1)[0]))/g.order(),
             # Spectral radius (i.e., largest AM eigenvalue)
             #lambda g : np.abs(nx.adjacency_spectrum(g))[0],
             # Spectral trace (i.e., sum of abs. eigenvalues)
             # lambda g : np.sum(np.abs(nx.adjacency_spectrum(g))),
             # Label entropy, as defined in [2]
             lambda g : label_entropy([e[1]['type'] for e in g.nodes(data=True)]),
             # Mixing coefficient of attributes
             lambda g : np.linalg.det(nx.attribute_mixing_matrix(g,'type')),
             # Avg. #vertics with eccentricity == radius (i.e., central points)
             # lambda g : np.mean(float(len(nx.center(g)))/g.order()),
             # Link impurity, as defined in [2]
             lambda g : link_impurity(g)]
             # Diameter := max(eccentricity)
             # lambda g : nx.diameter(g),
             # Radius := min(eccentricity)
             #lambda g : nx.radius(g)]
def link_impurity(g):
    """Compute link impurity of a vertex-labeled graph.

    Parameters
    ----------
    g : networkx Graph
        Input graph with vertex attribute stored as 'type'.

    Returns
    -------
    impurity : float
        Fraction of edges joining vertices with different labels, see [2].
    """
    edges = g.edges()
    # Degenerate cases: a single vertex, or any graph without edges, has
    # zero impurity. The explicit edge check fixes a ZeroDivisionError for
    # multi-vertex graphs with no edges.
    if len(g.nodes()) == 1 or len(edges) == 0:
        return 0
    u = np.array([g.node[a]['type'] for (a, b) in edges])
    v = np.array([g.node[b]['type'] for (a, b) in edges])
    return float(len(np.nonzero(u - v)[0])) / len(edges)
def label_entropy(labels):
    """Compute the Shannon entropy of an integer label vector.

    Parameters
    ----------
    labels : array-like of non-negative ints, shape (L,)
        The input labels.

    Returns
    -------
    entropy : float
        Entropy of the label vector, see [2].
    """
    counts = np.bincount(labels)
    # Drop empty bins, then normalize to a probability vector.
    probs = counts[counts > 0].astype(float) / np.sum(counts)
    return np.abs(-np.sum(probs * np.log(probs)))
def graph_from_file(graph_file, label_file=None, n_skip=0):
    """Load graph from an ASCII file containing adjacency information.

    Parameters
    ----------
    graph_file : string
        Filename of the file containing all the adjaceny information, e.g.

            0 1 1
            1 0 0
            0 1 0

        i.e. a 3x3 adjacency matrix with an edge between vertices (0,1)
        and (0,2), etc. Weights >= 1 are truncated to 1.
    label_file : string
        Filename of the label information file with one integer vertex
        label per line (same vertex order as the adjacency matrix).
    n_skip : int (default: 0)
        Skip n header lines in both files.

    Returns
    -------
    G : networkx Graph
        Graph with vertex labels stored under the 'type' attribute.
    """
    logger = logging.getLogger()
    if not os.path.exists(graph_file):
        raise Exception("Graph file %s not found!" % graph_file)
    # Load adjacency information and ensure (0,1) weights
    adj_info = np.genfromtxt(graph_file, skip_header=n_skip)
    adj_info[np.where(adj_info >= 1)] = 1
    G = nx.Graph(adj_info)
    if not label_file is None:
        if not os.path.exists(label_file):
            # BUG FIX: was "%d" which raised TypeError for a string path
            # instead of the intended Exception.
            raise Exception("Label file %s not found!" % label_file)
        labels = np.genfromtxt(label_file, skip_header=n_skip)
        logger.debug("Loaded labelfile %s!" % label_file)
        if len(labels) != len(G):
            raise Exception("Size mismatch for labels!")
        for idx, l in enumerate(labels):
            G.node[idx]['type'] = int(l)
    logger.debug("Built graph from %s with %d vertices." %
                 (graph_file, len(G)))
    return G
def compute_graph_features(g, radius=2, sps=None, omit_degenerate=False, run_global=False):
    """Compute graph feature vector(s).

    Parameters
    ----------
    g : networkx input graph with N vertices
        The input graph on which we need to compute graph features.
    radius: int (default: 2)
        Compute graph features from local neighborhoods of vertices,
        where the notion of neighborhood is defined by the number of
        hops to the neighbor, i.e., the radius. This assumes that the
        initial edges weights when computing the shortest-paths are 1.
    sps: numpy matrix, shape (N, N) (default : None)
        Matrix of shortest-path information for the graph g.
    omit_degenerate : boolean (default: False)
        Currently, degenerate cases are subgraphs with just a single
        vertex. If 'omit_degenerate' is 'True', these subgraphs are
        not considered. Otherwise, the feature vector for such a sub-
        graph is just a vector of zeros.
    run_global: boolean (default : False)
        Compute a GLOBAL graph descriptor using the define features.

    Returns
    -------
    v_mat : numpy matrix, shape (N, D)
        A D-dimensional feature matrix with one feature vector for
        each vertex. Features are computed for the given radius. In
        case global is True, N=1.
    """
    logger = logging.getLogger()
    # global feature computation: evaluate every feature once on the whole
    # graph and return a single-row matrix.
    if run_global:
        v = [attr_fun(g) for attr_fun in attr_list]
        v_mat = np.zeros((1,len(attr_list)))
        v_mat[0,:] = np.asarray(v)
        return v_mat
    # Recompute shortest paths if neccessary
    if sps is None:
        sps = nx.floyd_warshall_numpy(g)
    # Feature matrix representation of graph: one row per vertex.
    v_mat = np.zeros([len(g),len(attr_list)])
    # Iterate over all nodes
    degenerates = []
    for n in g.nodes():
        # Get n-th row of shortest path matrix
        nth_row = np.array(sps[n,:]).ravel()
        # Find elements within a certain radius
        within_radius = np.where(nth_row <= radius)
        # Build a subgraph from those nodes
        sg = g.subgraph(within_radius[0])
        # Single vertex sg is considered degenerate
        if len(sg.nodes()) == 1:
            # Keep track of degenerates
            degenerates.append(n)
            if omit_degenerate:
                continue
            # Feature vector is 0-vector
            v = np.zeros((len(attr_list),))
        else:
            v = [attr_fun(sg) for attr_fun in attr_list]
        v_mat[n,:] = np.asarray(v)
    logger.info("Found %d degenerate cases!" % len(degenerates))
    # NOTE(review): degenerate rows are deleted below regardless of
    # 'omit_degenerate', so the "vector of zeros" branch above never
    # survives into the returned matrix -- confirm this is intended.
    if len(degenerates):
        logger.info("Pruning %d degenerate cases ..." % len(degenerates))
        v_mat = np.delete(v_mat, degenerates, axis=0)
    logger.debug("Computed (%d x %d) feature matrix." %
                 (v_mat.shape[0], v_mat.shape[1]))
    return v_mat
def run_fsa(data, radii=None, recompute=True, out=None, skip=0,
            omit_degenerate=False, run_global=False):
    """Run (f)ine-(s)tructure (a)nalysis.

    Parameters
    ---------
    data : list of N 3-tuple of (graph files,label files, class
        indices). We iterate over this list and compute fine-structure
        topological features for each graph.
    radii : list of 'int'
        The desired neighborhood radii.
    recompute: bool (default : True)
        Recompote features, otherwise try to load them from disk.
        In case we try to load from disk, filenames are constructed
        based on the value of the 'out' parameter.
    out : string (default : None)
        Base file name for the generated data files, e.g.,
        '/tmp/data'. Two files will be written to disk:

            /tmp/data.mat
            /tmp/data.idx

        where 'data.mat' contains the feature matrix, i.e., one
        feature vector per vertex; 'data.idx' contains the indices
        that identify which graph each feature vector belongs to;
    skip : int (default : 0)
        Skip N header entries when loading graphs.
    omit_degenerate : boolean (default: False)
        Currently, degenerate cases are subgraphs with just a single
        vertex. If 'omit_degenerate' is 'True', these subgraphs are
        not considered. Otherwise, the feature vector for such a sub-
        graph is just a vector of zeros.
    run_global : booelan (default : False)
        Compute a GLOBAL graph descriptor using the define features.

    Returns
    -------
    X : numpy matrix, shape (#vertices, len(radii)*D)
        Feature matrix, where D is the total number of
        features that are computed for one radius setting.
    L : numpy array, shape (#total vertices,)
        Identifies to which graph a feature vector belongs
        to.
    """
    logger = logging.getLogger()
    if radii is None:
        raise Exception("No radii given!")
    # If an output basename is given, either reuse cached results or
    # remember the filenames for saving at the end.
    if not out is None:
        mat_file = "%s.mat" % out
        idx_file = "%s.idx" % out
        if not recompute:
            if (os.path.exists(mat_file) and
                os.path.exists(idx_file)):
                logger.info("Loading data from file(s).")
                data_mat = np.genfromtxt(mat_file)
                data_idx = np.genfromtxt(idx_file)
                return {'data_mat' : data_mat,
                        'data_idx' : data_idx}
    data_mat = []
    data_idx = []
    for idx, (cf, lf, lab) in enumerate(data):
        logger.info("Processing %d-th graph ..." % idx)
        T, x = graph_from_file(cf, lf, skip), []
        if run_global:
            # if run_global is True, all other parameters do NOT matter!
            # NOTE(review): 'None' is passed positionally for
            # omit_degenerate here -- harmless, since the global branch
            # never reads it, but worth confirming.
            x.append(compute_graph_features(T, 0, None, None, run_global))
        else:
            # One feature block per radius, horizontally stacked below.
            for r in radii:
                x.append(compute_graph_features(T, r, None, omit_degenerate))
        xs = np.hstack(tuple(x))
        data_mat.append(xs)
        # Tag every row (vertex) with the index of its source graph.
        data_idx.append(np.ones((xs.shape[0], 1))*idx)
    data_mat = np.vstack(tuple(data_mat))
    data_idx = np.vstack(tuple(data_idx))
    if not out is None:
        np.savetxt(mat_file, data_mat, delimiter=' ')
        np.savetxt(idx_file, data_idx, delimiter=' ',fmt="%d")
    return {'data_mat' : data_mat,
            'data_idx' : data_idx}
def estimate_gm(X, components=3, seed=None):
    """Estimate a Gaussian mixture model with diagonal covariances.

    Parameters
    ----------
    X : numpy matrix, shape (N,D)
        Matrix of data samples (i-th row is i-th sample vector).
    components : int (default : 3)
        Number of desired mixture components.
    seed : int (default : None)
        Seed for the random number generator.

    Returns
    -------
    gm_obj : sklearn.mixture.gmm object
        Estimated GMM.
    """
    logger = logging.getLogger()
    n_samples, n_dims = X.shape
    logger.info("Estimating %d-comp. GMM from (%d x %d) ..." %
                (components, n_samples, n_dims))
    gm_obj = gm.GMM(n_components=components,
                    covariance_type='diag',
                    random_state=seed)
    gm_obj.fit(X)
    return gm_obj
def learn_codebook(X, codebook_size=200, seed=None):
    """Learn a codebook by vector quantization.

    Runs K-Means clustering (K-Means++ init, max. 500 iterations,
    10 re-initializations) to compute a codebook.

    Parameters
    ---------
    X : numpy matrix, shape (N,D)
        Input data.
    codebook_size : int (default : 200)
        Desired number of codewords.
    seed : int (default : None)
        Seed for random number generator.

    Returns
    -------
    cb : sklearn.cluster.KMeans object
        KMeans object after fitting.
    """
    logger = logging.getLogger()
    logger.info("Learning codebook with %d words ..." % codebook_size)
    # Run vector-quantization
    quantizer = KMeans(codebook_size,
                       init="k-means++",
                       n_init=10,
                       max_iter=500,
                       random_state=seed)
    quantizer.fit(X)
    return quantizer
def bow(X, cb):
    """Compute a (normalized) Bag-of-Words histogram.

    Parameters
    ----------
    X : numpy matrix, shape (N, D)
        Input data.
    cb : sklearn.cluster.KMeans
        Already estimated codebook with C codewords.

    Returns
    -------
    H : numpy array, shape (C,)
        Normalized BoW histogram.
    """
    # Get nr. codewords and check dimensionality compatibility.
    n_words, dim = cb.cluster_centers_.shape
    if dim != X.shape[1]:
        raise Exception("Dimensionality mismatch!")
    # Map every sample to its closest codeword.
    assignments = cb.predict(X)
    # One histogram bin per codeword; density normalizes the counts.
    bin_edges = range(0, n_words + 1)
    return np.histogram(assignments, bins=bin_edges, density=True)[0]
def pp_gmm(X, models, argmax=True):
    """Compute the posterior probability of X under a set of GMM models.

    Parameters
    ----------
    X : numpy matrix, shape (N,D)
        Data samples.
    models : list of sklearn.mixture.gmm objects
        List of C estimated GMMs.
    argmax : boolean (default : True)
        If 'True', the index of the class (represented by
        it's model) with the highest a-posteriori probability
        is computed. If 'False', the a-posteriori probability
        if each class (represented by the model) is computed for
        each row in X. Note: We assume equal prior probabilities
        for each class.

    Returns
    -------
    maxp : numpy.int64, or np.array with shape (N, C)
        Depending on whether 'argmax' is 'True' or
        'False', the index of the class with the highest
        a-posteriori probability is returned, or the
        a-posteriori probabilities under each model (for
        each feature vector in X).
    """
    n,d = X.shape
    n_models = len(models)
    # ll[i, j] = per-sample log-likelihood of sample i under model j.
    ll = np.zeros((n,n_models),dtype="float32")
    for i, model in enumerate(models):
        ll[:,i] = np.asarray(model.score(X)).ravel()
    if argmax:
        # Column-wise sum: joint log-likelihood of ALL samples per model.
        sump = np.sum(ll,axis=0)
        # LogSumExp to compute MAP: subtracting the max keeps exp() from
        # overflowing before normalization.
        t0 = np.max(sump)
        t1 = np.exp(sump - (np.log(np.sum(np.exp(sump - t0))) + t0))
        max_idx = np.argmax(t1)
        return max_idx
    else:
        # LogSumExp to compute row-wise MAP: normalize each sample's
        # likelihoods across models (equal class priors assumed).
        t0 = np.asmatrix(np.max(ll,axis=1)).transpose()
        t1 = np.log(np.sum(np.exp(ll - np.tile(t0,(1,n_models))),axis=1)) + t0
        prob = np.exp(np.asmatrix(ll) - t1)
        return prob
| apache-2.0 |
percyfal/snakemakelib | snakemakelib/bio/ngs/qc/qualimap.py | 1 | 6130 | # Copyright (C) 2015 by Per Unneberg
import pandas as pd
import numpy as np
from math import log10
from bokeh.plotting import figure, gridplot
from bokeh.charts import Scatter
from bokehutils.geom import points, abline
from bokehutils.facet import facet_grid
from bokehutils.axes import xaxis, yaxis, main
from snakemake.report import data_uri
from snakemakelib.results import Results
from snakemakelib.log import LoggerManager
smllogger = LoggerManager().getLogger(__name__)
COVERAGE_PER_CONTIG_COLUMNS = ["chr", "chrlen", "mapped_bases",
"mean_coverage", "sd"]
GLOBALS_COLUMNS = ["name", "value"]
class Qualimap(Results):
    """Aggregate Qualimap result files across samples.

    Collects the 'Globals' summary section and the per-contig coverage
    table of each input file into pandas DataFrames stored under the
    keys 'globals' and 'coverage_per_contig'.
    """
    _keys = ['globals', 'coverage_per_contig']

    def __init__(self, *args, **kw):
        super(Qualimap, self).__init__(*args, **kw)

    def _collect_globals(self, data, first, sample):
        # Parse the section between the 'Globals' and 'Insert' headers
        # into (name, value) rows.
        df_tmp = self.parse_data(data,
                                 rs=("Globals", "Insert"),
                                 skip=1, split=True,
                                 columns=GLOBALS_COLUMNS,
                                 dtype=float, sep=" = ")
        # Values look like '1,234 (56.78%)'; keep the leading number only.
        df_tmp['value'] = [float(x.split(" ")[0].replace(",", ""))
                           for x in df_tmp['value']]
        df_tmp['Sample'] = sample
        try:
            if first:
                self['globals'] = df_tmp
            else:
                self['globals'] = self['globals'].append(df_tmp, ignore_index=True)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            smllogger.warn("failed to append data to globals dataframe")

    def _collect_coverage_per_contig(self, data, first, sample):
        # Parse everything after the 'Coverage per contig' header.
        df_tmp = self.parse_data(data,
                                 rs=("Coverage per contig", None),
                                 skip=2, split=True,
                                 columns=COVERAGE_PER_CONTIG_COLUMNS,
                                 dtype=float)
        df_tmp["Sample"] = sample
        try:
            # Express chromosome length and mapped bases as percentages of
            # the per-sample totals.
            df_tmp['chrlen_percent'] = 100 * df_tmp['chrlen'] /\
                sum(df_tmp['chrlen'])
            df_tmp['mapped_bases_percent'] = 100 * df_tmp['mapped_bases'] /\
                sum(df_tmp['mapped_bases'])
        except Exception:
            smllogger.warn("coverage_per_contig: failed to normalize data")
        try:
            if first:
                self['coverage_per_contig'] = df_tmp
            else:
                self['coverage_per_contig'] = self['coverage_per_contig'].append(
                    df_tmp, ignore_index=True)
        except Exception:
            smllogger.warn("failed to append data to coverage_per_contig dataframe")

    def _collect_results(self):
        """Parse every input file and assemble the aggregate tables."""
        smllogger.info("Collecting results")
        first = True
        for (f, s) in zip(self._inputfiles, self._samples):
            smllogger.debug("Reading input file {f} for sample {s}".format(f=f, s=s))
            data = self.load_lines(f)
            self._collect_globals(data, first, s)
            self._collect_coverage_per_contig(data, first, s)
            first = False
        if self['globals'] is not None:
            # Reshape to one row per sample, one column per metric, and
            # derive the unique-read count.
            self['globals'] = self['globals'].pivot(
                index='Sample', columns='name', values='value')
            self['globals']['number of unique reads'] = self['globals']['number of mapped reads']\
                - self['globals']['number of duplicated reads']
def make_qualimap_plots(qmglobals=None, coverage_per_contig=None):
    """Make qualimap summary plots.

    Parameters
    ----------
    qmglobals : str or None
        CSV file with the aggregated 'globals' table (one row per sample).
    coverage_per_contig : str or None
        CSV file with the aggregated per-contig coverage table.

    Returns
    -------
    dict with keys 'fig' (bokeh figures), 'file' (input paths) and
    'uri' (data URIs of the input files) for report embedding.
    """
    retval = {'fig': {'coverage_per_contig': None, 'globals': None},
              'file': {'coverage_per_contig': coverage_per_contig,
                       'globals': qmglobals},
              'uri': {'coverage_per_contig': data_uri(coverage_per_contig),
                      'globals': data_uri(qmglobals)}}
    # Globals
    if qmglobals is not None:
        df_all = pd.read_csv(qmglobals)
        df_all["Sample"] = df_all["Sample"].astype('str')
        READ_COLUMNS = ["number of reads",
                        "number of mapped reads",
                        "number of duplicated reads",
                        "number of unique reads"]
        # Long format: one (Sample, metric, count) row per cell; plot on a
        # log10 scale.
        df = df_all[["Sample"] + READ_COLUMNS].pivot_table(index="Sample").stack().reset_index([0,1])
        df.columns = ["Sample", "ind", "count"]
        df["count"] = [log10(x) for x in df["count"]]
        p1 = Scatter(df, x="Sample", y="count",
                     color="ind", legend="top_right",
                     ylabel="log10(count)", title="Qualimap read summary")
        # Same data expressed as percent of total reads per sample.
        df_all[READ_COLUMNS] = df_all[READ_COLUMNS].div(df_all["number of reads"], axis=0)*100
        df = df_all[["Sample"] + READ_COLUMNS].pivot_table(index="Sample").stack().reset_index([0,1])
        df.columns = ["Sample", "ind", "percent"]
        p2 = Scatter(df, x="Sample", y="percent",
                     color="ind", legend="top_right",
                     title="Qualimap read summary, percent")
        retval['fig']['globals'] = gridplot([[p1, p2]])
    # Coverage per contig
    if coverage_per_contig is not None:
        df_all = pd.read_csv(coverage_per_contig, index_col=0)
        df_all["Sample"] = df_all["Sample"].astype('str')
        # Scatter of contig length share vs mapped-base share; each point is
        # labeled with the chromosome name.
        fig = figure(width=300, height=300)
        points(fig, x="chrlen_percent", y="mapped_bases_percent",
               df=df_all, glyph="text", text="chr", text_font_size="8pt")
        main(fig, title_text_font_size="8pt")
        xaxis(fig, axis_label="Chromosome length of total (%)",
              axis_label_text_font_size="8pt")
        yaxis(fig, axis_label="Mapped bases of total (%)",
              axis_label_text_font_size="8pt")
        # One facet per sample, shared axis ranges.
        gp = facet_grid(fig, x="chrlen_percent", y="mapped_bases_percent",
                        df=df_all, groups=["Sample"], width=300, height=300,
                        share_x_range=True, share_y_range=True,
                        title_text_font_size="12pt")
        # The y = x diagonal marks proportional coverage.
        for fig in [item for sublist in gp.children for item in sublist]:
            abline(fig, x="chrlen_percent", y="mapped_bases_percent", df=df_all, slope=1)
        retval['fig']['coverage_per_contig'] = gp
    return retval
| mit |
natefoo/tools-iuc | tools/cwpair2/cwpair2_util.py | 19 | 14130 | import bisect
import csv
import os
import sys
import traceback
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Data outputs: codes used to tag output files/directories.
DETAILS = 'D'
MATCHED_PAIRS = 'MP'
ORPHANS = 'O'
# Data output formats
GFF_EXT = 'gff'
TABULAR_EXT = 'tabular'
# Statistics histograms output directory.
HISTOGRAM = 'H'
# Statistics outputs
FINAL_PLOTS = 'F'
PREVIEW_PLOTS = 'P'
STATS_GRAPH = 'C'
# Graph settings.
# One matplotlib color code per plotted series (black, red, green).
COLORS = 'krg'
Y_LABEL = 'Peak-pair counts'
X_LABEL = 'Peak-pair distance (bp)'
TICK_WIDTH = 3
# Subplot margins: [left, right, top, bottom] for subplots_adjust.
ADJUST = [0.140, 0.9, 0.9, 0.1]
PLOT_FORMAT = 'pdf'
# Global matplotlib style applied to all figures in this module.
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
class FrequencyDistribution(object):
    """Histogram of values over [start, end) with fixed-width bins.

    Bins are identified by their centre; counts are kept in a dict so
    that empty bins cost nothing.
    """

    def __init__(self, start, end, binsize=10, d=None):
        self.start = start
        self.end = end
        self.dist = d or {}
        self.binsize = binsize

    def get_bin(self, x):
        """
        Returns the centre of the bin in which a data point falls
        """
        offset = (x - self.start) // self.binsize * self.binsize
        return self.start + offset + self.binsize / 2.0

    def add(self, x):
        """Record one observation."""
        center = self.get_bin(x)
        self.dist[center] = self.dist.get(center, 0) + 1

    def graph_series(self):
        """Return parallel (x, y) lists of bin centres and counts."""
        xs, ys = [], []
        for edge in range(self.start, self.end, self.binsize):
            center = self.get_bin(edge)
            xs.append(center)
            ys.append(self.dist.get(center, 0))
        return xs, ys

    def mode(self):
        # There could be more than one mode for a frequency distribution;
        # return the median of the modes to be consistent.
        max_frequency = max(self.dist.values())
        modes = sorted(center for center, freq in self.dist.items()
                       if freq == max_frequency)
        return modes[len(modes) // 2]

    def size(self):
        """Total number of recorded observations."""
        return sum(self.dist.values())
def stop_err(msg):
    """Write *msg* to stderr and terminate the process with exit status 1."""
    sys.stderr.write(msg)
    sys.exit(1)
def distance(peak1, peak2):
    """Signed distance between peak midpoints (peak2 - peak1).

    Peaks are (strand, start, end, value) tuples.
    """
    mid1 = (peak1[1] + peak1[2]) / 2.0
    mid2 = (peak2[1] + peak2[2]) / 2.0
    return mid2 - mid1
def gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs=None):
    """Assemble one GFF line as a 9-tuple of column values.

    `attrs` defaults to None (rendered as '.') instead of a mutable
    default dict, which would be shared between calls.
    """
    return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs))
def gff_attrs(d):
    """Render a dict as a GFF attribute string; '.' when empty or None."""
    if not d:
        return '.'
    parts = ['%s=%s' % (key, value) for key, value in d.items()]
    return ';'.join(parts)
def parse_chromosomes(reader):
    """Parse GFF lines into {chromosome: [(strand, start, end, value), ...]}.

    This version of cwpair2 accepts only gff format as input.
    """
    chromosomes = {}
    for line in reader:
        line = line.rstrip("\r\n")
        # Skip blank lines and comment/header lines.
        if not line or line.startswith('#'):
            continue
        cname, _, _, start, end, value, strand, _, _ = line.split("\t")
        peak = (strand, int(start), int(end), float(value))
        chromosomes.setdefault(cname, []).append(peak)
    return chromosomes
def perc95(chromosomes):
    """
    Returns the 95th percentile value of the given chromosomes.
    """
    # Flatten all peak values across chromosomes, then index at 95%.
    values = sorted(peak[3] for peaks in chromosomes.values() for peak in peaks)
    return values[int(len(values) * 0.95)]
def peak_filter(chromosomes, threshold):
    """
    Filters the peaks (in place) to those above a threshold. Threshold < 1.0
    is interpreted as a proportion of the 95th-percentile value, >= 1.0 as an
    absolute value.
    """
    if threshold < 1:
        # Rescale the relative threshold to an absolute one.
        threshold = perc95(chromosomes) * threshold
    for cname in chromosomes:
        chromosomes[cname] = [p for p in chromosomes[cname] if p[3] > threshold]
def split_strands(chromosome):
    """Partition peaks into (watson, crick) lists by strand ('+' vs '-')."""
    watson, crick = [], []
    for peak in chromosome:
        if peak[0] == '+':
            watson.append(peak)
        elif peak[0] == '-':
            crick.append(peak)
    return watson, crick
def all_pair_distribution(chromosomes, up_distance, down_distance, binsize):
    """Histogram of watson-to-crick midpoint distances over all chromosomes."""
    dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    for peaks in chromosomes.values():
        watson, crick = split_strands(peaks)
        # get_window() bisects on midpoints, so crick must be sorted first.
        crick.sort(key=lambda p: float(p[1]))
        keys = make_keys(crick)
        for wpeak in watson:
            window = get_window(crick, wpeak, up_distance, down_distance, keys)
            for cpeak in window:
                dist.add(distance(wpeak, cpeak))
    return dist
def make_keys(crick):
    """Integer midpoint of each crick peak, for bisecting sorted windows."""
    return [(peak[1] + peak[2]) // 2 for peak in crick]
def get_window(crick, peak, up_distance, down_distance, keys=None):
"""
Returns a window of all crick peaks within a distance of a watson peak.
crick strand MUST be sorted by distance
"""
strand, start, end, value = peak
midpoint = (start + end) // 2
lower = midpoint - up_distance
upper = midpoint + down_distance
keys = keys or make_keys(crick)
start_index = bisect.bisect_left(keys, lower)
end_index = bisect.bisect_right(keys, upper)
return [cpeak for cpeak in crick[start_index:end_index]]
def match_largest(window, peak):
    """Return the crick peak with the greatest value, or None if the window
    is empty.  (`peak` is unused; kept for a uniform matcher signature.)"""
    if window:
        return max(window, key=lambda candidate: candidate[3])
    return None
def match_closest(window, peak):
    """Return the crick peak nearest to `peak`, preferring non-negative
    (downstream) distances; None if the window is empty."""
    if not window:
        return None

    def sort_key(candidate):
        gap = distance(peak, candidate)
        if gap < 0:
            # Rank negative (upstream) gaps after every positive one,
            # with smaller magnitudes preferred among them.
            gap = 10000 - gap
        return gap

    return min(window, key=sort_key)
def match_mode(window, peak, mode):
    """Return the crick peak whose distance to `peak` is closest to the
    previously-estimated modal spacing; None if the window is empty."""
    if not window:
        return None
    return min(window, key=lambda candidate: abs(distance(peak, candidate) - mode))
METHODS = {'mode': match_mode, 'closest': match_closest, 'largest': match_largest}
def frequency_plot(freqs, fname, labels=None, title=''):
    """Plot each FrequencyDistribution in `freqs` as a line and save to `fname`.

    labels : legend labels, used only when more than one series is drawn.
        (Previously a shared mutable default ``[]``; ``None`` is equivalent.)
    title : accepted for caller compatibility but currently unused.
    """
    if labels is None:
        labels = []
    pyplot.clf()
    pyplot.figure(figsize=(10, 10))
    for i, freq in enumerate(freqs):
        x, y = freq.graph_series()
        pyplot.plot(x, y, '%s-' % COLORS[i])
    if len(freqs) > 1:
        pyplot.legend(labels)
    if freqs:
        # All series share the same range; the original read the leaked loop
        # variable here, which raised NameError when `freqs` was empty.
        pyplot.xlim(freqs[-1].start, freqs[-1].end)
    pyplot.ylim(ymin=0)
    pyplot.ylabel(Y_LABEL)
    pyplot.xlabel(X_LABEL)
    pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
    # Get the current axes
    ax = pyplot.gca()
    for l in ax.get_xticklines() + ax.get_yticklines():
        l.set_markeredgewidth(TICK_WIDTH)
    pyplot.savefig(fname)
def create_directories():
    """Create the fixed output directory layout.

    Raises OSError if any directory already exists (os.mkdir semantics).
    """
    # Output histograms in pdf.
    os.mkdir(HISTOGRAM)
    os.mkdir('data_%s' % DETAILS)
    os.mkdir('data_%s' % ORPHANS)
    os.mkdir('data_%s' % MATCHED_PAIRS)
def process_file(dataset_path, galaxy_hid, method, threshold, up_distance,
                 down_distance, binsize, output_files):
    """Run peak pairing on one dataset, once per requested matching method.

    With method='all' every method registered in METHODS is run; otherwise
    only the named one.  Returns the list of per-method statistics dicts
    produced by perform_process().
    """
    if method == 'all':
        match_methods = METHODS.keys()
    else:
        match_methods = [method]
    statistics = []
    for match_method in match_methods:
        stats = perform_process(dataset_path,
                                galaxy_hid,
                                match_method,
                                threshold,
                                up_distance,
                                down_distance,
                                binsize,
                                output_files)
        statistics.append(stats)
    if output_files == 'all' and method == 'all':
        # Overlay the per-method distance distributions in a single plot.
        frequency_plot([s['dist'] for s in statistics],
                       statistics[0]['graph_path'],
                       labels=list(METHODS.keys()))
    return statistics
def perform_process(dataset_path, galaxy_hid, method, threshold, up_distance,
                    down_distance, binsize, output_files):
    """Pair watson/crick peaks in one dataset using one matching method.

    Writes the requested detail / orphan / matched-pair files plus histogram
    plots, and returns a dict of summary statistics (the final distance
    FrequencyDistribution is kept under key 'dist').
    """
    # Which optional outputs were requested.
    output_details = output_files in ["all", "matched_pair_orphan_detail"]
    output_plots = output_files in ["all"]
    output_orphans = output_files in ["all", "matched_pair_orphan", "matched_pair_orphan_detail"]
    # Keep track of statistics for the output file
    statistics = {}
    fpath, fname = os.path.split(dataset_path)
    statistics['fname'] = '%s: data %s' % (method, str(galaxy_hid))
    statistics['dir'] = fpath
    # Encode the threshold into the output file names: 'fa<N>' for absolute,
    # 'f<pct>' for a relative (percentile-based) threshold.
    if threshold >= 1:
        filter_string = 'fa%d' % threshold
    else:
        filter_string = 'f%d' % (threshold * 100)
    fname = '%s_%su%dd%d_on_data_%s' % (method, filter_string, up_distance, down_distance, galaxy_hid)
    def make_histogram_path(output_type, fname):
        # Returns the path of a histogram image for one output type.
        return os.path.join(HISTOGRAM, 'histogram_%s_%s.%s' % (output_type, fname, PLOT_FORMAT))
    def make_path(output_type, extension, fname):
        # Returns the full path for an output.
        return os.path.join(output_type, '%s_%s.%s' % (output_type, fname, extension))
    def td_writer(output_type, extension, fname):
        # Returns a tab-delimited writer for a specified output.
        output_file_path = make_path(output_type, extension, fname)
        return csv.writer(open(output_file_path, 'wt'), delimiter='\t', lineterminator="\n")
    with open(dataset_path, 'rt') as input:
        try:
            chromosomes = parse_chromosomes(input)
        except Exception:
            stop_err('Unable to parse file "%s".\n%s' % (dataset_path, traceback.format_exc()))
    if output_details:
        # Details
        detailed_output = td_writer('data_%s' % DETAILS, TABULAR_EXT, fname)
        detailed_output.writerow(('chrom', 'start', 'end', 'value', 'strand') * 2 + ('midpoint', 'c-w reads sum', 'c-w distance (bp)'))
    if output_plots:
        # Final Plot
        final_plot_path = make_histogram_path(FINAL_PLOTS, fname)
    if output_orphans:
        # Orphans
        orphan_output = td_writer('data_%s' % ORPHANS, TABULAR_EXT, fname)
        orphan_output.writerow(('chrom', 'strand', 'start', 'end', 'value'))
    if output_plots:
        # Preview Plot
        preview_plot_path = make_histogram_path(PREVIEW_PLOTS, fname)
    # Matched Pairs.
    matched_pairs_output = td_writer('data_%s' % MATCHED_PAIRS, GFF_EXT, fname)
    statistics['stats_path'] = 'statistics.%s' % TABULAR_EXT
    if output_plots:
        statistics['graph_path'] = make_histogram_path(STATS_GRAPH, fname)
    statistics['perc95'] = perc95(chromosomes)
    if threshold > 0:
        # Apply peak_filter
        peak_filter(chromosomes, threshold)
    if method == 'mode':
        # Estimate the modal watson/crick spacing from all candidate pairs
        # before committing to any pairing.
        freq = all_pair_distribution(chromosomes, up_distance, down_distance, binsize)
        mode = freq.mode()
        statistics['preview_mode'] = mode
        if output_plots:
            frequency_plot([freq], preview_plot_path, title='Preview frequency plot')
    else:
        statistics['preview_mode'] = 'NA'
    dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    orphans = 0
    # x will be used to archive the summary dataset
    x = []
    for cname, chromosome in chromosomes.items():
        # Each peak is (strand, start, end, value)
        watson, crick = split_strands(chromosome)
        # Sort by value of each peak
        watson.sort(key=lambda data: -float(data[3]))
        # Sort by position to facilitate binary search
        crick.sort(key=lambda data: float(data[1]))
        keys = make_keys(crick)
        # Greedy pairing: strongest watson peaks choose first, and each
        # matched crick peak is removed from further consideration.
        for peak in watson:
            window = get_window(crick, peak, up_distance, down_distance, keys)
            if method == 'mode':
                match = match_mode(window, peak, mode)
            else:
                match = METHODS[method](window, peak)
            if match:
                midpoint = (match[1] + match[2] + peak[1] + peak[2]) // 4
                d = distance(peak, match)
                dist.add(d)
                # Simple output in gff format.
                x.append(gff_row(cname,
                                 source='cwpair',
                                 start=midpoint,
                                 end=midpoint + 1,
                                 score=peak[3] + match[3],
                                 attrs={'cw_distance': d}))
                if output_details:
                    detailed_output.writerow((cname,
                                              peak[1],
                                              peak[2],
                                              peak[3],
                                              '+',
                                              cname,
                                              match[1],
                                              match[2],
                                              match[3], '-',
                                              midpoint,
                                              peak[3] + match[3],
                                              d))
                # NOTE(review): '/' here vs '//' in make_keys() — identical
                # under Python 2 integer division, but would diverge on
                # Python 3 for odd sums; confirm before porting.
                i = bisect.bisect_left(keys, (match[1] + match[2]) / 2)
                del crick[i]
                del keys[i]
            else:
                if output_orphans:
                    orphan_output.writerow((cname, peak[0], peak[1], peak[2], peak[3]))
                # Keep track of orphans for statistics.
                orphans += 1
        # Remaining crick peaks are orphans
        if output_orphans:
            for cpeak in crick:
                orphan_output.writerow((cname, cpeak[0], cpeak[1], cpeak[2], cpeak[3]))
        # Keep track of orphans for statistics.
        orphans += len(crick)
    # Sort output descending by score.
    x.sort(key=lambda data: float(data[5]), reverse=True)
    # Writing a summary to gff format file
    for row in x:
        row_tmp = list(row)
        # Dataset in tuple cannot be modified in Python, so row will
        # be converted to list format to add 'chr'.
        if row_tmp[0] == "999":
            row_tmp[0] = 'chrM'
        elif row_tmp[0] == "998":
            row_tmp[0] = 'chrY'
        elif row_tmp[0] == "997":
            row_tmp[0] = 'chrX'
        else:
            row_tmp[0] = row_tmp[0]
        # Print row_tmp.
        matched_pairs_output.writerow(row_tmp)
    statistics['paired'] = dist.size() * 2
    statistics['orphans'] = orphans
    statistics['final_mode'] = dist.mode()
    if output_plots:
        frequency_plot([dist], final_plot_path, title='Frequency distribution')
    statistics['dist'] = dist
    return statistics
| mit |
yanlend/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier whose training score degrades (2 -> 1) and whose test
    score improves (0 -> 1) linearly with the training-set size."""
    def __init__(self, n_max_train_sizes):
        # Largest training-set size the test will use; scores are expressed
        # as fractions of this maximum.
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None
    def fit(self, X_subset, y_subset=None):
        # Remember the exact training array so score() can tell train from test.
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self
    def predict(self, X):
        raise NotImplementedError
    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        if self._is_training_data(X):
            return 2. - float(self.train_sizes) / self.n_max_train_sizes
        else:
            return float(self.train_sizes) / self.n_max_train_sizes
    def _is_training_data(self, X):
        # Identity check: learning_curve scores on the same array object.
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit for incremental learning."""
    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        # First sample of the most recent partial_fit batch.
        self.x = None
    def _is_training_data(self, X):
        # Membership check: a sample seen during partial_fit marks X as train.
        return self.x in X
    def partial_fit(self, X, y=None, **params):
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier for validation_curve: train score equals `param`,
    test score equals 1 - `param`."""
    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param
    def fit(self, X_subset, y_subset):
        # Remember the exact training array so score() can tell train from test.
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self
    def predict(self, X):
        raise NotImplementedError
    def score(self, X=None, y=None):
        return self.param if self._is_training_data(X) else 1 - self.param
    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    """learning_curve yields the expected shapes, sizes and mean scores,
    without emitting warnings."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    """learning_curve works when y is None (unsupervised estimators)."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    """verbose=1 writes a [learning_curve] progress marker to stdout."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Capture stdout; restore it even if learning_curve raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    """Requesting incremental learning without partial_fit raises ValueError."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Incremental (partial_fit based) learning_curve produces the expected
    sizes and mean train/test scores."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    """Incremental learning_curve also works with y=None."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch and partial_fit based learning curves agree for a linear model."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """Train sizes outside the valid relative/absolute bounds raise ValueError."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate computed train sizes are collapsed with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    """learning_curve accepts an explicit CV iterator (KFold) for splitting."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    """validation_curve mean scores track the swept parameter value
    (train == param, test == 1 - param), without warnings."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
tanayz/Kaggle | BCI/btb.py | 1 | 1474 | __author__ = 'tanay'
# coding: utf-8
"""
Beating the Benchmark
BCI Challenge @ Kaggle
__author__ : Abhishek (abhishek4 AT gmail)
"""
import numpy as np
import pandas as pd
from sklearn import ensemble
# Load the training labels and the sample-submission template
# (paths are relative to the working directory).
labels = pd.read_csv('data/TrainLabels.csv')
submission = pd.read_csv('data/SampleSubmission.csv')
# Strip the last 6 characters of each IdFeedBack to recover the per-session
# data-file identifier.
# NOTE(review): assumes the feedback suffix is always exactly 6 characters
# long — confirm against the competition data format.
training_files = []
for filename in labels.IdFeedBack.values:
    training_files.append(filename[:-6])
testing_files = []
for filename in submission.IdFeedBack.values:
    testing_files.append(filename[:-6])
# Build the training matrix: keep only rows with a nonzero FeedBackEvent,
# drop that column, and stack the per-session blocks vertically.
# (Python 2 script: note the print statements.)
for i, filename in enumerate(np.unique(training_files)):
    print i, filename
    path = 'data/train/Data_' + str(filename) + '.csv'
    df = pd.read_csv(path)
    df = df[df.FeedBackEvent != 0]
    df = df.drop('FeedBackEvent', axis = 1)
    if i == 0:
        train = np.array(df)
    else:
        train = np.vstack((train, np.array(df)))
# Same preprocessing for the test sessions.
for i, filename in enumerate(np.unique(testing_files)):
    print i, filename
    path = 'data/test/Data_' + str(filename) + '.csv'
    df = pd.read_csv(path)
    df = df[df.FeedBackEvent != 0]
    df = df.drop('FeedBackEvent', axis = 1)
    if i == 0:
        test = np.array(df)
    else:
        test = np.vstack((test, np.array(df)))
# Fit a random forest and write the positive-class probabilities as the
# submission predictions.
clf = ensemble.RandomForestClassifier(n_jobs = -1,
                                      n_estimators=150,
                                      random_state=42)
clf.fit(train, labels.Prediction.values)
preds = clf.predict_proba(test)[:,1]
submission['Prediction'] = preds
submission.to_csv('benchmark.csv', index = False)
| apache-2.0 |
cbertinato/pandas | pandas/io/excel/_util.py | 1 | 5749 | import warnings
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_integer, is_list_like
_writers = {}


def register_writer(klass):
    """
    Register an ExcelWriter class under its ``engine`` name.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    _writers[klass.engine] = klass
def _get_default_writer(ext):
    """
    Return the default writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.

    Returns
    -------
    str
        The default engine for the extension.
    """
    defaults = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
    # Prefer xlsxwriter for .xlsx whenever it is importable.
    if import_optional_dependency("xlsxwriter",
                                  raise_on_missing=False,
                                  on_version="warn"):
        defaults['xlsx'] = 'xlsxwriter'
    return defaults[ext]
def get_writer(engine_name):
    """Look up a registered ExcelWriter class by engine name, raising
    ValueError for unknown engines."""
    if engine_name in _writers:
        return _writers[engine_name]
    raise ValueError("No Excel writer '{engine}'"
                     .format(engine=engine_name))
def _excel2num(x):
    """
    Convert an Excel column name like 'AB' to a 0-based column index.

    Parameters
    ----------
    x : str
        The Excel column name to convert to a 0-based column index.

    Returns
    -------
    num : int
        The column index corresponding to the name.

    Raises
    ------
    ValueError
        Part of the Excel column name was invalid.
    """
    num = 0
    for ch in x.upper().strip():
        offset = ord(ch) - ord("A")
        # Only 'A'..'Z' are legal digits in a base-26 column name.
        if not 0 <= offset <= 25:
            raise ValueError("Invalid column name: {x}".format(x=x))
        num = num * 26 + offset + 1
    return num - 1


def _range2cols(areas):
    """
    Convert a comma separated list of column names and ranges to indices.

    Parameters
    ----------
    areas : str
        A string containing a sequence of column ranges (or areas).

    Returns
    -------
    cols : list
        A list of 0-based column indices.

    Examples
    --------
    >>> _range2cols('A:E')
    [0, 1, 2, 3, 4]
    >>> _range2cols('A,C,Z:AB')
    [0, 2, 25, 26, 27]
    """
    cols = []
    for area in areas.split(","):
        if ":" in area:
            bounds = area.split(":")
            cols.extend(range(_excel2num(bounds[0]),
                              _excel2num(bounds[1]) + 1))
        else:
            cols.append(_excel2num(area))
    return cols
def _maybe_convert_usecols(usecols):
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.
    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.
    Returns
    -------
    converted : object
        The compatible format of `usecols`.
    """
    if usecols is None:
        return usecols
    if is_integer(usecols):
        # Deprecated: an integer n once meant "columns 0..n inclusive".
        warnings.warn(("Passing in an integer for `usecols` has been "
                       "deprecated. Please pass in a list of int from "
                       "0 to `usecols` inclusive instead."),
                      FutureWarning, stacklevel=2)
        return list(range(usecols + 1))
    if isinstance(usecols, str):
        # Excel-style column letters/ranges, e.g. "A:C,F".
        return _range2cols(usecols)
    return usecols
def _validate_freeze_panes(freeze_panes):
    """Validate a ``freeze_panes`` argument.

    Returns True for a valid (row, column) pair of ints, False when
    freeze_panes is None (nothing to apply to the output sheet), and
    raises ValueError otherwise.
    """
    if freeze_panes is None:
        return False
    if len(freeze_panes) == 2 and all(
            isinstance(item, int) for item in freeze_panes):
        return True
    raise ValueError("freeze_panes must be of form (row, column)"
                     " where row and column are integers")
def _trim_excel_header(row):
    # Drop leading blank cells ('' from xlrd, None from openpyxl) so that
    # auto-index inference works; the original row object is returned
    # unchanged when nothing needs trimming.
    offset = 0
    while offset < len(row) and (row[offset] == '' or row[offset] is None):
        offset += 1
    return row[offset:] if offset else row
def _fill_mi_header(row, control_row):
    """Forward fill blank entries in row, but only inside the same parent
    index, for MultiIndex header creation.

    Parameters
    ----------
    row : list
        List of items in a single row; modified in place.
    control_row : list of bool
        Tracks whether a column is still in the same parent index as the
        previous value; stops propagation of empty cells across indexes.

    Returns
    -------
    Returns the changed row and control_row.
    """
    fill = row[0]
    for pos in range(1, len(row)):
        if not control_row[pos]:
            # Crossed into a new parent index: restart filling from here.
            fill = row[pos]
        if row[pos] == '' or row[pos] is None:
            row[pos] = fill
        else:
            control_row[pos] = False
            fill = row[pos]
    return row, control_row
def _pop_header_name(row, index_col):
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int, list
        The index columns for our data. Assumed to be non-null.

    Returns
    -------
    header_name : str
        The extracted header name.
    trimmed_row : list
        The original data row with the header name removed.
    """
    # The header name sits in the right-most index column; blank that cell.
    if is_list_like(index_col):
        pos = max(index_col)
    else:
        pos = index_col
    name = row[pos]
    if name == "":
        name = None
    return name, row[:pos] + [''] + row[pos + 1:]
| bsd-3-clause |
pianomania/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 49 | 46769 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
# Shared module-level fixtures: a fixed RNG and shuffled copies of the iris
# and digits datasets, so every test sees the same permutation.
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Every scipy sparse container the estimators must accept, plus dense input.
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
# Neighbor-search algorithms and Minkowski p values exercised by the tests.
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
    """Inverse-square distance weighting, i.e. a safe ``lambda d: d ** -2``.

    ``d ** -2`` is invalid at d == 0; computing ``(1 / d) ** 2`` with divide
    warnings suppressed yields ``inf`` there instead.  `dist` may be
    multidimensional; the operation is elementwise.
    """
    with np.errstate(divide='ignore'):
        inverse = 1. / dist
    return inverse ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    """All algorithms return identical k-NN distances/indices for every p."""
    # Test unsupervised neighbors methods
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results_nodist = []
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            results_nodist.append(neigh.kneighbors(test,
                                                   return_distance=False))
            results.append(neigh.kneighbors(test, return_distance=True))
        # Compare each algorithm's output to the next one's.
        for i in range(len(results) - 1):
            assert_array_almost_equal(results_nodist[i], results[i][1])
            assert_array_almost_equal(results[i][0], results[i + 1][0])
            assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
    """NearestNeighbors.fit accepts a fitted estimator, BallTree or KDTree."""
    # test the types of valid input into NearestNeighbors
    X = rng.random_sample((10, 3))
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)
    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(input)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
    """Tests unsupervised NearestNeighbors with a distance matrix."""
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    DXX = metrics.pairwise_distances(X, metric='euclidean')
    DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
    for method in ['kneighbors']:
        # TODO: also test radius_neighbors, but requires different assertion
        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)
        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check X=None in prediction
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Must raise a ValueError if the matrix is not of correct shape
        assert_raises(ValueError, getattr(nbrs_D, method), X)
    # Supervised estimators must give the same predictions from features
    # as from the precomputed distance matrix.
    target = np.arange(X.shape[0])
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        print(Est)
        est = Est(metric='euclidean')
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = 'precomputed'
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
    """CV scores match between feature-based and precomputed-distance input."""
    # Ensure array is split correctly
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    D = pairwise_distances(X, metric='euclidean')
    y = rng.randint(3, size=20)
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        metric_score = cross_val_score(Est(), X, y)
        precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
        assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    """Radius queries agree across algorithms for every Minkowski p value."""
    # Test unsupervised radius-based query
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            # NOTE(review): the trailing comma on the statement below turns it
            # into a one-element tuple expression; harmless, but presumably
            # unintended.
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0]))),
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    """KNN classification is exact on slightly perturbed training points,
    for every algorithm and weighting (including a callable)."""
    # Test k-neighbors classification
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Test prediction with y_str
            knn.fit(X, y_str)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    """KNN classification also accepts labels passed as floats."""
    # Test k-neighbors classification
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X, y.astype(np.float))
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    y_pred = knn.predict(X[:n_test_pts] + epsilon)
    assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    """predict_proba returns the expected class frequencies (and
    distance-weighted probabilities) on a hand-computed example."""
    # Test KNeighborsClassifier.predict_proba() method
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    """Radius-based classification is exact on slightly perturbed training
    points, for every algorithm and weighting."""
    # Test radius-based classification
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                        weights=weights,
                                                        algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            neigh.fit(X, y_str)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    """Radius classifier behaviour when a query point has no neighbors.

    With ``outlier_label=None`` prediction must raise an informative
    ValueError; with an explicit outlier label the orphan query must be
    assigned that label.
    """
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1

    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier

    weight_func = _weight_func

    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    # Bug fix: this branch was previously guarded by
                    # ``elif False:`` and therefore never executed, so the
                    # explicit-outlier-label path was untested here.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    """Queries with no radius neighbors receive the configured outlier label."""
    # Test radius-based classifier when no neighbors found and outliers
    # are labeled.
    X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
                  [0.98, 0.98], [2.01, 2.01]])
    y = np.array([1, 2, 1, 1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]])  # one outlier
    correct_labels1 = np.array([1, 2])
    # First query of z2 has no neighbor within the radius -> labeled -1.
    correct_labels2 = np.array([-1, 1, 2])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
            assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    """A query identical to a training point (zero distance) must still be
    classified correctly, including under distance-based weighting."""
    # Test radius-based classifier, when distance to a sample is zero.
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    # Second query coincides exactly with X[1].
    z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
    correct_labels1 = np.array([1, 2])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
    """Regressors must handle a query at zero distance from training points:
    with 'distance' weights the exact match should dominate the prediction."""
    # Test radius-based regressor, when distance to a sample is zero.
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    radius = 0.2
    z = np.array([[1.1, 1.1], [2.0, 2.0]])
    rnn_correct_labels = np.array([1.25, 2.0])
    # kNN differs by weighting: uniform averages the 2 nearest (2.0, 0.0);
    # distance weighting collapses onto the exact match at [2.0, 2.0].
    knn_correct_unif = np.array([1.25, 1.0])
    knn_correct_dist = np.array([1.25, 2.0])
    for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since user will be expected
        # to handle zero distances themselves in the function.
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
        for weights, corr_labels in zip(['uniform', 'distance'],
                                        [knn_correct_unif, knn_correct_dist]):
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently
    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    # X[1] lies exactly at distance ``radius`` (3.0) from the query and must
    # be included; X[2] at 3.01 must be excluded.
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0
    for algorithm in ALGORITHMS:
        nbrs = neighbors.NearestNeighbors(radius=radius,
                                          algorithm=algorithm).fit(X)
        results = nbrs.radius_neighbors([[0.0]], return_distance=False)
        # One query point -> 1-element object array of neighbor-index arrays.
        assert_equal(results.shape, (1,))
        assert_equal(results.dtype, object)
        assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    """Multi-output radius classification must equal stacked per-output
    single-target predictions."""
    # Test k-NN classifier on multioutput data
    rng = check_random_state(0)
    n_features = 2
    n_samples = 40
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        for o in range(n_output):
            rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            rnn.fit(X_train, y_train[:, o])
            y_pred_so.append(rnn.predict(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        # Multioutput prediction
        rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        rnn_mo.fit(X_train, y_train)
        y_pred_mo = rnn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    """k-NN classification accepts every sparse-matrix type (and dense) for
    both fit and predict and still recovers the training labels."""
    # Test k-NN classifier on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Zero out small entries so the data is genuinely sparse.
    X *= X > .2
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        knn.fit(sparsemat(X), y)
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_TYPES + (np.asarray,):
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            y_pred = knn.predict(X_eps)
            assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    """Multi-output k-NN classification (predict and predict_proba) must
    match stacked per-output single-target results."""
    # Test k-NN classifier on multioutput data
    rng = check_random_state(0)
    n_features = 5
    n_samples = 50
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        y_pred_proba_so = []
        for o in range(n_output):
            knn = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X_train, y_train[:, o])
            y_pred_so.append(knn.predict(X_test))
            y_pred_proba_so.append(knn.predict_proba(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        assert_equal(len(y_pred_proba_so), n_output)
        # Multioutput prediction
        knn_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        knn_mo.fit(X_train, y_train)
        y_pred_mo = knn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
        # Check proba
        y_pred_proba_mo = knn_mo.predict_proba(X_test)
        assert_equal(len(y_pred_proba_mo), n_output)
        # predict_proba returns one probability array per output.
        for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
            assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    """k-NN regression predicts a smooth radial target to within 0.3 for all
    algorithms and weight schemes."""
    # Test k-neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Target is the (normalized) distance from the origin.
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    """With uniform weights, multi-output k-NN regression equals the plain
    mean of each query's k nearest training targets."""
    # Test k-neighbors in multi-output regression with uniform weight
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        knn = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        knn.fit(X_train, y_train)
        # Reference: unweighted mean of neighbor targets.
        neigh_idx = knn.kneighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
        y_pred = knn.predict(X_test)
        assert_equal(y_pred.shape, y_test.shape)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    """Multi-output k-NN regression with all weight schemes stays within
    0.3 of a duplicated radial target."""
    # Test k-neighbors in multi-output regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    # Duplicate the target to build a 2-column multi-output problem.
    y = np.vstack([y, y]).T
    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        knn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = knn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    """Radius-based regression predicts a radial target to within radius/2."""
    # Test radius-based neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                       weights=weights,
                                                       algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            # Averaging within the radius can deviate by at most ~radius/2
            # for this 1-Lipschitz target.
            assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    """Multi-output radius regression with uniform weights equals the plain
    mean of the targets inside each query's radius neighborhood."""
    # Test radius neighbors in multi-output regression (uniform weight)
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        # (fixed a stray space after ``neighbors.`` in the original call)
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)

        # Reference prediction: unweighted mean of the radius neighbors'
        # targets. The list comprehension already yields an ndarray via
        # np.array, so the original's second np.array() wrap was redundant.
        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred = rnn.predict(X_test)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    """Multi-output radius regression with all weight schemes stays within
    0.3 of a duplicated radial target."""
    # Test k-neighbors in multi-output regression with various weight
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T
    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = rnn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    """k-NN regression on a 0/1 target works with every sparse input type
    (rounded predictions agree with the labels >95% of the time)."""
    # Test radius-based regression on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            algorithm='auto')
        knn.fit(sparsemat(X), y)
        for sparsev in SPARSE_OR_DENSE:
            X2 = sparsev(X)
            assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
    """Sanity checks on the iris dataset: 1-NN memorizes the training set,
    9-NN and 5-NN regression stay above 95% agreement."""
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        # 1-NN on the training data is a perfect lookup.
        assert_array_equal(clf.predict(iris.data), iris.target)
        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
                       0.95)
def test_neighbors_digits():
    """uint8 and float inputs must give the same score with the 'brute'
    algorithm (guards against uint8 overflow in distance computations)."""
    # Sanity check on the digits dataset
    # the 'brute' algorithm has been observed to fail if the input
    # dtype is uint8 due to overflow in distance calculations.
    X = digits.data.astype('uint8')
    Y = digits.target
    (n_samples, n_features) = X.shape
    train_test_boundary = int(n_samples * 0.8)
    train = np.arange(0, train_test_boundary)
    test = np.arange(train_test_boundary, n_samples)
    (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    """kneighbors_graph produces the expected connectivity and distance
    matrices for k = 1, 2, 3 on a tiny 2-D data set."""
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # n_neighbors = 1
    # With include_self=True each point is its own single nearest neighbor.
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
                                   include_self=True)
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))
    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])
    # n_neighbors = 2
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
                                   include_self=True)
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])
    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])
    # n_neighbors = 3
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
                                   include_self=True)
    assert_array_almost_equal(
        A.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    """kneighbors_graph gives identical results for dense and CSR input."""
    # Test kneighbors_graph to build the k-Nearest Neighbor graph
    # for sparse input.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)
    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            assert_array_almost_equal(
                neighbors.kneighbors_graph(X,
                                           n_neighbors,
                                           mode=mode).toarray(),
                neighbors.kneighbors_graph(Xcsr,
                                           n_neighbors,
                                           mode=mode).toarray())
def test_radius_neighbors_graph():
    """radius_neighbors_graph produces the expected connectivity and
    distance matrices for radius 1.5 on a tiny 2-D data set."""
    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
                                         include_self=True)
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 1.],
         [0., 1., 1.]])
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 0.],
         [1.01, 0., 1.40716026],
         [0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
    """radius_neighbors_graph gives identical results for dense and CSR
    input."""
    # Test radius_neighbors_graph to build the Nearest Neighbor graph
    # for sparse input.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)
    # NOTE(review): the loop variable is named ``n_neighbors`` but is passed
    # as the *radius* positional argument of radius_neighbors_graph.
    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            assert_array_almost_equal(
                neighbors.radius_neighbors_graph(X,
                                                 n_neighbors,
                                                 mode=mode).toarray(),
                neighbors.radius_neighbors_graph(Xcsr,
                                                 n_neighbors,
                                                 mode=mode).toarray())
def test_neighbors_badargs():
    """Bad argument values: these should all raise ValueErrors."""
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')

    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)

    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        assert_raises(ValueError,
                      cls,
                      weights='blah')
        assert_raises(ValueError,
                      cls, p=-1)
        assert_raises(ValueError,
                      cls, algorithm='blah')

        # haversine is only defined in 2-D; ball_tree rejects sparse input.
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError,
                      nbrs.predict,
                      X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)

        nbrs = cls()
        assert_raises(ValueError,
                      nbrs.fit,
                      np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError,
                      nbrs.fit,
                      X[:, :, None], y)
        nbrs.fit(X, y)
        assert_raises(ValueError,
                      nbrs.predict,
                      [[]])
        # Bug fix: ``cls`` is a class object, so the original
        # ``isinstance(cls, ...)`` checks were always False and the
        # negative-n_neighbors case was never exercised. ``issubclass``
        # restores the intended check.
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
            nbrs = cls(n_neighbors=-1)
            assert_raises(ValueError, nbrs.fit, X, y)

    nbrs = neighbors.NearestNeighbors().fit(X)

    assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
    assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    """brute, ball_tree and kd_tree agree on kneighbors results for every
    supported metric; kd_tree rejects metrics it cannot handle."""
    # Test computing the neighbors for various metrics
    # create a symmetric matrix
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)
    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for metric, metric_params in metrics:
        results = {}
        # p is a constructor argument, not part of metric_params.
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results[algorithm] = neigh.kneighbors(test, return_distance=True)
        assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
        assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
        if 'kd_tree' in results:
            assert_array_almost_equal(results['brute'][0],
                                      results['kd_tree'][0])
            assert_array_almost_equal(results['brute'][1],
                                      results['kd_tree'][1])
def test_callable_metric():
    """A user-supplied callable metric gives the same distances under
    'auto' and 'brute' algorithms."""
    def custom_metric(x1, x2):
        # NOTE: not a true distance (nonzero at x1 == x2); only consistency
        # between algorithms is asserted here.
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
    X = np.random.RandomState(42).rand(20, 2)
    nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
                                       metric=custom_metric)
    nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
                                       metric=custom_metric)
    nbrs1.fit(X)
    nbrs2.fit(X)
    dist1, ind1 = nbrs1.kneighbors(X)
    dist2, ind2 = nbrs2.kneighbors(X)
    assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
    """Passing p inside metric_params must emit a SyntaxWarning (p is a
    constructor argument)."""
    assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
                 metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
    """Tree-based estimators must reject sparse input at predict time."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    y = rng.randint(0, 2, 5)
    nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
    nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
    for model in [nbrs1, nbrs2]:
        model.fit(X, y)
        assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    """Graph helper functions agree with NearestNeighbors for non-Euclidean
    metrics, and a metric mismatch between a prefit NearestNeighbors object
    and the graph function raises ValueError."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # Find a reasonable radius. Bug fix: ``np.sort`` returns a sorted *copy*
    # whose result was discarded, so the original indexed the unsorted
    # array; sort in place so ``radius`` is really the 16th smallest
    # pairwise distance.
    dist_array = pairwise_distances(X).flatten()
    dist_array.sort()
    radius = dist_array[15]

    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())

    # Test radiusneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)

    # Raise error when wrong parameters are supplied,
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Assert each entry of an object array of ragged neighbor results
    equals the corresponding reference list."""
    for position, entry in enumerate(nparray):
        assert_array_equal(entry, list_check[position])
def test_k_and_radius_neighbors_train_is_not_query():
    """When the query set differs from the training set, neighbors of a
    query that coincides with a training point include that point itself."""
    # Test kneighbors et.al when query is not training data
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]
        # Test neighbors.
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])
        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        # Zero distance for the exact match is stored explicitly.
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    """With ``X=None`` the training set itself is queried, and each point
    is excluded from its own neighborhood."""
    # Test kneighbors et.al when query is None
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)

        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            # Bug fix: the loop body previously asserted on ``rng`` for
            # both iterations, so ``kng`` was never actually checked.
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])

        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    """Duplicate points in the query or training set are handled without
    collapsing entries; zero distances are stored explicitly."""
    # Test behavior of kneighbors when duplicates are present in query
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])
        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        # Explicit zeros: the sparse graph stores the zero distances.
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])
        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])
        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))
        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])
    # Mask the first duplicates when n_duplicates > n_neighbors.
    X = np.ones((3, 1))
    nn = neighbors.NearestNeighbors(n_neighbors=1)
    nn.fit(X)
    dist, ind = nn.kneighbors()
    assert_array_equal(dist, np.zeros((3, 1)))
    assert_array_equal(ind, [[1], [0], [1]])
    # Test that zeros are explicitly marked in kneighbors_graph.
    kng = nn.kneighbors_graph(mode='distance')
    assert_array_equal(
        kng.A, np.zeros((3, 3)))
    assert_array_equal(kng.data, np.zeros(3))
    assert_array_equal(kng.indices, [1., 0., 1.])
    assert_array_equal(
        nn.kneighbors_graph().A,
        np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    """include_self toggles whether each sample counts as its own neighbor
    in kneighbors_graph and radius_neighbors_graph."""
    # Test include_self parameter in neighbors_graph
    X = [[2, 3], [4, 5]]
    kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
    kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(kng, [[1., 0.], [0., 1.]])
    assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
    rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
    rng_not_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(rng, [[1., 1.], [1., 1.]])
    assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
    """Generator test (nose-style): n_jobs=3 must reproduce the serial
    predictions, distances, indices, and graphs for every algorithm."""
    X, y = datasets.make_classification(n_samples=30, n_features=5,
                                        n_redundant=0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    def check_same_knn_parallel(algorithm):
        # Serial reference run.
        clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                             algorithm=algorithm)
        clf.fit(X_train, y_train)
        y = clf.predict(X_test)
        dist, ind = clf.kneighbors(X_test)
        graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
        # Same estimator re-fit with parallel prediction.
        clf.set_params(n_jobs=3)
        clf.fit(X_train, y_train)
        y_parallel = clf.predict(X_test)
        dist_parallel, ind_parallel = clf.kneighbors(X_test)
        graph_parallel = \
            clf.kneighbors_graph(X_test, mode='distance').toarray()
        assert_array_equal(y, y_parallel)
        assert_array_almost_equal(dist, dist_parallel)
        assert_array_equal(ind, ind_parallel)
        assert_array_almost_equal(graph, graph_parallel)

    # Yield one sub-test per algorithm (nose test generator protocol).
    for algorithm in ALGORITHMS:
        yield check_same_knn_parallel, algorithm
def test_dtype_convert():
    """String class labels must round-trip unchanged through fit/predict."""
    n_classes = 15
    knn = neighbors.KNeighborsClassifier(n_neighbors=1)
    features = np.eye(n_classes)
    labels = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    predictions = knn.fit(features, labels).predict(features)
    assert_array_equal(predictions, labels)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
    """Boolean metrics give the same distances through the scipy-backed
    'brute' path and the ball_tree implementation (non-regression, #4523)."""
    # Non-regression test for #4523
    # 'brute': uses scipy.spatial.distance through pairwise_distances
    # 'ball_tree': uses sklearn.neighbors.dist_metrics
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(6, 5))
    NN = neighbors.NearestNeighbors
    nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
    nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
    assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
jmmease/pandas | pandas/core/panelnd.py | 7 | 4562 | """ Factory methods to create N-D panels """
import warnings
from pandas.compat import zip
import pandas.compat as compat
def create_nd_panel_factory(klass_name, orders, slices, slicer, aliases=None,
                            stat_axis=2, info_axis=0, ns=None):
    """ manufacture a n-d class:
    .. deprecated:: 0.19.0
    The recommended way to represent these types of n-dimensional data
    are with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
    Pandas provides a `.to_xarray()` method to automate this conversion.
    Parameters
    ----------
    klass_name : the klass name
    orders : the names of the axes in order (highest to lowest)
    slices : a dictionary that defines how the axes map to the slice axis
    slicer : the class representing a slice of this panel
    aliases : a dictionary defining aliases for various axes
    default = { major : major_axis, minor : minor_axis }
    stat_axis : the default statistic axis default = 2
    info_axis : the info axis
    Returns
    -------
    a class object representing this panel
    """
    # if slicer is a name, get the object
    if isinstance(slicer, compat.string_types):
        import pandas
        try:
            slicer = getattr(pandas, slicer)
        except:
            raise Exception("cannot create this slicer [%s]" % slicer)
    # build the klass
    # The new class subclasses the slicer so an (n-1)-d slice of it is a
    # slicer instance.
    ns = {} if not ns else ns
    klass = type(klass_name, (slicer, ), ns)
    # setup the axes
    klass._setup_axes(axes=orders, info_axis=info_axis, stat_axis=stat_axis,
                      aliases=aliases, slicers=slices)
    klass._constructor_sliced = slicer
    # define the methods ####
    # The following closures are attached to the manufactured class below.
    def __init__(self, *args, **kwargs):
        # deprecation GH13564
        warnings.warn("\n{klass} is deprecated and will be removed in a "
                      "future version.\nThe recommended way to represent "
                      "these types of n-dimensional data are with the\n"
                      "`xarray package "
                      "<http://xarray.pydata.org/en/stable/>`__.\n"
                      "Pandas provides a `.to_xarray()` method to help "
                      "automate this conversion.\n".format(
                          klass=self.__class__.__name__),
                      FutureWarning, stacklevel=2)
        if not (kwargs.get('data') or len(args)):
            raise Exception("must supply at least a data argument to [%s]" %
                            klass_name)
        if 'copy' not in kwargs:
            kwargs['copy'] = False
        if 'dtype' not in kwargs:
            kwargs['dtype'] = None
        self._init_data(*args, **kwargs)
    klass.__init__ = __init__

    def _get_plane_axes_index(self, axis):
        """ return the sliced index for this object """
        # TODO: axis_name is not used, remove?
        axis_name = self._get_axis_name(axis)  # noqa
        index = self._AXIS_ORDERS.index(axis)
        # All axes except the one being sliced, in original order.
        planes = []
        if index:
            planes.extend(self._AXIS_ORDERS[0:index])
        if index != self._AXIS_LEN:
            planes.extend(self._AXIS_ORDERS[index + 1:])
        return planes
    klass._get_plane_axes_index = _get_plane_axes_index

    def _combine(self, other, func, axis=0):
        # Same-type panels get the axis-union path; anything else defers to
        # the slicer's combine logic.
        if isinstance(other, klass):
            return self._combine_with_constructor(other, func)
        return super(klass, self)._combine(other, func, axis=axis)
    klass._combine = _combine

    def _combine_with_constructor(self, other, func):
        # combine labels to form new axes
        new_axes = []
        for a in self._AXIS_ORDERS:
            new_axes.append(getattr(self, a).union(getattr(other, a)))
        # reindex: could check that everything's the same size, but forget it
        d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, new_axes)])
        d['copy'] = False
        this = self.reindex(**d)
        other = other.reindex(**d)
        result_values = func(this.values, other.values)
        return self._constructor(result_values, **d)
    klass._combine_with_constructor = _combine_with_constructor

    # set as NonImplemented operations which we don't support
    for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter',
              'dropna', 'shift']:
        def func(self, *args, **kwargs):
            raise NotImplementedError("this operation is not supported")
        setattr(klass, f, func)
    # add the aggregate operations
    klass._add_aggregate_operations()
    klass._add_numeric_operations()
    return klass
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/metrics/cluster/supervised.py | 16 | 30444 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return ``n`` choose 2 exactly as an integer.

    Equivalent to ``comb(n, 2, exact=1)`` but computed inline: the k == 2
    special case used throughout this module has a closed form, which is
    faster and removes the dependency on the deprecated/removed
    ``scipy.misc.comb``. Works elementwise on integer arrays as well.
    """
    # n * (n - 1) is always even, so floor division is exact; yields 0 for
    # n in {0, 1}, matching the combinatorial definition.
    return n * (n - 1) // 2
def check_clusterings(labels_true, labels_pred):
    """Validate a pair of clusterings and return them as 1D ndarrays.

    Raises ValueError when either labeling is not one-dimensional or when
    the two labelings disagree in length.
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Each labeling must be a flat vector; check true first, then pred,
    # so error messages match the offending argument.
    for name, labels in (('labels_true', labels_true),
                         ('labels_pred', labels_pred)):
        if labels.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (name, labels.shape))

    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps : None or float
        If a float, that value is added to all values in the contingency
        matrix, which stops NaN propagation downstream.
        If ``None``, nothing is adjusted.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError when
        a continuous regression target is passed by mistake.

    Returns
    -------
    contingency : array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples
        in true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps``
        is given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    if n_classes > max_n_classes:
        raise ValueError("Too many classes for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    if n_clusters > max_n_classes:
        raise ValueError("Too many clusters for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers
    # Currently, coo_matrix is faster than histogram2d for simple cases
    # Fix: ``np.int`` was a deprecated alias of the builtin ``int`` and was
    # removed in NumPy 1.24 -- use the builtin directly (same semantics).
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
    """Rand index adjusted for chance.

    The Rand Index counts pairs of samples that are grouped consistently
    in the two labelings. The raw index is then rescaled so that random
    labelings score close to 0.0 regardless of the number of clusters and
    samples, while identical labelings (up to a permutation of the label
    values) score exactly 1.0::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The measure is symmetric in its two arguments.

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the metric; guards against
        MemoryError or OS freeze when set too high.

    Returns
    -------
    ari : float
        Similarity score between -1.0 and 1.0. Random labelings have an
        ARI close to 0.0; 1.0 stands for a perfect match.

    References
    ----------
    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985`
      http://www.springerlink.com/content/x64124718341j1j0/

    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate cases: no real split (a single cluster or empty input) or
    # one singleton cluster per sample are perfect matches by convention.
    if n_classes == n_clusters and n_classes in (0, 1, n_samples):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)

    # Pair counts over rows, columns and individual cells of the table.
    sum_comb_rows = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_cols = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())

    expected = (sum_comb_rows * sum_comb_cols) / float(comb(n_samples, 2))
    max_index = (sum_comb_rows + sum_comb_cols) / 2.
    return (sum_comb - expected) / (max_index - expected)
def homogeneity_completeness_v_measure(labels_true, labels_pred,
                                       max_n_classes=5000):
    """Compute homogeneity, completeness and the V-Measure in one pass.

    The scores are based on normalized conditional entropy of the
    evaluated labeling given the ground truth. A clustering is homogeneous
    when every cluster contains members of a single class only, and
    complete when all members of a class end up in the same cluster. Both
    scores lie in [0, 1], larger being better, and are invariant under
    permutations of the label values. The V-Measure (their harmonic mean)
    is additionally symmetric in the two labelings; homogeneity and
    completeness are not.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    homogeneity : float
        score between 0.0 and 1.0; 1.0 means perfectly homogeneous

    completeness : float
        score between 0.0 and 1.0; 1.0 means perfectly complete

    v_measure : float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # An empty labeling is a perfect (vacuous) match on all three scores.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)

    MI = mutual_info_score(labels_true, labels_pred,
                           max_n_classes=max_n_classes)

    # A zero entropy labeling carries no information to lose: score 1.0.
    homogeneity = MI / entropy_C if entropy_C else 1.0
    completeness = MI / entropy_K if entropy_K else 1.0

    denom = homogeneity + completeness
    v = 0.0 if denom == 0.0 else 2.0 * homogeneity * completeness / denom
    return homogeneity, completeness, v
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering is perfectly homogeneous (score 1.0) when every cluster
    contains members of a single class only. The score is invariant under
    permutations of the label values but NOT symmetric: swapping
    ``labels_true`` and ``labels_pred`` yields :func:`completeness_score`.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    homogeneity : float
        score between 0.0 and 1.0; 1.0 stands for a perfectly homogeneous
        labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score
    """
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred,
                                                max_n_classes)
    return scores[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering is perfectly complete (score 1.0) when all members of a
    given class are assigned to the same cluster. The score is invariant
    under permutations of the label values but NOT symmetric: swapping
    ``labels_true`` and ``labels_pred`` yields :func:`homogeneity_score`.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    completeness : float
        score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score
    """
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred,
                                                max_n_classes)
    return scores[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score` and is
    the harmonic mean of homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    It is invariant under permutations of the label values and symmetric:
    swapping ``labels_true`` and ``labels_pred`` returns the same value,
    which makes it usable for comparing two independent labelings when no
    ground truth is available.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    v_measure : float
        score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score
    """
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred,
                                                max_n_classes)
    return scores[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
                      max_n_classes=5000):
    """Mutual Information between two clusterings.

    With :math:`P(i)` the probability of a random sample falling in
    cluster :math:`U_i` and :math:`P'(j)` that of falling in cluster
    :math:`V_j`, the Mutual Information between clusterings :math:`U`
    and :math:`V` is

    .. math::

        MI(U,V)=\\sum_{i=1}^R \\sum_{j=1}^C P(i,j)\\log\\frac{P(i,j)}{P(i)P'(j)}

    which equals the Kullback-Leibler divergence of the joint
    distribution with the product of the marginals. The score is
    invariant under permutations of the label values and symmetric in
    its two arguments.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency : None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If ``None`` it is computed from the labels; otherwise
        ``labels_true`` and ``labels_pred`` are ignored.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred,
                                         max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    total = np.sum(contingency)
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)

    nonzero = contingency != 0.0
    nz_vals = contingency[nonzero]
    log_nz = np.log(nz_vals)
    # Normalize the non-zero cells into joint probabilities.
    nz_vals /= total

    # log(a / b) is computed as log(a) - log(b) to limit precision loss.
    log_outer = (-np.log(np.outer(pi, pj)[nonzero])
                 + log(pi.sum()) + log(pj.sum()))
    mi = nz_vals * (log_nz - log(total)) + nz_vals * log_outer
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Adjusted Mutual Information between two clusterings.

    AMI corrects Mutual Information for chance: plain MI tends to grow
    with the number of clusters regardless of any real shared
    information. For clusterings :math:`U` and :math:`V`::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    The score is invariant under permutations of the label values and
    symmetric in its two arguments, which makes it usable for comparing
    two independent labelings when no ground truth is available. Note
    that it is an order of magnitude slower than, e.g., the Adjusted
    Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    ami : float (upper-limited by 1.0)
        1.0 when the two partitions are identical (perfectly matched).
        Random partitions (independent labellings) have an expected AMI
        around 0 on average, hence the score can be negative.

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate case: the data is not split at all in either labeling;
    # treat it as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    # Observed MI, its expectation under random labelings, and the
    # entropies used for normalization.
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    emi = expected_mutual_information(contingency, n_samples)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)

    return (mi - emi) / (max(h_true, h_pred) - emi)
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Normalized Mutual Information between two clusterings.

    NMI rescales Mutual Information into [0, 1] (0: no mutual
    information, 1: perfect correlation) by dividing by
    ``sqrt(H(labels_true) * H(labels_pred))``. The measure is NOT
    adjusted for chance, so :func:`adjusted_mutual_info_score` may be
    preferred. It is invariant under permutations of the label values
    and symmetric in its two arguments, which makes it usable for
    comparing two independent labelings when no ground truth is
    available.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError or
        OS freeze when set too high.

    Returns
    -------
    nmi : float
        score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate case: the data is not split at all in either labeling;
    # treat it as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    # Guard against division by zero when both entropies vanish.
    return mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy (in nats) of a labeling.

    Returns 1.0 for an empty labeling, the neutral value expected by the
    homogeneity/completeness ratios in this module.
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    # Fix: ``np.float`` was a deprecated alias of the builtin ``float`` and
    # was removed in NumPy 1.24 -- use the builtin directly.
    pi = bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
mohazahran/Detecting-anomalies-in-user-trajectories | scripts/bipartite-it.py | 2 | 1698 | #-*- coding: utf8
from __future__ import division, print_function
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib
#matplotlib.use('Agg')
import sys
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def main():
    """Load a fitted model from an HDF5 store and, for every latent
    topic z, print the contributors (rows of Theta_zh) and artists
    (columns of Psi_oz) whose membership probability exceeds a 95%
    confidence threshold above the uniform probability 1/n."""
    parser = argparse.ArgumentParser()
    parser.add_argument('model_fpath', help='The name of the model file (a h5 file)', \
            type=str)
    args = parser.parse_args()

    model = pd.HDFStore(args.model_fpath, 'r')
    Theta_zh = model['Theta_zh'].values
    Psi_oz = model['Psi_sz'].values
    hyper2id = model['hyper2id'].values
    source2id = model['source2id'].values

    # Invert the (name, id) tables so rows/columns can be labeled.
    id2hyper = dict((r[1], r[0]) for r in hyper2id)
    id2source = dict((r[1], r[0]) for r in source2id)

    nz = Psi_oz.shape[1]
    k = 10
    # Fix: ``xrange`` is Python-2 only; ``range`` works on both 2 and 3,
    # matching the __future__ imports at the top of this script. The unused
    # ``assign`` read and the unused Counter import were dropped as well.
    for z in range(nz):
        print(z)

        print('These Contributors (name, P[z|c])\n--')
        # Threshold: uniform probability plus a 95% normal confidence bound.
        n = len(Theta_zh[z])
        p = 1.0 / n
        t = p + 1.96 * np.sqrt((1.0 / n) * p * (1 - p))
        for i in Theta_zh[z].argsort()[::-1][:k]:
            if Theta_zh[z, i] > t:
                print(id2hyper[i], Theta_zh[z, i], sep='\t')
        print()

        print('Transition Through These Artists (name, P[a|z])\n--')
        n = len(Psi_oz[:, z])
        p = 1.0 / n
        t = p + 1.96 * np.sqrt((1.0 / n) * p * (1 - p))
        for i in Psi_oz[:, z].argsort()[::-1][:k]:
            if Psi_oz[i, z] > t:
                print(id2source[i], Psi_oz[i, z], sep='\t')
        print()
        print()
    model.close()

if __name__ == '__main__':
    main()
| bsd-3-clause |
wilmerhenao/CaseCreation | casecreator.py | 1 | 16083 | #!/home/wilmer/anaconda3/bin/python
__author__ = 'wilmer'
try:
import mkl
have_mkl = True
print("Running with MKL Acceleration")
except ImportError:
have_mkl = False
print("Running with normal backends")
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pylab
import itertools
import math
from scipy.sparse import csr_matrix, lil_matrix
## Class with static information about the case
class caseinfo:
    """Static geometry of a 2D case: a circular body, its isocenter and
    the beamlet fan at the source."""
    ## Center of the body (which also will contain a tumour) Coord. X
    isoX = 0.0
    ## Center of the body (which also will contain a tumour) Coord. Y
    isoY = 0.0
    ## Default radius of the body. Will be overriden
    R = 20.0
    ## Number of beamlets in the fan
    N = 64
    ## Interbeamlet distance in the fan. Which amounts to 6mms.
    interleaf = 0.6
    ## Source to axis distance calibration in cms
    SAD = 80
    ## 2 x N matrix with the beamlet positions of the original fan
    genFan2D = None

    ## Constructor
    def __init__(self, x=0.0, y=0.0, radio=20.0, numgantrybeamlets=64):
        """Build a case centered at (x, y) with body radius *radio* and a
        symmetric fan of *numgantrybeamlets* beamlets."""
        self.isoX = x
        self.isoY = y
        self.R = radio
        # Fix: N must be assigned BEFORE the fan is generated. Previously
        # the fan was built first and therefore always used the class
        # default of 64 beamlets, ignoring the numgantrybeamlets argument.
        self.N = numgantrybeamlets
        # Beamlets are spaced `interleaf` apart and centered on the axis.
        self.genFan2D = np.matrix(
            [[self.interleaf * (i - self.N / 2 + 1 / 2), self.SAD]
             for i in range(0, self.N)]).transpose()
## Class that uses a data pair and implements some geographical operations. Depth of the voxel given beam.
class voxelbeamletpair:
    ## Counter controlling the one-off debug plot inside depthBeamC.
    plotsomething = 0

    ## Constructor Function
    def __init__(self, v):
        """Store the (x, y) center of voxel *v*; the depth starts unknown."""
        ## Voxel center x coordinate
        self.x = v.x
        ## Voxel center y coordinate
        self.y = v.y
        ## Length of the ray segment that travels inside the body,
        ## set by depthBeamC().
        self.depth = None

    ## Distance from my geographical location to the center of a beamlet (not used)
    def distToBeamC(self, xBeamC, yBeamC):
        return np.sqrt((self.x - xBeamC) ** 2 + (self.y - yBeamC) ** 2)

    ## Distance from this point to the isocenter (not used)
    def distToIsoC(self, thiscase):
        return np.sqrt((self.x - thiscase.isoX) ** 2 +
                       (self.y - thiscase.isoY) ** 2)

    ## Whether x-coordinate xinterp lies INSIDE the line SEGMENT between the
    ## beamlet and the voxel.
    def isinterior(self, xinterp, xBeamC):
        return min(xBeamC, self.x) <= xinterp <= max(xBeamC, self.x)

    ## Find the depth of this voxel inside the body; depth is the only factor
    ## later used to calculate accumulated dose.
    def depthBeamC(self, xBeamC, yBeamC, R):
        """Intersect the segment voxel->beamlet with the body circle of
        radius R centered at the origin, following
        http://mathworld.wolfram.com/Circle-LineIntersection.html, and
        store the in-body length in ``self.depth``."""
        x1, y1 = self.x, self.y
        x2, y2 = xBeamC, yBeamC
        dx = x2 - x1
        dy = y2 - y1
        dr = np.sqrt(dx ** 2 + dy ** 2)
        D = x1 * y2 - x2 * y1
        disc = np.sqrt(R ** 2 * dr ** 2 - D ** 2)
        # Fix 1: Wolfram's sgn*(x) is -1 for x < 0 and +1 OTHERWISE
        # (including 0). np.sign(0) == 0 cancelled the +- term and returned
        # a wrong intersection for horizontal rays (dy == 0).
        sgn = -1.0 if dy < 0 else 1.0
        # The two candidate intersection points (paired +/+ and -/-).
        xinterp = (D * dy + sgn * dx * disc) / dr ** 2
        xinterm = (D * dy - sgn * dx * disc) / dr ** 2
        yinterp = (-D * dx + np.abs(dy) * disc) / dr ** 2
        yinterm = (-D * dx - np.abs(dy) * disc) / dr ** 2
        # Fix 2: for a vertical ray (dx == 0) both candidates share the same
        # x, so the x-interior test cannot discriminate; select on y instead.
        if dx == 0.0:
            use_plus = min(yBeamC, self.y) <= yinterp <= max(yBeamC, self.y)
        else:
            use_plus = self.isinterior(xinterp, xBeamC)
        if use_plus:
            intX, intY = xinterp, yinterp
        else:
            intX, intY = xinterm, yinterm
        # Check that indeed you did everything right
        assert min(xBeamC, self.x) <= intX and intX <= max(xBeamC, self.x)
        ## Depth of the voxel: distance from the voxel to the body surface
        ## along the ray towards the beamlet.
        self.depth = np.sqrt((intX - self.x) ** 2 + (intY - self.y) ** 2)
        if 2810 == self.plotsomething:
            # One-off debug plot of the ray and its in-body portion.
            print('plotting something')
            circlemain = plt.Circle((0, 0), 20, color='blue', fill=False)
            fig2 = plt.gcf()
            fig2.gca().add_artist(circlemain)
            pylab.xlim([min(xBeamC, self.x) - 2, max(xBeamC, self.x) + 2])
            pylab.ylim([min(yBeamC, self.y) - 2, max(yBeamC, self.y) + 2])
            fig2.suptitle('Ray Plot. Beamlet source located at (' + str(xBeamC) + ', ' + str(yBeamC) + ')')
            plt.plot([self.x, xBeamC], [self.y, yBeamC], 'ro', mew = 10)
            # Plot the line segment from the voxel center to the beamlet:
            # unit direction vector scaled by the computed depth.
            vx = xBeamC - self.x
            vy = yBeamC - self.y
            nm = np.sqrt(vx**2 + vy**2)
            vx, vy = self.depth * vx / nm, self.depth * vy / nm
            plt.plot((self.x, self.x + vx), (self.y, self.y + vy), 'r', lw = 1)
            plt.plot((self.x + vx, xBeamC), (self.y + vy, yBeamC), 'g--', lw = 1)
            fig2.savefig('voxelandray.png')
            voxelbeamletpair.plotsomething = voxelbeamletpair.plotsomething + 1
## Abstract class that implements a volume of interest with common location and radius. Parent of OAR and TARGET
class VOI(metaclass=ABCMeta):
    """Abstract circular volume of interest inside the body.

    Parent of OAR and TARGET. Fix: the class previously declared
    ``__metaclass__ = ABCMeta``, the Python 2 spelling, which Python 3
    silently ignores -- abstract enforcement of printVOI() was lost.
    The metaclass is now declared in the class header."""
    ## Static counter of Volumes of Interest (hands out unique IDs)
    numVOIs = 0

    ## Constructor function
    def __init__(self, thiscase, x = 0.0, y = 0.0, r = 0.0):
        ## Object with general information about the case
        self.tc = thiscase
        ## X location of center
        self.xcenter = x
        ## Y location of ceter
        self.ycenter = y
        ## Radius of the Volume of Interest. All of them are circumferences
        self.radius = r
        ## Boolean that determines whether this is a target or not (in case
        ## of False, it is an OAR); subclasses overwrite it after calling
        ## this constructor.
        self.isTarget = None
        ## Is this region contained inside the body?
        self.isinside = self.isContained()
        ## Unique ID for each particular Volume of Interest
        self.VOIID = VOI.numVOIs
        VOI.numVOIs = VOI.numVOIs + 1

    ## Whether this VOI fits entirely inside the body, given the body's
    ## center and radius.
    def isContained(self):
        isv = True
        # Distance from the VOI center to the body center; the VOI fits
        # iff that distance plus its own radius stays within the body.
        distcenter = np.sqrt((self.xcenter - self.tc.isoX) ** 2 +
                             (self.ycenter - self.tc.isoY) ** 2)
        if distcenter + self.radius > self.tc.R:
            isv = False
        return (isv)

    ## Whether the point (x, y) lies inside this VOI.
    def isInThisVOI(self, x, y):
        isinit = False
        distcenter = np.sqrt((self.xcenter - x) ** 2 + (self.ycenter - y) ** 2)
        if distcenter <= self.radius:
            isinit = True
        return (isinit)

    # Abstract method to be implemented by classes deriving from here
    @abstractmethod
    def printVOI(self):
        pass
## Organ At Risk: a circular structure that should be spared from radiation.
class OAR(VOI):
    ## Static counter of OARs
    numOARS = 0
    ## Constructor function that also calls the constructor of VOI. Notice that a VOI object is instantiated first, and
    # then an OAR object is instantiated later
    def __init__(self, thiscase, x = 0.0, y = 0.0, r = 0.0):
        super(OAR, self).__init__(thiscase, x, y, r)
        ## Boolean. Is this a target structure?
        # BUG FIX: an organ at risk is NOT a target (the VOI base class
        # documents False == OAR), yet the original set this flag to True --
        # and did so *before* calling the parent constructor, which then
        # reset the flag to None. Assigning False after the super() call
        # gives the flag its intended, lasting value.
        self.isTarget = False
        ## Assign an ID to each of the different OARs
        self.OARID = self.VOIID
        OAR.numOARS = OAR.numOARS + 1
    ## Print the characteristics of this structure on screen
    def printVOI(self):
        print('OAR with center (', self.xcenter, ', ', self.ycenter, '); and radius ', self.radius)
## Target structure: a circular region that should receive the prescribed dose.
class TARGET(VOI):
    ## Static counter of targets
    numTARGETS = 0
    ## Constructor function that also calls the constructor of VOI. Notice that a VOI object is instantiated first, and
    # then a TARGET object is instantiated later
    def __init__(self, thiscase, x = 0.0, y = 0.0, r = 0.0):
        super(TARGET, self).__init__(thiscase, x, y, r)
        ## Boolean. Is this a target structure?
        # BUG FIX: mirror of the OAR bug -- the original marked targets with
        # isTarget = False, and set the flag before the parent constructor
        # reset it to None. A TARGET is a target: set True, after super().
        self.isTarget = True
        ## Assign an ID to each of the different targets
        self.TARGETID = self.VOIID
        TARGET.numTARGETS = TARGET.numTARGETS + 1
    ## Print the characteristics of this structure on screen
    def printVOI(self):
        print('Target with center (', self.xcenter, ', ', self.ycenter, '); and radius ', self.radius)
## The next class defines a control point; in particular, the location of all beamlets, notice that angle of zero is
# located in the part above, and the gantry moves counterclockwise
class ControlPoint:
    """One gantry control point: the generic fan beam rotated to its angle.

    Angle zero points straight up; the gantry rotates counterclockwise.
    """
    def __init__(self, ctrlAngle, thiscase):
        ## Global data structure of the case
        self.tc = thiscase
        ## This control point angle, in degrees and in radians
        self.angleDegs = ctrlAngle
        self.angleRads = (2 * np.pi * self.angleDegs) / 360
        # 2-D rotation matrix for this gantry angle
        rotation = np.matrix([[np.cos(self.angleRads), -np.sin(self.angleRads)],
                              [np.sin(self.angleRads), np.cos(self.angleRads)]])
        ## The fan beam, rotated into place for this control point
        self.thisFan = rotation * thiscase.genFan2D
        ## Unit vector pointing towards the isocenter
        self.UnitVector = (np.sin(self.angleRads), -np.cos(self.angleRads))
        ## Unit vector perpendicular to UnitVector
        self.normaltoUnit = (-self.UnitVector[1], self.UnitVector[0])

    ## Find normal distances to each of the beamlet array centers. Callers
    # keep only the distances small enough for the beamlet to irradiate the
    # supplied voxel.
    def findNDist(self, x, y):
        """Return, for every beamlet, the normal distance from (x, y) to that beamlet's central axis."""
        distances = []
        for col in range(self.tc.N):
            bx, by = self.thisFan[0, col], self.thisFan[1, col]
            dx, dy = x - bx, y - by
            # project the voxel-to-beamlet vector onto the fan's normal
            distances.append(math.fabs(dy * self.normaltoUnit[1] + dx * self.normaltoUnit[0]))
        return distances
## This class defines not only the x,y position of a voxel, but also assigns to it a unique ID and maps a structure to it.
class voxel:
    ## Static counter of how many voxels have been created; also hands out IDs.
    numVOXELS = 0

    def __init__(self, vc, OARS, TARGETS):
        """Create a voxel centered at vc = (x, y) and map it to the first
        VOI in OARS + TARGETS (OARs are scanned first) that contains it."""
        ## Unique ID for this voxel
        self.voxelID = voxel.numVOXELS
        ## x, y location of the voxel center
        self.x, self.y = vc[0], vc[1]
        ## Does this voxel belong to ANY VOI?
        self.belongsToVOI = False
        ## ID of the VOI this voxel belongs to; None when unassigned.
        self.inStructureID = None
        # First containing region wins; the scan order (OARs before targets)
        # defines the precedence.
        for region in OARS + TARGETS:
            if region.isInThisVOI(self.x, self.y):
                self.belongsToVOI = True
                self.inStructureID = region.VOIID
                break
        voxel.numVOXELS = voxel.numVOXELS + 1
## This function calculates the total dose given a certain depth. It is short but it was created independently so that
# it is easily modified later
def calcDose(depth):
    """Exponential depth-dose model: decay constant 0.04 per unit depth."""
    return np.exp(-0.04 * depth)
## This is the main function takes a list of numeric values as arguments and produces the list of D matrices
## The box has isocenter on position thiscase.x
# Inputs:
# anglelist = [list of numeric]. These are the control points
# numhozv = Number of horizontal voxel divisions
# numverv = Number of vertical voxel divisions
# xgeoloc = X Geographic location of upper-right side of box
# ygeoloc = Y Geographic location of upper-right side of box
# OARList = List of OAR centers and radiuses
# TARGETList = List of Target centers and radiuses
# Outputs:
# listofD = List of D matrix objects
# upper right corner is (X,Y), lower left corner is (-X, -Y)
def createDosetoPoints(thiscase, anglelist, numhozv, numverv, xgeoloc, ygeoloc, OARS, TARGETS):
    """Return (Dlist, voxels): one sparse voxel-by-beamlet dose matrix per
    control point, plus the list of voxels that fall inside some VOI."""
    ## Generate one control point per requested angle
    cps = [ControlPoint(i, thiscase) for i in anglelist]
    ## Voxel center coordinates along each axis (cell centers of a grid)
    voxelhoz = np.arange(-xgeoloc, xgeoloc, 2 * xgeoloc/numhozv) + xgeoloc/numhozv
    voxelvec = np.arange(-ygeoloc, ygeoloc, 2 * ygeoloc/numverv) + ygeoloc/numverv
    ## Create cartesian product to find voxel centers
    voxelcenters = itertools.product(voxelhoz, voxelvec)
    ## Build all voxels; each assigns itself to a VOI (or none)
    allvoxels = [voxel(voxelcenter, OARS, TARGETS) for voxelcenter in voxelcenters]
    voxel.numVOXELS = 0  # Fix this value at zero because I will have to recount in the next line
    ## Filter only those voxels that belong in any VOI. This is the order that will be preserved
    voxels = [voxel((vxinvoi.x, vxinvoi.y), OARS, TARGETS) for vxinvoi in allvoxels if vxinvoi.belongsToVOI]
    allvoxels = None  # Free some memory
    Dlist = []
    ## Create a matrix for each of the control points
    for cp in cps:
        # BUG FIX: the original referenced the undefined global 'caseinfo'
        # (a NameError at runtime); the case data is the 'thiscase' argument.
        # np.float is deprecated: the builtin float is the same dtype.
        D = lil_matrix((len(voxels), thiscase.N), dtype=float)
        for v in voxels:
            # Beamlets whose central axis passes close to this voxel
            bs = findvoxbeamlets(v.x, v.y, cp)
            if not bs:
                continue
            # BUG FIX: the original kept only bsst[0] and then iterated over
            # the two members of that single (index, distance) tuple, so at
            # most one beamlet per voxel ever deposited dose. Every beamlet
            # returned by findvoxbeamlets now contributes.
            for bletidx, _dist in bs:
                vbpair = voxelbeamletpair(v)
                vbpair.depthBeamC(cp.thisFan[0, bletidx], cp.thisFan[1, bletidx], thiscase.R)
                D[v.voxelID, bletidx] = calcDose(vbpair.depth)
        Dlist.append(D)
    return(Dlist, voxels)
## This function is here to find the beamlets associated with a particular voxel at a certain control point
## You should be able to change it in case the team requires a different function
def findvoxbeamlets(x, y, cp):
    """Return (beamlet index, normal distance) pairs for every beamlet whose
    central axis passes within half a beamlet width (0.6 / 2) of (x, y)."""
    dists = cp.findNDist(x, y)
    return [(idx, d) for idx, d in enumerate(dists) if d < 0.6 / 2]
# Shared matplotlib text style for annotations (e.g. the plt.text calls in
# plotstructure below).
font = {'family': 'serif',
        'color': 'black',
        'weight': 'normal',
        'size': 26,
        }
## Implementation part that should be separated later
def plotstructure(thiscase, OARlist, TARGETlist, xgeo, ygeo, voxels):
    """
    Plot the whole case: the body outline, every OAR (green) and target
    (red) circle annotated with its ID, and all voxel centers; the figure
    is saved to 'plotcase.png'.
    """
    ## This function plots the case to make sure that everything is understood
    numOARS = len(OARlist)
    numTARGETS = len(TARGETlist)
    # Plot the outside circle (the body contour)
    circlemain = plt.Circle((thiscase.isoX, thiscase.isoY), thiscase.R, color = 'blue', fill = False)
    fig = plt.gcf()
    fig.gca().add_artist(circlemain)
    pylab.xlim([-xgeo, xgeo])
    pylab.ylim([-ygeo, ygeo])
    # one green circle plus ID label per organ at risk
    for i in range(0, numOARS):
        circle = plt.Circle((OARlist[i].xcenter, OARlist[i].ycenter), OARlist[i].radius, color = 'g', fill = False)
        fig.gca().add_artist(circle)
        plt.text(OARlist[i].xcenter, OARlist[i].ycenter, str(OARlist[i].OARID), fontdict=font)
    # one red circle plus ID label per target
    for i in range(0, numTARGETS):
        circle = plt.Circle((TARGETlist[i].xcenter, TARGETlist[i].ycenter), TARGETlist[i].radius, color = 'r', fill = False)
        fig.gca().add_artist(circle)
        plt.text(TARGETlist[i].xcenter, TARGETlist[i].ycenter, str(TARGETlist[i].TARGETID), fontdict=font)
    fig.suptitle('Case Plot')
    ## Plot points in the voxel structure
    for v in voxels:
        plt.plot(v.x, v.y, 'ro')
    fig.savefig('plotcase.png')
## This function saves vectors in a shape that tomotherapy project can understand
def savevector(necfile, data, filetype):
    """Coerce `data` to a numpy matrix of dtype `filetype` and dump its raw
    bytes to `necfile` (a path or open file object)."""
    print(type(data))
    outmat = np.matrix(data, dtype=filetype)
    print('dtypes', outmat.dtype)
    outmat.tofile(necfile)
| mit |
OshynSong/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
# Public names exported by ``from sklearn.cluster import *``: the clustering
# estimator classes plus their functional counterparts.
__all__ = ['AffinityPropagation',
           'AgglomerativeClustering',
           'Birch',
           'DBSCAN',
           'KMeans',
           'FeatureAgglomeration',
           'MeanShift',
           'MiniBatchKMeans',
           'SpectralClustering',
           'affinity_propagation',
           'dbscan',
           'estimate_bandwidth',
           'get_bin_seeds',
           'k_means',
           'linkage_tree',
           'mean_shift',
           'spectral_clustering',
           'ward_tree',
           'SpectralBiclustering',
           'SpectralCoclustering']
WormLabCaltech/Angeles_Leighton_2016 | src/pyrnaseq_graphics.py | 1 | 19233 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 17:21:21 2016
A library of useful functions for RNA-seq analysis.
Accepts as input dataframes generated exclusively by sleuth.
May work with others but it certainly isn't optimized to do so.
@author: David Angeles Albores
@contact: dangeles@caltech.edu
A note on nomenclature. This library is meant to work with output from sleuth
I assume you will have a single dataframe for each regression value in any
model you have.
Any such dataframe is referred to here as 'dfplot', since it has the values to
be plotted.
Crucially, 'dfplot' should have the following columns:
dfplot.b
dfplot.qval
dfplot.ens_gene <--- must contain WBIDs only!
In order to select broad categories of genes, you will need dataframes that
contain the names of these genes. These dataframes are generically referred to
as dfindex dataframes. There are multiple possible formats for these, but they
all must have the following column:
dfindex.gene <--- WBIDs only!
Broadly speaking, there are three categories of dfindex so far
dfindex -- tissue data
dfindex -- gold standard comparison
dfindex -- genes with an expected effect
As such, there are 3 categories of functions:
Tissue-plotting functions
Gold-standard plotting
Effect plots
Tissue functions
To generate an appropriate tissue dfindex dataframe, simply use the
'organize' function to crawl through all tissues and assemble lists of genes
that are annotated in the tissue of interest.
Beware, 'organize' simply searches
for nodes that share substrings. i.e., if you said 'neuron', you'd get
genes annotated in either 'neuron' and 'dopaminergic neuron'. Likewise,
for 'tail' you may get 'tail neuron' or 'male tail'. So use organize carefully.
Longer 'names' may serve you better (i.e. 'dopaminergic neuron',
instead of just 'neuron').
Use the resulting dataframe to call on all the tissue related functions
In case you want your own dataframe, the df for tissue should have the
following columns:
dfindex.gene -- WBID
dfindex.tissue -- a string denoting a specific anatomic location
dfindex.expressed -- binary, denoting whether a gene is expressed there
Gold-standard functions
Gold-standard dataframes are dataframes that contain a list of genes that
we are interested in querying. In particular, we are mainly interested in
presence/absence of these genes, and the data have typically been generated
by previous publications. The dataframe is fairly small as a result
Columns:
dfindex.gene -- WBID
dfindex.origin -- a string, the name of the dataset this gene came from
Plot-by-value functions
These functions are designed to allow you to compare the value associated with
a list of genes BETWEEN datasets. for example, you may be interested in aging
With a two factor design, you may have an aging coefficient, and a genotype
coefficient. Your mutant of interest might be expected to have positive effects
on aging. As such, you might want to study how the distribution of genes
associated 'positively' with lifespan varies with age, and how it varies by
genotype on the same graph. These functions are designed to do exactly this.
In order to carry out these comparisons, your dataframe must have the
following columns:
dfindex.gene
dfindex.effect -- some categorical variable
"""
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
def wbid_extractor(tissue_df, main_tissue):
    """
    Given a string `main_tissue`, find every column of `tissue_df` whose
    name contains it as a substring, gather the WBIDs flagged 1 in any of
    those columns, and return them as a de-duplicated list.
    """
    if type(main_tissue) != str:
        raise ValueError('please input a string in main tissue')
    # columns whose name contains the query substring
    matched_cols = [col for col in tissue_df.columns if main_tissue in col]
    # union of all WBIDs expressed in any matching column
    found = set()
    for col in matched_cols:
        found.update(tissue_df.wbid[tissue_df[col] == 1].values)
    return list(found)
def organize(names, tissue_df):
    """
    Pick your favourite tissues, place them in a list, provide the tissue
    dictionary, and they will be assembled into a dataframe of all tissues
    that include that word.

    INPUT:
    names - a tissue name or list of tissue names
    tissue_df - our standard tissue dictionary (must have a 'wbid' column)

    OUTPUT:
    df1 - a 'tidy' dataframe with three cols: 'gene' 'tissue' 'expressed'
    """
    # guarantee its iterable
    if type(names) in [str]:
        names = [names]
    names = ['gene']+names
    df = pd.DataFrame(index=np.arange(0, len(tissue_df)), columns=names)
    df.gene = tissue_df.wbid
    # BUG FIX: the original enumerated *all* of names, so the first pass
    # called wbid_extractor(tissue_df, 'gene') and could overwrite the gene
    # column whenever a tissue column name contained the substring 'gene'.
    # Only the real tissue names are looked up now.
    for tissue_name in names[1:]:
        genes_in_tissue = wbid_extractor(tissue_df, tissue_name)
        # .loc instead of chained df[col][mask] assignment, which may write
        # to a temporary copy on modern pandas versions.
        df.loc[df.gene.isin(genes_in_tissue), tissue_name] = 1
    df.fillna(0, inplace=True)
    df1 = pd.melt(df, id_vars='gene', value_vars=names[1:])
    df1.columns = ['gene', 'tissue', 'expressed']
    return df1
def fix_axes(**kwargs):
    """
    Apply title, axis labels, scales, limits and legend to the current
    axes, and optionally save the current figure.

    Recognized kwargs: title, savename, xlab, ylab, xscale, yscale,
    xlim, ylim, loc (legend location).
    """
    title = kwargs.pop('title', '')
    savename = kwargs.pop('savename', '')
    xlab = kwargs.pop('xlab', r'$\beta$')
    ylab = kwargs.pop('ylab', r'log$_{10}Q$')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'symlog')
    xlim = kwargs.pop('xlim', [])
    # BUG FIX: this line popped 'xlim' a second time (already consumed just
    # above), so a caller-provided 'ylim' kwarg was always ignored.
    ylim = kwargs.pop('ylim', [])
    loc = kwargs.pop('loc', 1)
    if len(xlim) != 0:
        plt.gca().set_xlim(xlim)
    if len(ylim) != 0:
        plt.gca().set_ylim(ylim)
    plt.gca().legend(fontsize=15.5, loc=loc, frameon=True)
    plt.title(title)
    plt.xlabel(xlab, fontsize=15)
    plt.ylabel(ylab, fontsize=15)
    plt.xscale(xscale)
    plt.yscale(yscale)
    if savename:
        plt.savefig(savename)
def volcano_plot_tissue(tissue, xname, yname, q, dfplot, dfindex, ax,
                        label, col='b', a=.8):
    """
    Scatter, on the current axes, the significant genes annotated in
    `tissue` and in no other tissue.

    tissue -- name of the tissue to be plotted
    xname, yname -- names of the effect-size and q-value columns in dfplot
    q -- significance cutoff applied to dfplot.qval
    dfindex -- the tissue dataframe generated by organize
    ax -- kept for interface compatibility; plotting uses the current axes
    """
    # genes expressed in this tissue and annotated in exactly one tissue
    in_tissue = ((dfindex.expressed == 1) & (dfindex.tissue == tissue)) \
        & (~dfindex[dfindex.expressed == 1].duplicated('gene'))
    tissue_genes = dfindex[in_tissue].gene
    # significant rows of dfplot belonging to those genes
    keep = (dfplot.ens_gene.isin(tissue_genes)) & (dfplot.qval < q)
    xvals = dfplot[keep][xname]
    yvals = dfplot[keep][yname]
    plt.gca().plot(xvals, -np.log10(yvals), 'o', color=col, ms=6, alpha=a,
                   label=label)
def explode(x, y, q, dfplot, dfindex, colors, **kwargs):
    """
    Draw a complete volcano plot of single-tissue-specific genes.
    Warning: genes annotated in multiple tissues are NOT plotted as
    tissue-specific genes at the moment.

    It will plot:
    Genes not statistically significantly expressed
    Genes stat. sig. expressed but not in one of the dfindex lists
    Genes in dfindex (each tissue gets its own color)

    Parameters:
    x= name of the column containing fold-change or regression values for genes
    y= qvalue (NOT negative logged!)
    dfplot= df containing columns x, y
    dfindex= tissue dataframe output by 'organize' function
    colors= a vector of colors. Should have as many colors as 2 + no. tissues
    kwargs= all the kwargs that can be passed to fix_axes
    """
    a = kwargs.pop('a', .8)
    # provide enough colors
    nvals = len(dfindex.tissue.unique()) + 2
    # BUG FIX: the original compared the integer count against the colors
    # *list* ('nvals > colors'), which raises TypeError on Python 3.
    if nvals > len(colors):
        raise ValueError('len(colors) is < than len(tissues) +2!')
    # make sure x, y in dfplot
    # BUG FIX: the original indexed [0] on a possibly-empty list, raising
    # IndexError instead of the intended ValueError for a missing column.
    if x not in dfplot.columns:
        raise ValueError('x must be a dataframe column in dfplot')
    if y not in dfplot.columns:
        raise ValueError('x must be a dataframe column in dfplot')
    # find all genes not expressed anywhere, or genes that are in multiple
    # 'tissues'
    ind1 = (dfindex.expressed == 0) | \
        (dfindex[dfindex.expressed == 1].duplicated('gene'))
    ind2 = (dfplot.ens_gene.isin(dfindex[ind1].gene)) & (dfplot.qval < q)
    xnotisssig = dfplot[ind2][x]
    ynotisssig = dfplot[ind2][y]
    fig, ax = plt.subplots()
    plt.plot(xnotisssig, -np.log10(ynotisssig), 'o',
             color=colors[0], ms=6, alpha=a, label='all others')
    # BUG FIX: the original called plt.subplots() a second time here, which
    # opened a fresh figure and silently discarded the layer plotted above;
    # everything is drawn on one figure now. This layer also hard-coded the
    # .b/.qval columns and reused colors[0]/'all others'; it now honors the
    # caller's x/y columns and uses the second reserved color.
    ind = (dfplot.qval > q)
    xnotsig = dfplot[ind][x]
    ynotsig = dfplot[ind][y]
    plt.plot(xnotsig, -np.log10(ynotsig), 'o',
             color=colors[1], ms=6, alpha=a, label='not significant')
    # one scatter series per tissue, drawn with colors[2:]
    values = dfindex.tissue.unique()
    for i, value in enumerate(values):
        volcano_plot_tissue(value, x, y, q, dfplot, dfindex, label=value,
                            col=colors[i+2], ax=ax, a=a)
    fix_axes(**kwargs)
def volcano_plot_cool_genes(q, x, y, gene_name, genes, dfplot, dfgenes,
                            ax, colors, a=1, dfplotgenes='ens_gene',
                            downsample=1, **kwargs):
    """
    Overlay one scatter series per dataset in dfgenes[gene_name] on the
    current axes.

    q -- significance cutoff; only used for the n_sig legend counts
    x, y -- column names in dfplot (effect size and q/p value)
    gene_name -- column in dfgenes naming the dataset each gene came from
    genes -- column in dfgenes holding the WBIDs
    dfplot -- dataframe with columns x, y and dfplotgenes
    dfgenes -- gold-standard dataframe (see module docstring)
    ax -- kept for interface compatibility; plotting uses the current axes
    colors -- one color per dataset in dfgenes[gene_name]
    a -- marker alpha
    downsample -- fraction (0-1] of each dataset's rows to draw
    """
    # selector: rows of dfplot whose gene appears in the supplied list
    f = lambda x: (dfplot[dfplotgenes].isin(x))  # & (dfplot.qval < q)
    nvals = len(dfgenes[gene_name].unique())
    ncolors = len(colors)
    if nvals > ncolors:
        raise ValueError('Please provide as many colors as there are datasets.\
{0} {1}'.format(ncolors, nvals))
    for i, gname in enumerate(dfgenes[gene_name].unique()):
        # slice out desired genes
        selected_genes = (dfgenes[gene_name] == gname)
        ind = f(dfgenes[selected_genes][genes])
        sliced = dfplot[ind].dropna().copy()
        # no. genes showing up significant in this assay
        ngsig = len(sliced[(sliced[y] < q)][dfplotgenes].unique())
        # NOTE(review): np.random.randint samples row positions *with*
        # replacement, so downsampling can draw the same point twice.
        ds_select = np.floor(len(sliced)*downsample)
        ds = np.random.randint(0, len(sliced), int(ds_select))
        xcoord = sliced[x].values[ds]
        ycoord = sliced[y].values[ds]
        # no. of genes in the full dataset
        tg = len(dfgenes[dfgenes[gene_name] == gname][genes].unique())
        label = '{0} {1}= {2}, {3}= {4}'.format(gname, r'$n_{sig}$',
                                                ngsig, r'$n_{tot}$', tg)
        plt.gca().plot(xcoord, -np.log10(ycoord), 'o',
                       color=colors[i], ms=6, alpha=a, label=label)
def explode_cool_genes(q, x, y, gene_name, genes, dfplot, dfgenes, colors,
                       dfplotgenes='ens_gene', **kwargs):
    """A function that generates all the relevant volcano plots.

    Draws three layers on one figure: non-significant genes, significant
    genes absent from dfgenes, and (via volcano_plot_cool_genes) one series
    per dataset in dfgenes. Returns (fig, ax).
    """
    a = kwargs.pop('a', .6)
    loc = kwargs.pop('loc', 'lower right')
    savename = kwargs.pop('savename', '')
    xlim = kwargs.pop('xlim', '')
    ylim = kwargs.pop('ylim', '')
    downsample = kwargs.pop('downsample', 1)
    # sig genes not in any given df
    ind1 = (~dfplot[dfplotgenes].isin(dfgenes[genes])) & (dfplot[y] < q)
    ind4 = (dfplot[y] < q)  # for calculating total no. of sig genes
    # no sig genes
    # ind 2 is for plotting purposes
    # ind 3 is to calculate total number of non-sig genes
    ind2 = (~dfplot[dfplotgenes].isin(dfgenes[genes])) & (dfplot[y] > q)
    ind3 = (dfplot[y] > q)
    xnotsig = dfplot[ind2][x].values
    ynotsig = dfplot[ind2][y].values
    xsig = dfplot[ind1][x].values
    ysig = dfplot[ind1][y].values
    # down_sample selection (positions drawn with replacement):
    ds_select = np.floor(len(xnotsig)*downsample)
    ds = np.random.randint(0, len(xnotsig), int(ds_select))
    nnotsig = len(dfplot[ind3][dfplotgenes].unique())
    fig, ax = plt.subplots()
    plt.plot(xnotsig[ds], -np.log10(ynotsig[ds]), 'o',
             color=colors[0], ms=6, alpha=.15,
             label=r'not significant, $n$='+'{0}'.format(nnotsig))
    # down_sample selection:
    ds_select = np.floor(len(xsig)*downsample)
    ds = np.random.randint(0, len(xsig), int(ds_select))
    nsig = len(dfplot[ind4][dfplotgenes].unique())
    plt.plot(xsig[ds], -np.log10(ysig[ds]), 'o',
             color=colors[1], ms=6, alpha=.25,
             label=r'diff. exp. genes $n$=' '{0}'.format(nsig))
    # one series per gold-standard dataset
    volcano_plot_cool_genes(q, x, y, gene_name, genes, dfplot,
                            dfgenes, colors=colors[2:], ax=ax, a=a,
                            downsample=downsample)
    fix_axes(loc=loc, **kwargs)
    leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=True)
    leg.get_frame().set_facecolor('#EAEAF4')  # seaborn color
    # BUG FIX: xlim/ylim default to '' and were previously passed to
    # set_xlim/set_ylim unconditionally, which crashes matplotlib whenever
    # the caller omits them. Only apply limits that were actually provided.
    if xlim:
        plt.gca().set_xlim(xlim)
    if ylim:
        plt.gca().set_ylim(ylim)
    if savename:
        fig.savefig(savename, dpi=300, format='png', bbox_extra_artists=(leg,),
                    bbox_inches='tight')
    return fig, ax
def kde_tissue(tissue, q, genes, x, y, dfplot, dfindex, ax, label, col='b'):
    """
    Draw a KDE of the x values of the significant genes annotated in
    `tissue` (and, for small sets, a rug of the individual points).

    tissue -- tissue to plot
    q -- qvalue to slice on
    dfindex -- the dataframe generated by organizer
    dfplot -- the dataframe containing columns x, y and genes
    x -- the name of the column containing the values to plot in the histogram
    y -- name of the column with which to slice the dataframe (q or p value)
    genes -- the name of the column containing the WBID names
    label -- name of the plot just made
    ax -- axis to plot in
    col -- color
    """
    # rows of dfindex expressed in the requested tissue; unlike
    # volcano_plot_tissue, multi-tissue genes are NOT excluded here
    g = lambda x: ((dfindex.expressed == 1) & (dfindex.tissue == x))
    # & (~dfindex[dfindex.expressed == 1].duplicated('gene'))
    # rows of dfplot that are significant and belong to the gene list
    f = lambda x: (dfplot[genes].isin(x)) & (dfplot[y] < q)
    gene_selection = g(tissue)
    genes_to_plot = dfindex[gene_selection].gene
    ind = f(genes_to_plot)
    to_plot = dfplot[ind][x]
    n = len(dfplot[ind][genes].unique())
    # a KDE needs a minimum number of observations to be meaningful
    if len(to_plot) > 15:
        sns.kdeplot(to_plot, color=col, label=label+' $n$= {0}'.format(n),
                    ax=ax, lw=4, cut=0.5)
    # small sets additionally get their individual points marked
    if len(to_plot) <= 20:
        sns.rugplot(to_plot, color=col, ax=ax, height=.07, lw=2)
def tissue_kegg(q, genes, x, y, dfplot, dfindex, colors, **kwargs):
    """
    Given a dataframe dfplot and the tissue dataframe dfindex, draw on a
    fresh figure one KDE per tissue in dfindex, plus one for all genes not
    annotated in any tissue. Returns (fig, ax).

    q -- qvalue to slice on
    dfindex -- the dataframe as generated by organizer
    dfplot -- df containing columns x, y, and gene
    x -- the name of the column containing the values to plot in the histogram
    y -- name of the column with which to slice the dataframe (q or p value)
    genes -- the name of the column containing the WBID names
    colors -- one color for the background set plus one per tissue
    kwargs -- forwarded to fix_axes (title, savename, ...)
    """
    # set scale parameters
    yscale = kwargs.pop('yscale', 'linear')
    xscale = kwargs.pop('xscale', 'linear')
    xlim = kwargs.pop('xlim', [-8, 8])
    ylim = kwargs.pop('ylim', [0, .5])
    # genes not annotated as expressed anywhere in dfindex
    ind1 = (dfindex.expressed == 0)
    # | (dftiss[dftiss.value==1].duplicated('gene'))
    ind2 = (dfplot[genes].isin(dfindex[ind1].gene)) & (dfplot[y] < q)
    xnotisssig = dfplot[ind2][x]
    n = len(dfplot[ind2][genes].unique())
    fig, ax = plt.subplots()
    sns.kdeplot(xnotisssig,
                color=colors[0], label='all others $n$= {0}'.format(n), ax=ax,
                lw=4, cut=0.5)
    # reference line at zero effect
    plt.axvline(0, ls='--', color='black', lw=3)
    # one KDE per tissue, using colors[1:]
    values = dfindex.tissue.unique()
    for i, value in enumerate(values):
        kde_tissue(value, q, genes, x, y, dfplot, dfindex, label=value,
                   col=colors[i+1], ax=ax)
    fix_axes(xscale=xscale, yscale=yscale, xlim=xlim, ylim=ylim, **kwargs)
    return fig, ax
def kde_value(value, q, dfplot, dfindex, ax, label,
              col='b', min_length=10, rug_length=20):
    """Draw a KDE (and, for small sets, a rug) of the b values of the
    significant genes whose dfindex.effect equals `value`; print a notice
    when there are too few points to plot."""
    chosen = dfindex[dfindex.effect == value].gene
    sig = (dfplot.ens_gene.isin(chosen)) & (dfplot.qval < q)
    bvals = dfplot[sig].b
    n = len(dfplot[sig].ens_gene.unique())
    if len(bvals) > min_length:
        sns.kdeplot(bvals, color=col, label=label+' $n$= {0}'.format(n), ax=ax,
                    lw=5, cut=0.5)
        # also mark the individual observations when the set is small
        if len(bvals) < rug_length:
            sns.rugplot(bvals, color=col, ax=ax, height=.1, lw=2)
    else:
        print('too few values to plot {0}'.format(label+' n= {0}'.format(n)))
def kegg_compare_byval(value, q, Ldf, dfindex, colors, **kwargs):
    """
    Given a list of dataframes, Ldf, and a list of target genes dfindex,
    compare the distributions of genes within dfindex throughout every
    list for genes with trait 'value'.

    Ldf= a list of dataframes. must have df.ens_gene, df.b, df.qval exactly
    as written.
    dfindex= a list of genes to select, must have df.gene, df.effect
    value= a trait associated with genes in dfindex, (an entry in df.effect)
    colors= an array of len(Ldf) of colors
    """
    dfnames = kwargs.pop('dfnames', ['']*len(Ldf))
    xlim = kwargs.pop('xlim', [-10, 10])
    ylim = kwargs.pop('ylim', [0, 1])
    zeroline = kwargs.pop('zeroline', True)
    xscale = kwargs.pop('xscale', 'linear')
    yscale = kwargs.pop('yscale', 'linear')
    save = kwargs.pop('save', False)
    if len(Ldf) > len(colors):
        raise ValueError('Please provide as many colors as dataframes')
    if len(dfindex[dfindex.effect == value]) == 0:
        raise ValueError('Value \'{0}\' is not contained\
within dfindex'.format(value))
    # NOTE(review): dfnames defaults to ['']*len(Ldf), which is truthy, so
    # this branch runs even when the caller passed no dfnames.
    if dfnames:
        if len(Ldf) != len(dfnames):
            raise ValueError('dfnames must be the same length as Ldf')
    fig, ax = plt.subplots()
    # one KDE per dataframe, all drawn on the same axes
    for i, df in enumerate(Ldf):
        kde_value(value, q, df, dfindex, ax, dfnames[i], colors[i])
    if zeroline:
        plt.axvline(0, ls='--', color='black', lw=2.5)
    if save:
        # default output location; fix_axes performs the actual savefig
        sv = '../output/Graphs/effect_'+value
        fix_axes(xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale,
                 savename=sv, **kwargs)
    else:
        fix_axes(xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale, **kwargs)
def kegg_compareall_byval(q, Ldf, dfindex, colors, **kwargs):
    """
    Given a list of dataframes Ldf and a selection dataframe dfindex, draw
    one comparison plot (via kegg_compare_byval) per unique value in
    dfindex.effect.

    titles (kwarg) -- optional list of plot titles, one per effect value;
    empty entries fall back to the effect value itself.
    Raises ValueError when fewer titles than effect values are supplied.
    """
    vals = dfindex.effect.unique()
    titles = kwargs.pop('titles', ['']*len(vals))
    if len(titles) < len(vals):
        # BUG FIX: the original message contained no format placeholders,
        # so the two counts passed to .format() were silently dropped.
        errormess = 'There are {0} titles for {1} plots'.format(len(titles),
                                                                len(vals))
        raise ValueError(errormess)
    for i, value in enumerate(vals):
        if titles[i]:
            title = titles[i]
        else:
            title = value
        kegg_compare_byval(value, q, Ldf, dfindex, colors,
                           title=title, **kwargs)
def line_prepender(filename, line):
    """Insert `line` (stripped of trailing newline characters) as the first
    line of `filename`, keeping the existing content below it."""
    with open(filename, 'r+') as handle:
        body = handle.read()
        # rewind and rewrite: new line first, then the original content
        handle.seek(0, 0)
        handle.write(line.rstrip('\r\n') + '\n' + body)
| mit |
cogeorg/BlackRhino | examples/degroot/networkx/readwrite/tests/test_gml.py | 35 | 3099 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
    """Tests for GML reading/writing (networkx.parse_gml / read_gml / generate_gml)."""

    @classmethod
    def setupClass(cls):
        # GML support requires pyparsing; fall back to the copy bundled with
        # matplotlib, and skip the whole class when neither is available.
        global pyparsing
        try:
            import pyparsing
        except ImportError:
            try:
                import matplotlib.pyparsing as pyparsing
            except:
                raise SkipTest('gml test: pyparsing not available.')

    def setUp(self):
        # A small directed graph in GML syntax exercising graph-, node- and
        # edge-level attributes, including a nested record ('color').
        self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""

    def test_parse_gml(self):
        # relabel=True renames nodes from their ids to their 'label' values
        G=networkx.parse_gml(self.simple_data,relabel=True)
        assert_equals(sorted(G.nodes()),\
                      ['Node 1', 'Node 2', 'Node 3'])
        assert_equals( [e for e in sorted(G.edges())],\
                      [('Node 1', 'Node 2'),
                       ('Node 2', 'Node 3'),
                       ('Node 3', 'Node 1')])
        # edge data must survive parsing, including the nested color record
        assert_equals( [e for e in sorted(G.edges(data=True))],\
                      [('Node 1', 'Node 2',
                        {'color': {'line': 'blue', 'thickness': 3},
                         'label': 'Edge from node 1 to node 2'}),
                       ('Node 2', 'Node 3',
                        {'label': 'Edge from node 2 to node 3'}),
                       ('Node 3', 'Node 1',
                        {'label': 'Edge from node 3 to node 1'})])

    def test_read_gml(self):
        # Round-trip: the same data read back from a temp file with read_gml
        # must match what parse_gml produces from the string.
        import os,tempfile
        (fd,fname)=tempfile.mkstemp()
        fh=open(fname,'w')
        fh.write(self.simple_data)
        fh.close()
        Gin=networkx.read_gml(fname,relabel=True)
        G=networkx.parse_gml(self.simple_data,relabel=True)
        assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
        assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
        os.close(fd)
        os.unlink(fname)

    def test_relabel_duplicate(self):
        # Two nodes share the label "same": relabeling must refuse to
        # produce duplicate node names.
        data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
        fh = io.BytesIO(data.encode('UTF-8'))
        fh.seek(0)
        assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)

    def test_bool(self):
        # Booleans must be serialized to GML as 0/1 integers.
        G=networkx.Graph()
        G.add_node(1,on=True)
        G.add_edge(1,2,on=False)
        data = '\n'.join(list(networkx.generate_gml(G)))
        # NOTE(review): this comparison is whitespace-sensitive; the leading
        # indentation of the expected string was lost upstream and should be
        # verified against generate_gml's actual output.
        answer ="""graph [
node [
id 0
label 1
on 1
]
node [
id 1
label 2
]
edge [
source 0
target 1
on 0
]
]"""
        assert_equal(data,answer)
| gpl-3.0 |
mehdidc/scikit-learn | sklearn/preprocessing/tests/test_label.py | 26 | 21027 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
# Module-level fixture: the iris dataset, loaded once for the tests below.
iris = datasets.load_iris()
def toarray(a):
    """Return `a` densified: sparse matrices become dense arrays via their
    toarray() method, anything else is passed through unchanged."""
    if hasattr(a, "toarray"):
        return a.toarray()
    return a
def test_label_binarizer():
    """Check LabelBinarizer on one-class, two-class and multiclass input."""
    lb = LabelBinarizer()

    # one-class case defaults to negative label
    inp = ["pos", "pos", "pos", "pos"]
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    # accessing the deprecated multilabel_ attribute must warn, and be False
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    # inverse_transform must recover the original labels
    assert_array_equal(lb.inverse_transform(got), inp)

    # two-class case
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)

    # a full two-column indicator matrix also inverts to the original labels
    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)

    # multi-class case: one-hot rows, classes sorted lexicographically
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    """transform() must emit all-zero rows for labels not seen during fit."""
    lb = LabelBinarizer()

    expected = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]])
    got = lb.fit_transform(['b', 'd', 'e'])
    assert_array_equal(expected, got)

    # 'a', 'c' and 'f' were never fitted: their rows must be all zeros
    expected = np.array([[0, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1],
                         [0, 0, 0]])
    got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
    # first for binary classification vs multi-label with 1 possible class
    # lists are multi-label, array is multi-class :-/
    inp_list = [[1], [2], [1]]
    inp_array = np.array(inp_list)

    multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
    binaryclass_array = np.array([[0], [1], [0]])

    lb_1 = LabelBinarizer()
    out_1 = lb_1.fit_transform(inp_list)
    lb_2 = LabelBinarizer()
    out_2 = lb_2.fit_transform(inp_array)

    # list-of-lists input is treated as multilabel...
    assert_array_equal(out_1, multilabel_indicator)
    assert_true(assert_warns(DeprecationWarning, getattr, lb_1, "multilabel_"))
    assert_false(assert_warns(DeprecationWarning, getattr, lb_1,
                              "indicator_matrix_"))
    # ...while the equivalent 2-D array is treated as binary classes
    assert_array_equal(out_2, binaryclass_array)
    assert_false(assert_warns(DeprecationWarning, getattr, lb_2,
                              "multilabel_"))

    # second for multiclass classification vs multi-label with multiple
    # classes
    inp_list = [[1], [2], [1], [3]]
    inp_array = np.array(inp_list)

    # the indicator matrix output is the same in this case
    indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])

    lb_1 = LabelBinarizer()
    out_1 = lb_1.fit_transform(inp_list)
    lb_2 = LabelBinarizer()
    out_2 = lb_2.fit_transform(inp_array)

    assert_array_equal(out_1, out_2)
    assert_true(assert_warns(DeprecationWarning, getattr, lb_1, "multilabel_"))
    assert_array_equal(out_2, indicator)
    assert_false(assert_warns(DeprecationWarning, getattr, lb_2,
                              "multilabel_"))
def test_label_binarizer_set_label_encoding():
    """Custom neg_label/pos_label values appear in output and invert correctly."""
    lb = LabelBinarizer(neg_label=-2, pos_label=0)
    # two-class case with pos_label=0
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 0, 0, -2]]).T
    got = lb.fit_transform(inp)
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    lb = LabelBinarizer(neg_label=-2, pos_label=2)
    # multi-class case
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
    """Check that invalid arguments yield ValueError"""
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    y = np.array([[0, 1, 0], [1, 1, 1]])
    classes = np.arange(3)
    assert_raises(ValueError, label_binarize, y, classes, multilabel=True,
                  neg_label=2, pos_label=1)
    assert_raises(ValueError, label_binarize, y, classes, multilabel=True,
                  neg_label=2, pos_label=2)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)
    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)
    # Fail on the number of classes
    # NOTE(review): output_type is still "foo" here, so this most likely
    # raises on the unknown y_type before class count is checked — confirm.
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)
    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)
    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
def test_label_encoder():
    """Test LabelEncoder's transform and inverse_transform methods"""
    le = LabelEncoder()
    le.fit([1, 1, 4, 5, -1, 0])
    # classes_ is the sorted set of observed labels.
    assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # A label unseen at fit time must raise.
    assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
    """Test fit_transform"""
    le = LabelEncoder()
    ret = le.fit_transform([1, 1, 4, 5, -1, 0])
    assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
    le = LabelEncoder()
    ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
    assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
    """Check that invalid arguments yield ValueError"""
    le = LabelEncoder()
    # transform/inverse_transform before fit must raise.
    assert_raises(ValueError, le.transform, [])
    assert_raises(ValueError, le.inverse_transform, [])
def test_sparse_output_multilabel_binarizer():
    """MultiLabelBinarizer honours sparse_output across iterable input types."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for sparse_output in [True, False]:
        for inp in inputs:
            # With fit_transform
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit_transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
            # With fit
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit(inp()).transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
    # Non-binary sparse input to inverse_transform must raise.
    assert_raises(ValueError, mlb.inverse_transform,
                  csr_matrix(np.array([[0, 1, 1],
                                       [2, 0, 0],
                                       [1, 1, 0]])))
def test_multilabel_binarizer():
    """Dense MultiLabelBinarizer round-trips several iterable input types."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for inp in inputs:
        # With fit_transform
        mlb = MultiLabelBinarizer()
        got = mlb.fit_transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
        # With fit
        mlb = MultiLabelBinarizer()
        got = mlb.fit(inp()).transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
    """A sample with no labels becomes an all-zero row."""
    mlb = MultiLabelBinarizer()
    y = [[1, 2], [1], []]
    Y = np.array([[1, 1],
                  [1, 0],
                  [0, 0]])
    assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
    """Labels outside the fitted/declared classes raise KeyError."""
    mlb = MultiLabelBinarizer()
    y = [[1, 2]]
    assert_raises(KeyError, mlb.fit(y).transform, [[0]])
    mlb = MultiLabelBinarizer(classes=[1, 2])
    assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
    """An explicit classes= list fixes column order (and may add columns)."""
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # fit().transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure works with extra class
    mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp),
                       np.hstack(([[0], [0], [0]], indicator_mat)))
    assert_array_equal(mlb.classes_, [4, 1, 3, 2])
    # ensure fit is no-op as iterable is not consumed
    inp = iter(inp)
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
    """Ensure sequences of the same length are not interpreted as a 2-d array
    """
    inp = [[1], [0], [2]]
    indicator_mat = np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # fit().transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
    """String and tuple class labels binarize and invert like integers."""
    tuple_classes = np.empty(3, dtype=object)
    tuple_classes[:] = [(1,), (2,), (3,)]
    inputs = [
        ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
        ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
        ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    for inp, classes in inputs:
        # fit_transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit_transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
        # fit().transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # Unhashable labels (dicts) must raise TypeError.
    mlb = MultiLabelBinarizer()
    assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
    """Duplicate labels within one sample collapse to a single indicator."""
    inp = [(1, 1, 1, 0)]
    indicator_mat = np.array([[1, 1]])
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
    """inverse_transform validates that input is binary and correctly shaped."""
    inp = [(1, 1, 1, 0)]
    mlb = MultiLabelBinarizer()
    mlb.fit_transform(inp)
    # Not binary
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
    # The following binary cases are fine, however
    mlb.inverse_transform(np.array([[0, 0]]))
    mlb.inverse_transform(np.array([[1, 1]]))
    mlb.inverse_transform(np.array([[1, 0]]))
    # Wrong shape
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    """label_binarize column order follows the classes argument verbatim."""
    out = label_binarize([1, 6], classes=[1, 2, 4, 6])
    expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
    assert_array_equal(out, expected)
    # Modified class order
    out = label_binarize([1, 6], classes=[1, 6, 4, 2])
    expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
    """Shared checker: binarize y both via label_binarize and LabelBinarizer,
    compare against *expected*, and verify the inverse round-trip.

    Sparse output with pos_label == 0 or neg_label != 0 is unsupported and
    must raise ValueError.
    """
    for sparse_output in [True, False]:
        if ((pos_label == 0 or neg_label != 0) and sparse_output):
            assert_raises(ValueError, label_binarize, y, classes,
                          neg_label=neg_label, pos_label=pos_label,
                          sparse_output=sparse_output)
            continue
        # check label_binarize
        binarized = label_binarize(y, classes, neg_label=neg_label,
                                   pos_label=pos_label,
                                   sparse_output=sparse_output)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        # check inverse
        y_type = type_of_target(y)
        if y_type == "multiclass":
            inversed = _inverse_binarize_multiclass(binarized, classes=classes)
        else:
            # Threshold halfway between the two label values.
            inversed = _inverse_binarize_thresholding(binarized,
                                                      output_type=y_type,
                                                      classes=classes,
                                                      threshold=((neg_label +
                                                                  pos_label) /
                                                                 2.))
        assert_array_equal(toarray(inversed), toarray(y))
        # Check label binarizer
        lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
                            sparse_output=sparse_output)
        binarized = lb.fit_transform(y)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        inverse_output = lb.inverse_transform(binarized)
        assert_array_equal(toarray(inverse_output), toarray(y))
        assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
    """Binary y: output is a single column using pos/neg label values."""
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 2
    neg_label = -1
    expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
    # nose-style yield test: each yielded tuple runs as one test case.
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
    # Binary case where sparse_output = True will not result in a ValueError
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 3
    neg_label = 0
    expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
    """Multiclass y: diagonal indicator matrix scaled by pos_label."""
    y = [0, 1, 2]
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = 2 * np.eye(3)
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
    # neg_label != 0 is incompatible with sparse output.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
    """Multilabel y (indicator and all sparse variants) scales by pos_label."""
    y_seq = [(1,), (0, 1, 2), tuple()]
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]
    for y in [y_ind] + y_sparse:
        yield (check_binarized_results, y, classes, pos_label, neg_label,
               expected)
    # Sequence-of-sequences input is deprecated as of 0.17.
    deprecation_message = ("Direct support for sequence of sequences " +
                           "multilabel representation will be unavailable " +
                           "from version 0.17. Use sklearn.preprocessing." +
                           "MultiLabelBinarizer to convert to a label " +
                           "indicator representation.")
    assert_warns_message(DeprecationWarning, deprecation_message,
                         check_binarized_results, y_seq, classes, pos_label,
                         neg_label, expected)
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_deprecation_inverse_binarize_thresholding():
    """Sequence-of-sequences output type emits the 0.17 deprecation warning."""
    deprecation_message = ("Direct support for sequence of sequences " +
                           "multilabel representation will be unavailable " +
                           "from version 0.17. Use sklearn.preprocessing." +
                           "MultiLabelBinarizer to convert to a label " +
                           "indicator representation.")
    assert_warns_message(DeprecationWarning, deprecation_message,
                         _inverse_binarize_thresholding,
                         y=csr_matrix([[1, 0], [0, 1]]),
                         output_type="multilabel-sequences",
                         classes=[1, 2], threshold=0)
def test_invalid_input_label_binarize():
    """An invalid pos_label/neg_label combination raises ValueError."""
    assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
                  pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
    """Argmax-style inversion; an all-zero (tied) row maps to class index 0."""
    got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
                                                   [-1, 0, -1],
                                                   [0, 0, 0]]),
                                       np.arange(3))
    assert_array_equal(got, np.array([1, 1, 0]))
if __name__ == "__main__":
    # Run this module's tests with nose when executed directly.
    import nose
    nose.runmodule()
| bsd-3-clause |
lstephen/ootp-ai | src/main/python/regression.py | 1 | 9590 | import click
import json
import logging
import numpy as np
import scipy
import sys
import time
from sklearn.decomposition import FastICA, PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import HuberRegressor, LinearRegression
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, FunctionTransformer
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
class RandomForest:
    """Randomized-search-tuned RandomForestRegressor behind KBest selection.

    Fitting happens in __init__: a 100-iteration RandomizedSearchCV over the
    number of selected features, tree count and depth, with fixed seeds for
    reproducibility.
    """

    def __init__(self, xs, ys, weights):
        param_grid = {
            'selection__k': scipy.stats.randint(2, xs.shape[1] + 1),
            'regressor__n_estimators': scipy.stats.randint(1, 100),
            'regressor__max_depth': scipy.stats.randint(1, 50)
        }
        feature_selection = SelectKBest(f_regression)
        regressor = RandomForestRegressor(
            random_state=42, min_weight_fraction_leaf=0.01)
        pipeline = Pipeline(
            steps=[('selection', feature_selection), ('regressor', regressor)])
        self._cv = RandomizedSearchCV(
            pipeline, param_grid, n_iter=100, cv=3, random_state=42)
        self._cv.fit(xs, ys, regressor__sample_weight=weights)

    def pipeline(self):
        """Best pipeline found by the randomized search."""
        return self._cv.best_estimator_

    def estimator(self):
        return self.pipeline()

    def cross_val_score(self, xs, ys, weights):
        """Mean 3-fold CV score of the tuned pipeline (expensive: refits)."""
        return np.mean(
            cross_val_score(
                self.pipeline(),
                xs,
                ys,
                cv=3,
                fit_params={'regressor__sample_weight': weights}))

    def fit(self, xs, ys, weights):
        self.pipeline().fit(xs, ys, regressor__sample_weight=weights)

    def report(self, out):
        """Write a human-readable tuning summary to *out*."""
        out.write("Best Parameters: {}\n".format(self._cv.best_params_))
        # BUG FIX: the precision 3 previously sat outside np.round_ as an
        # ignored extra .format() argument; round to 3 decimals as intended.
        out.write("Feature Scores: {}\n".format(
            np.round_(self.pipeline().named_steps['selection'].scores_, 3)))
        out.write("Feature Mask: {}\n".format(
            self.pipeline().named_steps['selection'].get_support()))
        out.write("Feature Importances: {}\n".format(
            self.feature_importances()))

    def feature_importances(self):
        """Regressor feature importances rounded to 3 decimals."""
        return np.round_(
            self.pipeline().named_steps['regressor'].feature_importances_, 3)

    def __repr__(self):
        return "RandomForest(...)"
def flatten_matrix(m):
    """Flatten *m* to one dimension; defined at module level (not a lambda)
    so Pipelines embedding it stay picklable."""
    return m.flatten()
class Isotonic:
    """Isotonic regression on the single best feature (min-max scaled).

    GridSearchCV (fit in __init__) only chooses the monotonic direction;
    SelectKBest(k=1) plus a flatten step feeds the 1-d IsotonicRegression.
    """

    def __init__(self, xs, ys, weights):
        param_grid = {'regressor__increasing': [True, False]}
        feature_selection = SelectKBest(f_regression, k=1)
        # We can't use a lambda as it can't be pickled
        flatten = FunctionTransformer(flatten_matrix, validate=False)
        regressor = IsotonicRegression(out_of_bounds='clip')
        pipeline = Pipeline(
            steps=[('scaler', MinMaxScaler()),
                   ('selection', feature_selection), ('flatten', flatten),
                   ('regressor', regressor)])
        self._cv = GridSearchCV(pipeline, param_grid, cv=3)
        self._cv.fit(xs, ys, regressor__sample_weight=weights)

    def pipeline(self):
        """Best pipeline found by the grid search."""
        return self._cv.best_estimator_

    def estimator(self):
        return self.pipeline()

    def cross_val_score(self, xs, ys, weights):
        """Mean 3-fold CV score of the tuned pipeline (expensive: refits)."""
        return np.mean(
            cross_val_score(
                self.pipeline(),
                xs,
                ys,
                cv=3,
                fit_params={'regressor__sample_weight': weights}))

    def fit(self, xs, ys, weights):
        self.pipeline().fit(xs, ys, regressor__sample_weight=weights)

    def report(self, out):
        """Write a human-readable tuning summary to *out*."""
        out.write("Best Parameters: {}\n".format(self._cv.best_params_))
        # BUG FIX: the precision 3 previously sat outside np.round_ as an
        # ignored extra .format() argument; round to 3 decimals as intended.
        out.write("Feature Scores: {}\n".format(
            np.round_(self.pipeline().named_steps['selection'].scores_, 3)))
        # get_support() is the public equivalent of _get_support_mask().
        out.write("Feature Mask: {}\n".format(
            self.pipeline().named_steps['selection'].get_support()))

    def __repr__(self):
        return "Isotonic(...)"
class Huber:
    """Grid-search-tuned HuberRegressor (robust linear) behind KBest selection.

    Fitting happens in __init__; the only tuned parameter is how many
    features SelectKBest keeps.
    """

    def __init__(self, xs, ys, weights):
        param_grid = {'selection__k': range(1, xs.shape[1] + 1), }
        feature_selection = SelectKBest(f_regression)
        regressor = HuberRegressor(fit_intercept=True)
        pipeline = Pipeline(steps=[('scaler', MinMaxScaler()),
                                   ('selection', feature_selection),
                                   ('regressor', regressor)])
        self._cv = GridSearchCV(pipeline, param_grid, cv=3)
        self._cv.fit(xs, ys, regressor__sample_weight=weights)

    def pipeline(self):
        """Best pipeline found by the grid search."""
        return self._cv.best_estimator_

    def estimator(self):
        return self.pipeline()

    def cross_val_score(self, xs, ys, weights):
        """Mean 3-fold CV score of the tuned pipeline (expensive: refits)."""
        return np.mean(
            cross_val_score(
                self.pipeline(),
                xs,
                ys,
                cv=3,
                fit_params={'regressor__sample_weight': weights}))

    def fit(self, xs, ys, weights):
        self.pipeline().fit(xs, ys, regressor__sample_weight=weights)

    def report(self, out):
        """Write a human-readable tuning summary to *out*."""
        out.write("Best Parameters: {}\n".format(self._cv.best_params_))
        # BUG FIX: the precision 3 previously sat outside np.round_ as an
        # ignored extra .format() argument; round to 3 decimals as intended.
        out.write("Feature Scores: {}\n".format(
            np.round_(self.pipeline().named_steps['selection'].scores_, 3)))
        out.write("Coefficients: {}\n".format(
            self.pipeline().named_steps['regressor'].coef_))
        out.write("Intercept: {}\n".format(
            self.pipeline().named_steps['regressor'].intercept_))
        # get_support() is the public equivalent of _get_support_mask().
        out.write("Feature Mask: {}\n".format(
            self.pipeline().named_steps['selection'].get_support()))

    def __repr__(self):
        return "Huber(...)"
class Linear:
    """Grid-search-tuned ordinary LinearRegression behind KBest selection.

    Fitting happens in __init__; the only tuned parameter is how many
    features SelectKBest keeps.
    """

    def __init__(self, xs, ys, weights):
        param_grid = {'selection__k': range(1, xs.shape[1] + 1), }
        feature_selection = SelectKBest(f_regression)
        regressor = LinearRegression(fit_intercept=True)
        pipeline = Pipeline(
            steps=[('selection', feature_selection), ('regressor', regressor)])
        self._cv = GridSearchCV(pipeline, param_grid, cv=3)
        self._cv.fit(xs, ys, regressor__sample_weight=weights)

    def pipeline(self):
        """Best pipeline found by the grid search."""
        return self._cv.best_estimator_

    def estimator(self):
        return self.pipeline()

    def cross_val_score(self, xs, ys, weights):
        """Mean 3-fold CV score of the tuned pipeline (expensive: refits)."""
        return np.mean(
            cross_val_score(
                self.pipeline(),
                xs,
                ys,
                cv=3,
                fit_params={'regressor__sample_weight': weights}))

    def fit(self, xs, ys, weights):
        self.pipeline().fit(xs, ys, regressor__sample_weight=weights)

    def report(self, out):
        """Write a human-readable tuning summary to *out*."""
        out.write("Best Parameters: {}\n".format(self._cv.best_params_))
        # BUG FIX: the precision 3 previously sat outside np.round_ as an
        # ignored extra .format() argument; round to 3 decimals as intended.
        out.write("Feature Scores: {}\n".format(
            np.round_(self.pipeline().named_steps['selection'].scores_, 3)))
        out.write("Coefficients: {}\n".format(
            self.pipeline().named_steps['regressor'].coef_))
        out.write("Intercept: {}\n".format(
            self.pipeline().named_steps['regressor'].intercept_))
        # get_support() is the public equivalent of _get_support_mask().
        out.write("Feature Mask: {}\n".format(
            self.pipeline().named_steps['selection'].get_support()))

    def __repr__(self):
        return "Linear(...)"
@click.group()
def cli():
    # Root command group; `train` and `predict` subcommands attach below.
    # (No docstring on purpose: click would surface it as --help text.)
    pass
@cli.command()
@click.argument('model', type=click.File('wb'))
def train(model):
    """Train candidate regressors on JSON rows from stdin and dump the best
    to MODEL, then print a selection report to stdout."""
    start = time.perf_counter()
    data = json.load(sys.stdin)
    weights = np.array([d['weight'] for d in data])
    xs = np.matrix([d['features'] for d in data])
    ys = np.array([d['label'] for d in data])
    # Each constructor performs its own hyper-parameter search and fit.
    rf = RandomForest(xs, ys, weights)
    iso = Isotonic(xs, ys, weights)
    h = Huber(xs, ys, weights)
    linear = Linear(xs, ys, weights)
    candidates = [rf, iso, h, linear]
    # cross_val_score() refits the pipeline 3 times per call; score each
    # candidate exactly once instead of re-running CV for every comparison
    # and again for the report, as the original code did.
    scores = {id(e): e.cross_val_score(xs, ys, weights) for e in candidates}
    best = iso
    # Prefer the forest only when importance is spread over at least two
    # features and it beats the isotonic baseline.
    if sorted(rf.feature_importances())[1] > 0.1 and \
            scores[id(rf)] > scores[id(iso)]:
        best = rf
    if scores[id(h)] > scores[id(best)]:
        best = h
    if scores[id(linear)] > scores[id(best)]:
        best = linear
    best.fit(xs, ys, weights)
    joblib.dump(best.estimator(), model)
    logging.info("Trained on {} inputs in {:.3f} seconds.".format(
        len(data), time.perf_counter() - start))
    sys.stdout.write("Selected: {}\n".format(best.__class__.__name__))
    estimators = [(scores[id(e)], e) for e in candidates]
    sys.stdout.write("Scores: {}\n".format(estimators))
    best.report(sys.stdout)
@cli.command()
def predict():
    """Read a JSON payload ({'data': [...], 'models': {...}}) from stdin and
    write per-model predictions as JSON to stdout."""
    start = time.perf_counter()
    data = json.load(sys.stdin)
    predictions = {
        k: _predict_with_model(data['data'], m)
        for k, m in data['models'].items()
    }
    sys.stdout.write(json.dumps(predictions))
    logging.info("Predicted {} inputs for {} models in {:.3f} seconds.".format(
        len(data['data']), len(data['models']), time.perf_counter() - start))
def _predict_with_model(data, model):
    """Load the pickled estimator at *model* and return non-negative
    predictions for *data* (a list of {'features': [...]} dicts).

    Renamed from ``predict``: the old name shadowed the click command above
    at module level.
    """
    if not data:
        return []
    xs = np.matrix([d['features'] for d in data])
    m = joblib.load(model)
    # Clip at zero: negative predictions are not meaningful downstream.
    return list(np.clip(m.predict(xs), 0, None))
if __name__ == '__main__':
    # Dispatch to the click CLI (train / predict subcommands).
    cli()
| apache-2.0 |
bhillmann/gingivere | tests/train_clf.py | 2 | 3654 | import math
import time
import sys
import multiprocessing
import os
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import roc_auc_score
from joblib import Parallel, delayed
from tests import shelve_api as sapi
print(__doc__)
def load_training_data(file):
    """Load one HDF5 feature file and derive per-row labels.

    Files whose name contains 'interictal' get label 0.0 for every row;
    all other files get 1.0.

    Returns:
        (X, y): the stored 'X' frame and a float label array of len(X).
    """
    # Context manager guarantees the store is closed even if reading raises;
    # the original leaked the handle on error.
    with pd.HDFStore(file) as store:
        X = store['X']
    n_rows = len(X)
    if 'interictal' in file:
        y = np.array([0.0] * n_rows)
    else:
        y = np.array([1.0] * n_rows)
    return X, y
def walk_training_data(patient):
    """Yield full paths of this patient's training data files, skipping any
    file whose name contains 'test'."""
    base = "D:/gingivere/data/"
    for name in os.listdir(base):
        if patient in name and "test" not in name:
            yield base + name
def walk_testing_data(patient):
    # TODO: not implemented; presumably intended to mirror walk_training_data
    # for the held-out "test" segment files — confirm before use.
    pass
def sort_data(data):
    """Stack per-file (X, y) results into two flat numpy arrays.

    Args:
        data: iterable of (features, labels) pairs where ``features`` has a
            ``.values`` row matrix (e.g. a DataFrame) and ``labels`` is a
            sequence of floats.

    Returns:
        (X, y) numpy arrays with one row/label per input row.
    """
    X = []
    y = []
    for features, labels in data:
        X.extend(features.values)
        # extend() is O(k) per file; the original ``y = y + list(...)``
        # rebuilt the whole list each iteration (quadratic overall).
        y.extend(labels)
    return np.asarray(X), np.asarray(y)
def train_clf(X, y, verbose=True):
    """Fit a LinearRegression on (X, y); when *verbose*, first print a
    2-fold CV report computed on rows 80000 onward.

    NOTE(review): the report folds fit on X[80000:] slices but score on the
    FULL X/y, and the loop refits ``clf`` per fold before the final full
    fit below — presumably intentional, confirm.
    """
    clf = LinearRegression()
    # Hold out the first 80000 rows from the report's CV split.
    XX = X[80000:]
    yy = y[80000:]
    if verbose:
        skf = StratifiedKFold(yy, n_folds=2)
        for train_index, test_index in skf:
            print("Detailed classification report:")
            print()
            print("The model is trained on the full development set.")
            print("The scores are computed on the full evaluation set.")
            print()
            X_train, X_test = XX[train_index], XX[test_index]
            y_train, y_test = yy[train_index], yy[test_index]
            clf.fit(X_train, y_train)
            # NOTE(review): ``y, y_pred = y, ...`` re-binds y to itself —
            # effectively just ``y_pred = clf.predict(X)``.
            y, y_pred = y, clf.predict(X)
            # for i, num in enumerate(y_pred):
            # if num < 0.0:
            # y_pred[i] = 0.0
            # continue
            # elif num > 1.0:
            # y_pred[i] = 1.0
            # continue
            # Standardize then squash predictions through a sigmoid so they
            # fall in (0, 1) before rounding for the report.
            y_pred = y_pred - y_pred.mean()
            y_pred = y_pred/y_pred.std()
            y_pred = [1/(1+math.pow(math.e, -.5*p)) for p in y_pred]
            print(classification_report(np.around(y), np.around(y_pred)))
            print()
            print(roc_auc_score(y, y_pred))
            print()
        # for train_index, test_index in skf:
        # print("Detailed classification report:")
        # print()
        #
        # y_true, y_pred = y, clf.predict(X)
        # print(roc_auc_score(y_true, y_pred))
        # y_pred = y_pred - y_pred.mean()
        # y_pred = y_pred/y_pred.std()
        # y_pred = [1/(1+math.pow(math.e, -.5*p)) for p in y_pred]
        # print(roc_auc_score(y_true, y_pred))
        # print()
    # Final model is trained on all rows.
    clf.fit(X, y)
    return clf
if __name__ == '__main__':
    patients = ["Dog_1", "Dog_2", "Dog_3", "Dog_4", "Dog_5", "Patient_1", "Patient_2"]
    # d_keys = ['data_length_sec', 'sampling_frequency', 'sequence', 'state', 'file']
    num_cores = multiprocessing.cpu_count()
    now = time.time()
    # With an explicit CLI argument train only that patient; otherwise loop
    # over every known patient. Each trained clf is persisted via shelve.
    if len(sys.argv) >= 2:
        patient = sys.argv[1]
        res = Parallel(n_jobs=num_cores)(delayed(load_training_data)(file) for file in walk_training_data(patient))
        X, y = sort_data(res)
        clf = train_clf(X, y)
        sapi.insert(clf, "%s_clf" % patient)
    else:
        for patient in patients:
            res = Parallel(n_jobs=num_cores)(delayed(load_training_data)(file) for file in walk_training_data(patient))
            X, y = sort_data(res)
            clf = train_clf(X, y)
            sapi.insert(clf, "%s_clf" % patient)
    print("Finished in", time.time()-now , "sec")
| mit |
datacommonsorg/data | scripts/eurostat/regional_statistics_by_nuts/birth_death_migration/test_import.py | 1 | 3688 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from import_data import preprocess_data, clean_data
import pandas as pd
class TestPreprocess(unittest.TestCase):
    """Fixture-driven checks for preprocess_data()/clean_data() output."""

    maxDiff = None  # show full diffs when the comparison fails

    def _compare_with_expected(self, dir_path: str, write_output):
        """Shared fixture driver (the two _test_* methods were duplicates).

        Reads ``data.tsv`` from *dir_path*, lets *write_output(df, path)*
        produce ``output.csv``, then compares it byte-for-byte with
        ``expected.csv``.
        """
        input_path = os.path.join(dir_path, "data.tsv")
        output_path = os.path.join(dir_path, "output.csv")
        expected_path = os.path.join(dir_path, "expected.csv")
        if not os.path.exists(input_path):
            self.fail(input_path + " doesn't exist!")
        if not os.path.exists(expected_path):
            self.fail(expected_path + " doesn't exist!")
        # Raw string avoids the invalid-escape warning for \s in the regex.
        input_df = pd.read_csv(input_path, sep=r'\s*\t\s*', engine='python')
        write_output(input_df, output_path)
        # Read-only access suffices (previously opened 'r+').
        with open(output_path, 'r') as actual_f:
            actual: str = actual_f.read()
        with open(expected_path, 'r') as expected_f:
            expected: str = expected_f.read()
        self.assertEqual(actual, expected)

    def _test_preprocess_output(self, dir_path: str):
        """Compare preprocess_data()'s melted csv against expected.csv."""
        self._compare_with_expected(
            dir_path,
            lambda df, out: preprocess_data(df).to_csv(out, index=False))

    def _test_csv_output(self, dir_path: str):
        """Compare clean_data()'s cleaned csv against expected.csv."""
        self._compare_with_expected(
            dir_path,
            lambda df, out: clean_data(preprocess_data(df), out))

    def test1(self):
        """Simple unit test on melting csv content"""
        self._test_preprocess_output('./test/test1')

    def test2(self):
        """Simple integration test on output csv content"""
        self._test_csv_output('./test/test2')
if __name__ == '__main__':
    # Run the unittest suite when executed directly.
    unittest.main()
| apache-2.0 |
abimannans/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
# Shared module-level fixtures for the tests below.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    """partial_dependence on a fitted classifier, with and without X."""
    # Test partial dependence for classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
    # only 4 grid points instead of 5 because only 4 unique X[:,0] vals
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4
    # now with our own grid
    X_ = np.asarray(X)
    grid = np.unique(X_[:, 0])
    pdp_2, axes = partial_dependence(clf, [0], grid=grid)
    # axes is None when an explicit grid is supplied.
    assert axes is None
    assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
    """Multi-class: one partial-dependence row per class."""
    # Test partial dependence for multi-class classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)
    grid_resolution = 25
    n_classes = clf.n_classes_
    pdp, axes = partial_dependence(
        clf, [0], X=iris.data, grid_resolution=grid_resolution)
    assert pdp.shape == (n_classes, grid_resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
    """Regressor: a single partial-dependence row."""
    # Test partial dependence for regressor
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)
    grid_resolution = 25
    pdp, axes = partial_dependence(
        clf, [0], X=boston.data, grid_resolution=grid_resolution)
    assert pdp.shape == (1, grid_resolution)
    assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
    """Invalid arguments to partial_dependence raise ValueError.

    NOTE(review): the name has a typo ("dependecy"); kept because runners
    discover it via the test_ prefix and renaming could break references.
    """
    # Test input validation of partial dependence.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)
    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)
    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)
    assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
    assert_raises(ValueError, partial_dependence, clf, [100], X=X)
    # wrong ndim for grid
    grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
    """Input validation of ``plot_partial_dependence`` raises ValueError."""
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence, clf, X, [0])
    clf.fit(X, y)

    # zero-column input
    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence, {}, X, [0])

    # invalid feature specs: negative index, too large index,
    # str feature but no feature_names, not a valid features value
    for bad_features in ([-1], [100], ['foobar'], [{'foo': 'bar'}]):
        assert_raises(ValueError, plot_partial_dependence,
                      clf, X, bad_features)
@if_matplotlib
def test_plot_partial_dependence_multiclass():
    """``plot_partial_dependence`` on multi-class input.

    Checks both integer and symbolic class labels, and that a missing or
    unknown ``label`` raises ValueError.
    """
    grid_resolution = 25

    def fit_and_plot(y, label):
        # Train a fresh model on target `y` and plot for class `label`.
        model = GradientBoostingClassifier(n_estimators=10, random_state=1)
        model.fit(iris.data, y)
        fig, axs = plot_partial_dependence(model, iris.data, [0, 1],
                                           label=label,
                                           grid_resolution=grid_resolution)
        assert len(axs) == 2
        # BUGFIX: has_data must be CALLED — the bound method object is
        # always truthy, which made the original assertion vacuous.
        assert all(ax.has_data() for ax in axs)
        return model

    # integer class label
    fit_and_plot(iris.target, 0)
    # now with symbol labels
    clf = fit_and_plot(iris.target_names[iris.target], 'setosa')

    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)
    # label not provided
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
| bsd-3-clause |
Transkribus/TranskribusDU | TranskribusDU/tasks/TablePrototypes/DU_ABPTableRG4.py | 1 | 38067 | # -*- coding: utf-8 -*-
"""
DU task for ABP Table: doing jointly row BIESO and horizontal grid lines
block2line edges do not cross another block.
Here we make the labels consistent whenever any N grid lines have no block
in-between one another.
In that case, those N grid lines must carry consistent BISO labels:
- if one is B, all become B
- elif one is S, all become S
- elif one is I, all become I
- else: they should all be O already
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import math
from lxml import etree
import collections
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks import _checkFindColDir, _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from crf.Edge import Edge, SamePageEdge
from crf.Graph_MultiPageXml import Graph_MultiPageXml
from crf.NodeType_PageXml import NodeType_PageXml_type_woText
#from crf.FeatureDefinition_PageXml_std_noText import FeatureDefinition_PageXml_StandardOnes_noText
from crf.FeatureDefinition import FeatureDefinition
from crf.Transformer import Transformer, TransformerListByType
from crf.Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from crf.Transformer_PageXml import NodeTransformerXYWH_v2, NodeTransformerNeighbors, Node1HotFeatures
from crf.Transformer_PageXml import Edge1HotFeatures, EdgeBooleanFeatures_v2, EdgeNumericalSelector
from crf.PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from tasks.DU_ABPTableGrid import GridAnnotator
class GraphGrid(Graph_MultiPageXml):
    """
    Specialized graph class: nodes are both text blocks and injected grid
    lines, and the edge computation is specific per node kind.
    """
    # Grid stuff: a grid is dynamically added to each page
    iGridStep_H = 33    # horizontal step (odd number is better)
    iGridStep_V = 33    # vertical step (odd number is better)

    # Some grid line will be O or I simply because they are too short:
    # minimum proportion of the page crossed by a grid line
    fMinPageCoverage = 0.5

    # we want to ignore col- and row- spans
    iGridVisibility = 2     # a grid line sees N neighbours below
    iBlockVisibility = 1    # a block sees N neighbouring grid lines

    # node types handled the "classic" way (set via setClassicNodeTypeList)
    _lClassicNodeType = None

    @classmethod
    def setClassicNodeTypeList(cls, lNodeType):
        """
        determine which type of node goes thru the classical way for determining
        the edges (vertical or horizontal overlap, with occlusion, etc.)
        """
        cls._lClassicNodeType = lNodeType

    def parseDocFile(self, sFilename, iVerbose=0):
        """
        Load that document as a CRF Graph.
        Also set the self.doc variable!

        Side effect: the DOM is enriched with GridSeparator elements (any
        pre-existing grid is removed first).

        Return a CRF Graph object (self)
        """
        self.doc = etree.parse(sFilename)
        self.lNode, self.lEdge = list(), list()
        self.lNodeBlock = []      # text nodes ("classic" node types)
        self.lNodeGridLine = []   # grid line nodes
        root = self.doc.getroot()

        doer = GridAnnotator(self.iGridStep_H, self.iGridStep_V)

        #map the groundtruth table separators, if any, to our grid
        ltlHlV = doer.get_grid_GT_index_from_DOM(root, self.fMinPageCoverage)
        for (lHi, lVi) in ltlHlV:
            traceln(" - found %d horizontal, %d vertical GT separators" % (len(lHi), len(lVi)))

        #create DOM nodes reflecting the grid
        #first clean (just in case!)
        n = doer.remove_grid_from_dom(root)
        if n > 0: traceln(" - removed %d existing grid lines" % n)

        # we add GridSeparator elements. Groundtruth ones have type="1"
        n = doer.add_grid_to_DOM(root, ltlHlV)
        traceln(" - added %d grid lines %s" % (n, (self.iGridStep_H, self.iGridStep_V)))

        lClassicType = [nt for nt in self.getNodeTypeList() if nt in self._lClassicNodeType]
        lSpecialType = [nt for nt in self.getNodeTypeList() if nt not in self._lClassicNodeType]

        for pnum, page, domNdPage in self._iter_Page_DocNode(self.doc):
            #now that we have the page, let's create the node for each type!
            lClassicPageNode = [nd for nodeType in lClassicType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
            lSpecialPageNode = [nd for nodeType in lSpecialType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]

            self.lNode.extend(lClassicPageNode)  # e.g. the TextLine objects
            self.lNodeBlock.extend(lClassicPageNode)

            self.lNode.extend(lSpecialPageNode)  # e.g. the grid lines!
            self.lNodeGridLine.extend(lSpecialPageNode)

            #no previous page to consider (for cross-page links...) => None
            lClassicPageEdge = Edge.computeEdges(None, lClassicPageNode)
            self.lEdge.extend(lClassicPageEdge)

            # Now, compute edges between special and classic objects...
            lSpecialPageEdge = self.computeSpecialEdges(lClassicPageNode,
                                                        lSpecialPageNode)
            self.lEdge.extend(lSpecialPageEdge)

            if iVerbose>=2:
                traceln("\tPage %5d"%(pnum))
                # BUGFIX: the two format strings below have two placeholders
                # but were given three arguments (pnum included), raising
                # TypeError whenever iVerbose>=2.
                traceln("\t block: %6d nodes %7d edges (to block)" %(len(lClassicPageNode), len(lClassicPageEdge)))
                traceln("\t line: %6d nodes %7d edges (from block)"%(len(lSpecialPageNode), len(lSpecialPageEdge)))

        if iVerbose: traceln("\t\t (%d nodes, %d edges)"%(len(self.lNode), len(self.lEdge)) )

        return self

    @classmethod
    def computeSpecialEdges(cls, lClassicPageNode, lSpecialPageNode):
        """
        Compute the edges involving the special (grid line) nodes.
        Must be implemented by subclasses.
        return a list of edges
        """
        raise Exception("Specialize this method")
class Edge_BL(Edge):
    """Edge from a text Block to a grid Line (block-to-line)."""
    pass
class Edge_LL(Edge):
    """Edge between two grid Lines (line-to-line)."""
    pass
class GraphGrid_H(GraphGrid):
    """
    GraphGrid restricted to horizontal grid lines.
    """
    def __init__(self):
        # NOTE(review): the base class __init__ is not called here; the
        # node/edge lists are (re)set by parseDocFile — confirm intentional.
        traceln(" - iGridStep_H : ", self.iGridStep_H)
        traceln(" - iGridStep_V : ", self.iGridStep_V)
        traceln(" - iGridVisibility : ", self.iGridVisibility)
        traceln(" - iBlockVisibility : ", self.iBlockVisibility)
        traceln(" - fMinPageCoverage : ", self.fMinPageCoverage)

    def getNodeListByType(self, iTyp):
        """Return the block nodes for type 0, the grid line nodes otherwise."""
        if iTyp == 0:
            return self.lNodeBlock
        else:
            return self.lNodeGridLine

    def getEdgeListByType(self, typA, typB):
        """
        Return an iterable over the edges from node type typA to node type
        typB: block-block, block-line or line-line (line-block is empty).
        """
        if typA == 0:
            if typB == 0:
                return (e for e in self.lEdge if isinstance(e, SamePageEdge))
            else:
                return (e for e in self.lEdge if isinstance(e, Edge_BL))
        else:
            if typB == 0:
                return []
            else:
                return (e for e in self.lEdge if isinstance(e, Edge_LL))

    @classmethod
    def computeSpecialEdges(cls, lClassicPageNode, lSpecialPageNode):
        """
        Compute:
        - edges between each block and the grid line above/across/below the block
        - edges between grid lines
        return a list of edges
        """
        # index the grid lines by their vertical grid index
        dGridLineByIndex = {GridAnnotator.snapToGridIndex(nd.y1, cls.iGridStep_V):nd for nd in lSpecialPageNode}
        for nd in lSpecialPageNode:
            assert dGridLineByIndex[GridAnnotator.snapToGridIndex(nd.y1, cls.iGridStep_V)] == nd, "internal error inconsistent grid"

        # block to grid line edges
        lEdge = []
        fLenNorm = float(cls.iGridStep_V * cls.iBlockVisibility)  # edge length normalizer
        # NOTE(review): imin=100 assumes fewer than 100 grid lines per page — confirm
        imin, imax = 100, -1
        assert lClassicPageNode, "ERROR: empty page!!??"

        for ndBlock in lClassicPageNode:
            # grid interval indices containing the block top and bottom
            i1 = int(math.floor(ndBlock.y1 / float(cls.iGridStep_V)))
            i2 = int(math.ceil (ndBlock.y2 / float(cls.iGridStep_V)))
            assert i2 >= i1
            yBlkAvg = (ndBlock.y1 + ndBlock.y2)/2.0

            #Also make visible the iBlockVisibility-1 previous grid lines, if any
            for i in range(max(0, i1 - cls.iBlockVisibility + 1), i1+1):
                edge = Edge_BL(ndBlock, dGridLineByIndex[i])
                edge.len = (yBlkAvg - i * cls.iGridStep_V) / fLenNorm
                edge._gridtype = -1     # grid line at or above the block top
                lEdge.append(edge)
                imin = min(i, imin)

            # grid lines crossing the block
            for i in range(max(0, i1+1), max(0, i2)):
                ndLine = dGridLineByIndex[i]
                edge = Edge_BL(ndBlock, ndLine)
                edge.len = (yBlkAvg - i * cls.iGridStep_V) / fLenNorm
                edge._gridtype = 0      # grid line is crossing the block
                assert ndBlock.y1 < i*cls.iGridStep_V
                assert i*cls.iGridStep_V < ndBlock.y2
                lEdge.append(edge)
                imax = max(imax, i)

            # grid lines below the block
            for i in range(max(0, i2), i2 + cls.iBlockVisibility):
                try:
                    edge = Edge_BL(ndBlock, dGridLineByIndex[i])
                except KeyError:
                    break  # out of the grid
                edge.len = (yBlkAvg - i * cls.iGridStep_V) / fLenNorm
                edge._gridtype = +1
                lEdge.append(edge)
                imax = max(imax, i)

        #now filter those edges
        n0 = len(lEdge)
        lEdge = cls._filterBadEdge(lEdge, imin, imax, dGridLineByIndex)
        # BUGFIX: was (len(lEdge) - n0), which always printed a count <= 0
        traceln(" - filtering: removed %d edges due to obstruction." % (n0 - len(lEdge)))

        if False:
            # debug dump of the kept block-to-line edges
            print("--- After filtering: %d edges" % len(lEdge))
            lSortedEdge = sorted(lEdge, key=lambda x: x.A.domid)
            for edge in lSortedEdge:
                print("Block domid=%s y1=%s y2=%s"%(edge.A.domid, edge.A.y1, edge.A.y2)
                      + " %s line %s "%(["↑", "-", "↓"][1+edge._gridtype],
                                        edge.B.y1 / cls.iGridStep_V)
                      + "domid=%s y1=%s" %(edge.B.domid, edge.B.y1)
                      )

        # this is what differs from the previous version of the task
        cls._makeConsistentLabelForEmptyGridRow(lEdge, lClassicPageNode, dGridLineByIndex)

        # grid line to grid line edges: each line sees the next iGridVisibility ones
        n = len(dGridLineByIndex)
        for i in range(n):
            A = dGridLineByIndex[i]
            for j in range(i+1, min(n, i+cls.iGridVisibility+1)):
                edge = Edge_LL(A, dGridLineByIndex[j])
                edge.len = (j - i)  # distance in number of grid steps
                lEdge.append(edge)

        return lEdge

    @classmethod
    def _filterBadEdge(cls, lEdge, imin, imax, dGridLineByIndex, fRatio=0.25):
        """
        We get
        - a list of block-to-line edges
        - the [imin, imax] interval of involved grid line index
        - the dGridLineByIndex dictionary
        But some block should not be connected to a line due to obstruction by
        other blocks.
        We filter out those edges...
        return a sub-list of lEdge
        """
        lKeepEdge = []

        def _xoverlapSrcSrc(edge, lEdge):
            """
            True if the source block of `edge` significantly x-overlaps the
            source block of any edge of the list.
            """
            A = edge.A
            for _edge in lEdge:
                if A.significantXOverlap(_edge.A, fRatio): return True
            return False

        # (an unused _yoverlapSrcSrc twin helper was removed)

        #there are two ways for dealing with lines crossed by a block
        # - either it prevents another block to link to the line (assuming an x-overlap)
        # - or not (historical way)
        # THIS IS THE "MODERN" way!!

        #take each line in turn
        for i in range(imin, imax+1):
            ndLine = dGridLineByIndex[i]

            #--- process downward and crossing edges
            #TODO: index!
            lDownwardAndXingEdge = [edge for edge in lEdge \
                                    if edge._gridtype >= 0 and edge.B == ndLine]
            if lDownwardAndXingEdge:
                #sort edges by source block, from closest to the line to farthest
                lDownwardAndXingEdge.sort(key=lambda o: o.A.y2 - ndLine.y1,
                                          reverse=True)
                lKeepDownwardEdge = [lDownwardAndXingEdge.pop(0)]

                #now keep all edges whose source does not x-overlap with
                # the source of an edge that is kept
                for edge in lDownwardAndXingEdge:
                    if not _xoverlapSrcSrc(edge, lKeepDownwardEdge):
                        lKeepDownwardEdge.append(edge)
                lKeepEdge.extend(lKeepDownwardEdge)

            #--- process upward and crossing edges
            # (crossing edges are already in lKeepEdge from the pass above)
            #TODO: index!
            lUpwarAndXingdEdge = [edge for edge in lEdge \
                                  if edge._gridtype <= 0 and edge.B == ndLine]
            if lUpwarAndXingdEdge:
                #sort edges by source block, from closest to the line to farthest
                lUpwarAndXingdEdge.sort(key=lambda o: ndLine.y2 - o.A.y1,
                                        reverse=True)
                lKeepUpwardEdge = [lUpwarAndXingdEdge.pop(0)]

                #now keep all edges whose source does not x-overlap with
                # the source of an edge that is kept
                for edge in lUpwarAndXingdEdge:
                    if not _xoverlapSrcSrc(edge, lKeepUpwardEdge):
                        lKeepUpwardEdge.append(edge)

                # now we keep only the edges, excluding the crossing ones
                # (already included!!)
                lKeepEdge.extend(edge for edge in lKeepUpwardEdge \
                                 if edge._gridtype != 0)

        return lKeepEdge

    @classmethod
    def _makeConsistentLabelForEmptyGridRow(cls, lEdge, lBlockNode, dGridLineByIndex):
        """
        Here we make consistent labels when any N grid lines have no block
        in-between each other.
        In that case, those N grid lines must have consistent BISO labels:
        - if one is B, all become B
        - elif one is S, all become S
        - elif one is I, all become I
        - else: they should all be O already (or not annotated!)

        lLabels_BISO_Grid = ['B', 'I', 'S', 'O']

        NOTE: I'm favoring safe and clean code to efficient code, for experimenting.
        TODO: optimize! (if it performs better...)
        """
        bDBG = False

        # set of blocks per grid interval index
        dsetObjectsByInterval = collections.defaultdict(set)
        imax = -1
        for ndBlock in lBlockNode:
            i1 = int(math.floor(ndBlock.y1 / float(cls.iGridStep_V)))
            i2 = int(math.ceil (ndBlock.y2 / float(cls.iGridStep_V)))
            for i in range(i1, i2):
                dsetObjectsByInterval[i].add(ndBlock)
            imax = max(imax, i2)

        # extend imax down to the last grid line annotated 'B', if any
        lj = list(dGridLineByIndex.keys())
        lj.sort(reverse=True)
        for j in lj:
            if dGridLineByIndex[j].node.get('type') == 'B':
                imax = max(imax, j)
                break

        #enumerate empty intervals
        lEmptyIntervalIndex = [i for i in range(0, imax+1) \
                               if bool(dsetObjectsByInterval[i]) == False]
        if bDBG:
            traceln("nb empty intervals: %d"%len(lEmptyIntervalIndex))
            traceln([(j, dGridLineByIndex[j].domid, dGridLineByIndex[j].node.get('type')) for j in lEmptyIntervalIndex])

        #Make consistent labelling (if any labelling!!)
        if lEmptyIntervalIndex:
            k = 0  #index in lEmptyIntervalIndex list
            kmax = len(lEmptyIntervalIndex)
            while k < kmax:
                i = lEmptyIntervalIndex[k]
                # dk = number of consecutive empty intervals starting at i
                dk = 1
                while (k + dk) < kmax and lEmptyIntervalIndex[k+dk] == (i + dk):
                    dk += 1
                if bDBG:
                    nd = dGridLineByIndex[i]
                    traceln("--- start grid line %s %s (nb=%d ending at %s) cls=%s" %(nd.domid, i, dk-1,dGridLineByIndex[i+dk-1].domid, nd.cls))

                # NOTE: ideally we would harmonize the integer .cls of the
                # grid line graph nodes, but node labels are only loaded
                # later on; as a WORKAROUND we harmonize the 'type'
                # attribute of the DOM nodes instead.
                lCls = [dGridLineByIndex[j].node.get('type') for j in range(i, min(i+dk+1, imax+1))]
                # we go to i+dk+1 because the last boundary line may propagate its label
                if 'B' in lCls:
                    cUniformClass = 'B'
                elif 'S' in lCls:
                    cUniformClass = 'S'
                elif 'I' in lCls:
                    cUniformClass = 'I'
                elif 'O' in lCls:
                    cUniformClass = 'O'
                else:  #unannotated
                    if bDBG: traceln("No annotation: ", lCls)
                    cUniformClass = None

                if not cUniformClass is None:
                    for j in range(i, i+dk):
                        if bDBG:
                            nd = dGridLineByIndex[j]
                            traceln("grid line %s %s made %s from %s"%(nd.domid, j, cUniformClass, nd.node.get('type')))
                        dGridLineByIndex[j].node.set('type', cUniformClass)

                k = k + dk
        return
#------------------------------------------------------------------------------------------------------
class GridLine_NodeTransformer_v2(Transformer):
    """
    Feature extractor for grid-line nodes: 6 columns per node,
    (1, v, v**2) in columns 0-2 for horizontal lines or columns 3-5 for
    vertical lines, where v is the line position rescaled to [-1, +1]
    relative to the page size.
    """
    def transform(self, lNode):
        features = np.zeros((len(lNode), 6), dtype=np.float64)
        for row, node in zip(features, lNode):
            page = node.page
            is_horizontal = abs(node.x2 - node.x1) > abs(node.y1 - node.y2)
            if is_horizontal:
                pos = 2 * node.y1 / float(page.h) - 1  # y rescaled to [-1, +1]
                row[0:3] = (1.0, pos, pos * pos)
            else:
                pos = 2 * node.x1 / float(page.w) - 1  # x rescaled to [-1, +1]
                row[3:6] = (1.0, pos, pos * pos)
        return features
class Block2GridLine_EdgeTransformer(Transformer):
    """
    Features of a block-to-grid-line edge: a one-hot of the edge
    _gridtype (-1, 0 or +1, as set by GraphGrid_H.computeSpecialEdges),
    plus the normalised edge length and its square, each stored in the
    column slot of the corresponding _gridtype (9 columns in total).
    """
    def transform(self, edge):
        # NOTE: despite its name, `edge` is the LIST of edges to transform
        # (parameter name kept for interface compatibility).
        a = np.zeros((len(edge), 3 + 3 + 3), dtype=np.float64)
        for i, e in enumerate(edge):  # was shadowing the `edge` parameter
            z = 1 + e._gridtype       # _gridtype is -1, 0 or +1
            a[i, z] = 1.0
            a[i, 3 + z] = e.len       # normalised edge length
            a[i, 6 + z] = e.len * e.len
        return a
class GridLine2GridLine_EdgeTransformer(Transformer):
    """
    Features of a grid-line-to-grid-line edge: a one-hot of the distance
    in grid steps (1 .. GraphGrid_H.iGridVisibility).
    (Docstring fixed: it previously described the block-to-line case.)
    """
    def transform(self, edge):
        # NOTE: despite its name, `edge` is the LIST of edges to transform
        # (parameter name kept for interface compatibility).
        a = np.zeros((len(edge), GraphGrid_H.iGridVisibility), dtype=np.float64)
        for i, e in enumerate(edge):  # was shadowing the `edge` parameter
            a[i, e.len - 1] = 1.0     # edge length in number of grid steps
        return a
class My_FeatureDefinition_v2(FeatureDefinition):
    """
    Multitype version:
    the node_transformer actually is a list of node transformers, one per
    node type; the edge_transformer is a list of edge transformers, one
    per (typeA, typeB) pair (n_class^2 entries).
    """
    # number of quantiles used by the QuantileTransformer scalers
    n_QUANTILES = 16

    def __init__(self, **kwargs):
        """
        set _node_transformer, _edge_transformer, tfidfNodeTextVectorizer
        """
        FeatureDefinition.__init__(self)

        # NOTE(review): nbTypes is computed but not used below — confirm
        nbTypes = self._getTypeNumber(kwargs)

        print("BETTER FEATURES")

        # features of a text block: geometry, neighbour counts, 1-hot flags
        block_transformer = FeatureUnion( [  #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
            ("xywh", Pipeline([
                ('selector', NodeTransformerXYWH_v2()),
                #v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                ('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False))  #use in-place scaling
                ])
             )
            , ("neighbors", Pipeline([
                ('selector', NodeTransformerNeighbors()),
                #v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                ('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False))  #use in-place scaling
                ])
             )
            , ("1hot", Pipeline([
                ('1hot', Node1HotFeatures())  #does the 1-hot encoding directly
                ])
             )
            ])
        grid_line_transformer = GridLine_NodeTransformer_v2()

        # one node transformer per node type: [block, grid line]
        self._node_transformer = TransformerListByType([block_transformer, grid_line_transformer])

        # features of a block-to-block edge
        edge_BB_transformer = FeatureUnion( [  #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
            ("1hot", Pipeline([
                ('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))
                ])
             )
            , ("boolean", Pipeline([
                ('boolean', EdgeBooleanFeatures_v2())
                ])
             )
            , ("numerical", Pipeline([
                ('selector', EdgeNumericalSelector()),
                #v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                ('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False))  #use in-place scaling
                ])
             )
            ] )
        edge_BL_transformer = Block2GridLine_EdgeTransformer()
        edge_LL_transformer = GridLine2GridLine_EdgeTransformer()
        # one edge transformer per (typeA, typeB): (0,0), (0,1), (1,0), (1,1)
        self._edge_transformer = TransformerListByType([edge_BB_transformer,
                                                        edge_BL_transformer,
                                                        edge_BL_transformer,  # useless but required
                                                        edge_LL_transformer
                                                        ])

        self.tfidfNodeTextVectorizer = None  #tdifNodeTextVectorizer

    def fitTranformers(self, lGraph, lY=None):
        """
        Fit the transformers using the graphs, but TYPE BY TYPE !!!
        return True
        """
        self._node_transformer[0].fit([nd for g in lGraph for nd in g.getNodeListByType(0)])
        self._node_transformer[1].fit([nd for g in lGraph for nd in g.getNodeListByType(1)])

        self._edge_transformer[0].fit([e for g in lGraph for e in g.getEdgeListByType(0, 0)])
        self._edge_transformer[1].fit([e for g in lGraph for e in g.getEdgeListByType(0, 1)])
        # (1,0) and (1,1) are not fitted — presumably those transformers are
        # stateless; confirm before relying on it
        #self._edge_transformer[2].fit([e for g in lGraph for e in g.getEdgeListByType(1, 0)])
        #self._edge_transformer[3].fit([e for g in lGraph for e in g.getEdgeListByType(1, 1)])

        return True
class DU_ABPTableRG4(DU_CRF_Task):
    """
    CRF model for a DU task labelling jointly:
    - table rows (BIESO tags on text lines)
    - horizontal grid lines (BISO tags)
    """
    sXmlFilenamePattern = "*.mpxml"

    # grid configuration, set from the command line via __init__
    iGridStep_H = None
    iGridStep_V = None
    iGridVisibility = None
    iBlockVisibility = None

    #=== CONFIGURATION ====================================================================
    @classmethod
    def getConfiguredGraphClass(cls):
        """
        In this class method, we must return a configured graph class
        """
        # Textline labels
        #  Begin Inside End Single Other
        lLabels_BIESO = ['B', 'I', 'E', 'S', 'O']

        # Grid lines:
        #  Border Ignore Separator Outside
        lLabels_BISO_Grid = ['B', 'I', 'S', 'O']

        #DEFINING THE CLASS OF GRAPH WE USE
        DU_GRAPH = GraphGrid_H
        # push the command-line grid settings onto the graph class
        DU_GRAPH.iGridStep_H = cls.iGridStep_H
        DU_GRAPH.iGridStep_V = cls.iGridStep_V
        DU_GRAPH.iGridVisibility = cls.iGridVisibility
        DU_GRAPH.iBlockVisibility = cls.iBlockVisibility

        # ROW
        ntR = NodeType_PageXml_type_woText("row"
                              , lLabels_BIESO
                              , None
                              , False
                              , BBoxDeltaFun=lambda v: max(v * 0.066, min(5, v/3))  #we reduce overlap in this way
                              )
        ntR.setLabelAttribute("DU_row")
        ntR.setXpathExpr( (".//pc:TextLine"        #how to find the nodes
                          , "./pc:TextEquiv")      #how to get their text
                        )
        DU_GRAPH.addNodeType(ntR)

        # HEADER (the horizontal grid lines)
        ntGH = NodeType_PageXml_type_woText("gh"
                              , lLabels_BISO_Grid
                              , None
                              , False
                              , None        # equiv. to: BBoxDeltaFun=lambda _: 0
                              )
        ntGH.setLabelAttribute("type")
        ntGH.setXpathExpr( ('.//pc:GridSeparator[@orient="0"]'  #how to find the nodes
                          , "./pc:TextEquiv")      #how to get their text
                        )
        DU_GRAPH.addNodeType(ntGH)

        # only the "row" nodes get the classical edge computation
        DU_GRAPH.setClassicNodeTypeList( [ntR ])

        return DU_GRAPH

    def __init__(self, sModelName, sModelDir,
                 iGridStep_H = None,
                 iGridStep_V = None,
                 iGridVisibility = None,
                 iBlockVisibility = None,
                 sComment=None,
                 C=None, tol=None, njobs=None, max_iter=None,
                 inference_cache=None):
        # store the grid settings on the class: getConfiguredGraphClass is
        # a classmethod and reads them from there
        DU_ABPTableRG4.iGridStep_H = iGridStep_H
        DU_ABPTableRG4.iGridStep_V = iGridStep_V
        DU_ABPTableRG4.iGridVisibility = iGridVisibility
        DU_ABPTableRG4.iBlockVisibility = iBlockVisibility

        DU_CRF_Task.__init__(self
                     , sModelName, sModelDir
                     , dFeatureConfig = {'row_row':{}, 'row_gh':{},
                                         'gh_row':{}, 'gh_gh':{},
                                         'gh':{}, 'row':{}}
                     , dLearnerConfig = {
                         'C'                : .1   if C               is None else C
                         , 'njobs'          : 4    if njobs           is None else njobs
                         , 'inference_cache': 50   if inference_cache is None else inference_cache
                         #, 'tol'           : .1
                         , 'tol'            : .05  if tol             is None else tol
                         , 'save_every'     : 50     #save every 50 iterations,for warm start
                         , 'max_iter'       : 10   if max_iter        is None else max_iter
                         }
                     , sComment=sComment
                     #,cFeatureDefinition=FeatureDefinition_PageXml_StandardOnes_noText
                     ,cFeatureDefinition=My_FeatureDefinition_v2
                     )

        # if options.bBaseline:
        #     self.bsln_mdl = self.addBaseline_LogisticRegression()    #use a LR model trained by GridSearch as baseline

    #=== END OF CONFIGURATION =============================================================
    # (commented-out predict / runForExternalMLMethod overrides removed;
    #  see version control history)
# ----------------------------------------------------------------------------
def main(sModelDir, sModelName, options):
    """
    Drive the DU_ABPTableRG4 task from the parsed command-line options:
    model removal, disk-based cross-validation protocol, n-fold
    evaluation, training, testing, and/or prediction.
    """
    doer = DU_ABPTableRG4(sModelName, sModelDir,
                          iGridStep_H = options.iGridStep_H,
                          iGridStep_V = options.iGridStep_V,
                          iGridVisibility = options.iGridVisibility,
                          iBlockVisibility = options.iBlockVisibility,
                          C = options.crf_C,
                          tol = options.crf_tol,
                          njobs = options.crf_njobs,
                          max_iter = options.max_iter,
                          inference_cache = options.crf_inference_cache)

    if options.rm:
        doer.rm()
        return

    # resolve the collection directories given on the command line
    lTrn, lTst, lRun, lFold = [_checkFindColDir(lsDir, bAbsolute=False)
                               for lsDir in [options.lTrn, options.lTst,
                                             options.lRun, options.lFold]]

    traceln("- classes: ", doer.getGraphClass().getLabelNameList())

    if options.iFoldInitNum or options.iFoldRunNum or options.bFoldFinish:
        # explicit, disk-based cross-validation protocol
        if options.iFoldInitNum:
            # initialization of a cross-validation
            splitter, ts_trn, lFilename_trn = doer._nfold_Init(lFold, options.iFoldInitNum, test_size=0.25, random_state=None, bStoreOnDisk=True)
        elif options.iFoldRunNum:
            # run one fold
            oReport = doer._nfold_RunFoldFromDisk(options.iFoldRunNum, options.warm, options.pkl)
            traceln(oReport)
        elif options.bFoldFinish:
            tstReport = doer._nfold_Finish()
            traceln(tstReport)
        else:
            assert False, "Internal error"
        #no more processing!!
        exit(0)

    if lFold:
        # one-shot n-fold evaluation
        loTstRpt = doer.nfold_Eval(lFold, 3, .25, None, options.pkl)
        import graph.GraphModel
        sReportPickleFilename = os.path.join(sModelDir, sModelName + "__report.txt")
        traceln("Results are in %s"%sReportPickleFilename)
        graph.GraphModel.GraphModel.gzip_cPickle_dump(sReportPickleFilename, loTstRpt)
    elif lTrn:
        doer.train_save_test(lTrn, lTst, options.warm, options.pkl)
        try: traceln("Baseline best estimator: %s"%doer.bsln_mdl.best_params_)   #for GridSearch
        except: pass
        traceln(" --- CRF Model ---")
        traceln(doer.getModel().getModelInfo())
    elif lTst:
        doer.load()
        tstReport = doer.test(lTst)
        traceln(tstReport)
        if options.bDetailedReport:
            traceln(tstReport.getDetailledReport())
            # BUGFIX: graph.GraphModel was only imported in the lFold branch
            # above, so reaching this line raised NameError.
            import graph.GraphModel
            sReportPickleFilename = os.path.join(sModelDir, sModelName + "__detailled_report.txt")
            graph.GraphModel.GraphModel.gzip_cPickle_dump(sReportPickleFilename, tstReport)

    if lRun:
        if options.storeX or options.applyY:
            try: doer.load()
            except: pass  #we only need the transformer
            lsOutputFilename = doer.runForExternalMLMethod(lRun, options.storeX, options.applyY, options.bRevertEdges)
        else:
            doer.load()
            lsOutputFilename = doer.predict(lRun)

        traceln("Done, see in:\n  %s"%lsOutputFilename)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    # Command-line entry point: extend the generic trn/tst/run parser with
    # the grid-specific options, then dispatch to main().
    version = "v.01"
    usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
#     parser.add_option("--annotate", dest='bAnnotate',  action="store_true",default=False,  help="Annotate the textlines with BIES labels")

    #FOR GCN
    parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
    parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailled reporting (score per document)")
    parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
    parser.add_option("--line_see_line", dest='iGridVisibility', action="store",
                      type=int, default=2,
                      help="seeline2line: how many next grid lines does one line see?")
    # NOTE(review): default=2 here, but GraphGrid.iBlockVisibility defaults
    # to 1 — confirm which default is intended
    parser.add_option("--block_see_line", dest='iBlockVisibility', action="store",
                      type=int, default=2,
                      help="seeblock2line: how many next grid lines does one block see?")
    parser.add_option("--grid_h", dest='iGridStep_H', action="store", type=int,
                      default=GraphGrid.iGridStep_H,
                      help="Grid horizontal step")
    parser.add_option("--grid_v", dest='iGridStep_V', action="store", type=int,
                      default=GraphGrid.iGridStep_V,
                      help="Grid Vertical step")

    # ---
    #parse the command line
    (options, args) = parser.parse_args()
    # ---
    try:
        sModelDir, sModelName = args
    except Exception as e:
        traceln("Specify a model folder and a model name!")
        _exit(usage, 1, e)

    main(sModelDir, sModelName, options)
smarden1/airflow | airflow/www/app.py | 2 | 63047 | import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
# Authentication wiring.  ``default_login`` is the no-op fallback; when
# AUTHENTICATE is on, an environment-specific ``airflow_login`` module may
# override it.  (BUGFIX/cleanup: a first, dead triple of
# login_required/current_user/logout_user assignments was removed — it was
# unconditionally rebound right below.)
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
    try:
        # Environment specific login
        import airflow_login as login
    except ImportError:
        logging.error(
            "authenticate is set to True in airflow.cfg, "
            "but airflow_login failed to import")
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user

AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
    # authentication disabled: make @login_required a pass-through
    login_required = lambda x: x
class VisiblePasswordInput(widgets.PasswordInput):
    """PasswordInput widget whose ``hide_value`` defaults to False.

    wtforms' PasswordInput hides the stored value when re-rendering a form
    by default; with hide_value=False the value is echoed back into the
    rendered field.
    """
    def __init__(self, hide_value=False):
        self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
    """A PasswordField rendered with :class:`VisiblePasswordInput`."""
    widget = VisiblePasswordInput()
def superuser_required(f):
    """Decorator for views requiring superuser access."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # allowed when auth is off, or the user is a logged-in superuser
        allowed = not AUTHENTICATE or (
            not current_user.is_anonymous() and current_user.is_superuser())
        if not allowed:
            flash("This page requires superuser privileges", "error")
            return redirect(url_for('admin.index'))
        return f(*args, **kwargs)
    return wrapper
def data_profiling_required(f):
    """Decorator for views requiring data profiling access."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # allowed when auth is off, or the logged-in user may profile data
        allowed = not AUTHENTICATE or (
            not current_user.is_anonymous() and current_user.data_profiling())
        if not allowed:
            flash("This page requires data profiling privileges", "error")
            return redirect(url_for('admin.index'))
        return f(*args, **kwargs)
    return wrapper
# Row-count caps — presumably applied to ad-hoc queries and chart data
# further down this module; confirm against usage.
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Highlight text *s* as HTML with the given Pygments lexer class."""
    return highlight(
        s,
        lexer(),  # lexer is a class; instantiate a fresh one per call
        HtmlFormatter(linenos=True),
    )
def wrapped_markdown(s):
    """Render markdown *s* to HTML inside a rich_doc wrapper div."""
    rendered = markdown.markdown(s)
    return '<div class="rich_doc">{}</div>'.format(rendered)
# Maps task attribute names to HTML renderers used by the "Rendered"
# and "Task Details" views: code-like fields get syntax highlighting,
# doc_md gets markdown rendering, callables show their source.
attr_renderer = {
    'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
    'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
    'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
    'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
    'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
    'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
    'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: pygment_html_render(
        inspect.getsource(x), lexers.PythonLexer),
}
# Parse all DAG files once at import time; the views below read from
# this shared bag.
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
# Recycle pooled DB connections hourly to avoid stale connections.
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
    app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
    'ck_page', __name__,
    static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
    """Expose the webserver's hostname to every Jinja template."""
    return dict(hostname=socket.gethostname())
class DateTimeForm(Form):
    """Single-field form holding an execution date."""
    # Date filter form needed for gantt and graph view
    execution_date = DateTimeField(
        "Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
    """Graph-view form: execution date plus dagre layout direction."""
    execution_date = DateTimeField(
        "Execution date", widget=DateTimePickerWidget())
    arrange = SelectField("Layout", choices=(
        ('LR', "Left->Right"),
        ('RL', "Right->Left"),
        ('TB', "Top->Bottom"),
        ('BT', "Bottom->Top"),
    ))
@app.route('/')
def index():
    # The root URL simply forwards to the Flask-Admin landing page.
    return redirect(url_for('admin.index'))
@app.route('/health')
def health():
    """Liveness endpoint; extend with real server checks as needed."""
    return Markup(markdown.markdown("The server is healthy!"))
@app.teardown_appcontext
def shutdown_session(exception=None):
    # Return the scoped SQLAlchemy session at the end of every request.
    settings.Session.remove()
def dag_link(v, c, m, p):
    """Flask-Admin column formatter: dag_id as a link to its graph view.

    Formats through ``Markup(...).format`` (instead of formatting a plain
    string and then wrapping it) so markupsafe HTML-escapes the dag_id,
    matching how log_link/task_instance_link below build their markup.
    """
    url = url_for(
        'airflow.graph',
        dag_id=m.dag_id)
    return Markup(
        '<a href="{url}">{dag_id}</a>').format(url=url, dag_id=m.dag_id)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Admin view over DagModel rows (superuser only).

    Shows active or paused top-level DAGs; most ORM-managed columns are
    rendered read-only in the edit form.
    """
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    # These fields are maintained by the scheduler, not by operators.
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = 50
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True
    def get_query(self):
        """
        Default filters for model
        """
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
    def get_count_query(self):
        """
        Default filters for model
        """
        # Keep the count in sync with get_query(): it previously counted
        # only active DAGs while the list also showed paused ones, which
        # made the pagination totals wrong.
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
class HomeView(AdminIndexView):
    """Admin landing page listing DAGs from the ORM and the dagbag."""
    @expose("/")
    @login_required
    def index(self):
        session = Session()
        DM = models.DagModel
        # Active, top-level (non-subdag) DAGs registered in the database.
        qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
        orm_dags = {dag.dag_id: dag for dag in qry}
        session.expunge_all()
        session.commit()
        session.close()
        dags = dagbag.dags.values()
        # In-memory DAGs, excluding subdags (those have a parent_dag).
        dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
        all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
        return self.render(
            'airflow/dags.html',
            dags=dags,
            orm_dags=orm_dags,
            all_dag_ids=all_dag_ids)
# Flask-Admin container; HomeView above is the "DAGs" landing page.
admin = Admin(
    app,
    name="Airflow",
    index_view=HomeView(name="DAGs"),
    template_mode='bootstrap3')
class Airflow(BaseView):
    def is_visible(self):
        # Hide this view from the admin menu; pages are reached by URL.
        return False
    @expose('/')
    @login_required
    def index(self):
        # Default page of the view: the DAG list template.
        return self.render('airflow/dags.html')
    @expose('/chart_data')
    @data_profiling_required
    @wwwutils.gzipped
    # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
    def chart_data(self):
        """Build the JSON (or CSV) payload backing a chart definition.

        Loads the chart and its connection, renders the chart's SQL as a
        Jinja template (request args become template variables), runs it
        through the connection's hook and massages the DataFrame into the
        Highcharts structure the front end expects.
        """
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        csv = request.args.get('csv') == "true"
        chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
        db = session.query(
            models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
        session.expunge_all()
        session.commit()
        session.close()
        payload = {}
        payload['state'] = 'ERROR'
        payload['error'] = ''
        # Processing templated fields
        try:
            # NOTE(review): eval() of a user-editable chart field trusts
            # chart authors with arbitrary code execution -- confirm this
            # page is restricted to trusted users.
            args = eval(chart.default_params)
            if type(args) is not type(dict()):
                raise AirflowException('Not a dict')
        except:
            args = {}
            payload['error'] += (
                "Default params is not valid, string has to evaluate as "
                "a Python dictionary. ")
        request_dict = {k: request.args.get(k) for k in request.args}
        from airflow import macros
        args.update(request_dict)
        args['macros'] = macros
        sql = jinja2.Template(chart.sql).render(**args)
        label = jinja2.Template(chart.label).render(**args)
        payload['sql_html'] = Markup(highlight(
            sql,
            lexers.SqlLexer(),  # Lexer call
            HtmlFormatter(noclasses=True))
        )
        payload['label'] = label
        import pandas as pd
        pd.set_option('display.max_colwidth', 100)
        hook = db.get_hook()
        try:
            df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT))
        except Exception as e:
            payload['error'] += "SQL execution failed. Details: " + str(e)
        if csv:
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        if not payload['error'] and len(df) == CHART_LIMIT:
            payload['warning'] = (
                "Data has been truncated to {0}"
                " rows. Expect incomplete results.").format(CHART_LIMIT)
        def date_handler(obj):
            # json.dumps default: serialize datetimes as ISO strings.
            return obj.isoformat() if hasattr(obj, 'isoformat') else obj
        if not payload['error'] and len(df) == 0:
            payload['error'] += "Empty result set. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'series' and
                chart.chart_type != "datatable" and
                len(df.columns) < 3):
            payload['error'] += "SQL needs to return at least 3 columns. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'columns'and
                len(df.columns) < 2):
            payload['error'] += "SQL needs to return at least 2 columns. "
        elif not payload['error']:
            import numpy as np
            chart_type = chart.chart_type
            data = None
            if chart_type == "datatable":
                chart.show_datatable = True
            if chart.show_datatable:
                data = df.to_dict(orient="split")
                data['columns'] = [{'title': c} for c in data['columns']]
            # Trying to convert time to something Highcharts likes
            x_col = 1 if chart.sql_layout == 'series' else 0
            if chart.x_is_date:
                try:
                    # From string to datetime
                    df[df.columns[x_col]] = pd.to_datetime(
                        df[df.columns[x_col]])
                except Exception as e:
                    raise AirflowException(str(e))
                # NOTE(review): strftime("%s") (epoch seconds) is a
                # platform-specific extension -- confirm on non-Linux.
                df[df.columns[x_col]] = df[df.columns[x_col]].apply(
                    lambda x: int(x.strftime("%s")) * 1000)
            series = []
            colorAxis = None
            if chart_type == 'datatable':
                payload['data'] = data
                payload['state'] = 'SUCCESS'
                return Response(
                    response=json.dumps(
                        payload, indent=4, default=date_handler),
                    status=200,
                    mimetype="application/json")
            elif chart_type == 'para':
                df.rename(columns={
                    df.columns[0]: 'name',
                    df.columns[1]: 'group',
                }, inplace=True)
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
            elif chart_type == 'heatmap':
                color_perc_lbound = float(
                    request.args.get('color_perc_lbound', 0))
                color_perc_rbound = float(
                    request.args.get('color_perc_rbound', 1))
                color_scheme = request.args.get('color_scheme', 'blue_red')
                if color_scheme == 'blue_red':
                    stops = [
                        [color_perc_lbound, '#00D1C1'],
                        [
                            color_perc_lbound +
                            ((color_perc_rbound - color_perc_lbound)/2),
                            '#FFFFCC'
                        ],
                        [color_perc_rbound, '#FF5A5F']
                    ]
                elif color_scheme == 'blue_scale':
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [color_perc_rbound, '#2222FF']
                    ]
                elif color_scheme == 'fire':
                    diff = float(color_perc_rbound - color_perc_lbound)
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [color_perc_lbound + 0.33*diff, '#FFFF00'],
                        [color_perc_lbound + 0.66*diff, '#FF0000'],
                        [color_perc_rbound, '#000000']
                    ]
                else:
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [
                            color_perc_lbound +
                            ((color_perc_rbound - color_perc_lbound)/2),
                            '#888888'
                        ],
                        [color_perc_rbound, '#000000'],
                    ]
                xaxis_label = df.columns[1]
                yaxis_label = df.columns[2]
                data = []
                for row in df.itertuples():
                    data.append({
                        'x': row[2],
                        'y': row[3],
                        'value': row[4],
                    })
                x_format = '{point.x:%Y-%m-%d}' \
                    if chart.x_is_date else '{point.x}'
                series.append({
                    'data': data,
                    'borderWidth': 0,
                    'colsize': 24 * 36e5,
                    'turboThreshold': sys.float_info.max,
                    'tooltip': {
                        'headerFormat': '',
                        'pointFormat': (
                            df.columns[1] + ': ' + x_format + '<br/>' +
                            df.columns[2] + ': {point.y}<br/>' +
                            df.columns[3] + ': <b>{point.value}</b>'
                        ),
                    },
                })
                colorAxis = {
                    'stops': stops,
                    'minColor': '#FFFFFF',
                    'maxColor': '#000000',
                    'min': 50,
                    'max': 2200,
                }
            else:
                if chart.sql_layout == 'series':
                    # User provides columns (series, x, y)
                    xaxis_label = df.columns[1]
                    yaxis_label = df.columns[2]
                    df[df.columns[2]] = df[df.columns[2]].astype(np.float)
                    df = df.pivot_table(
                        index=df.columns[1],
                        columns=df.columns[0],
                        values=df.columns[2], aggfunc=np.sum)
                else:
                    # User provides columns (x, y, metric1, metric2, ...)
                    xaxis_label = df.columns[0]
                    yaxis_label = 'y'
                    df.index = df[df.columns[0]]
                    df = df.sort(df.columns[0])
                    del df[df.columns[0]]
                    for col in df.columns:
                        df[col] = df[col].astype(np.float)
                for col in df.columns:
                    series.append({
                        'name': col,
                        'data': [
                            (i, v)
                            for i, v in df[col].iteritems() if not np.isnan(v)]
                    })
                series = [serie for serie in sorted(
                    series, key=lambda s: s['data'][0][1], reverse=True)]
            if chart_type == "stacked_area":
                stacking = "normal"
                chart_type = 'area'
            elif chart_type == "percent_area":
                stacking = "percent"
                chart_type = 'area'
            else:
                stacking = None
            hc = {
                'chart': {
                    'type': chart_type
                },
                'plotOptions': {
                    'series': {
                        'marker': {
                            'enabled': False
                        }
                    },
                    'area': {'stacking': stacking},
                },
                'title': {'text': ''},
                'xAxis': {
                    'title': {'text': xaxis_label},
                    'type': 'datetime' if chart.x_is_date else None,
                },
                'yAxis': {
                    'title': {'text': yaxis_label},
                },
                'colorAxis': colorAxis,
                'tooltip': {
                    'useHTML': True,
                    'backgroundColor': None,
                    'borderWidth': 0,
                },
                'series': series,
            }
            if chart.y_log_scale:
                hc['yAxis']['type'] = 'logarithmic'
                hc['yAxis']['minorTickInterval'] = 0.1
                if 'min' in hc['yAxis']:
                    del hc['yAxis']['min']
            payload['state'] = 'SUCCESS'
            payload['hc'] = hc
            payload['data'] = data
            payload['request_dict'] = request_dict
        return Response(
            response=json.dumps(payload, indent=4, default=date_handler),
            status=200,
            mimetype="application/json")
    @expose('/chart')
    @data_profiling_required
    def chart(self):
        """Render a chart page (Highcharts, or parallel-coordinates)."""
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
        session.expunge_all()
        session.commit()
        session.close()
        if chart.chart_type == 'para':
            return self.render('airflow/para/para.html', chart=chart)
        sql = ""
        if chart.show_sql:
            sql = Markup(highlight(
                chart.sql,
                lexers.SqlLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
        return self.render(
            'airflow/highchart.html',
            chart=chart,
            title="Airflow - Chart",
            sql=sql,
            label=chart.label)
    @expose('/dag_stats')
    @login_required
    def dag_stats(self):
        """Return per-DAG task-instance counts by state as JSON."""
        states = [State.SUCCESS, State.RUNNING, State.FAILED]
        task_ids = []
        for dag in dagbag.dags.values():
            task_ids += dag.task_ids
        TI = models.TaskInstance
        session = Session()
        qry = (
            session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
            .filter(TI.task_id.in_(task_ids))
            .group_by(TI.dag_id, TI.state)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        session.commit()
        session.close()
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.dag_id] = []
            for state in states:
                try:
                    count = data[dag.dag_id][state]
                except:
                    # No row for this (dag, state) pair.
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.dag_id].append(d)
        return Response(
            response=json.dumps(payload, indent=4),
            status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
    @app.errorhandler(404)
    def circles(self):
        # NOTE(review): registered on the Flask app, so Flask calls this
        # with the error object as the sole argument (landing in
        # ``self``) -- confirm that is intended.
        return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
    @expose('/noaccess')
    def noaccess(self):
        # Shown when the user lacks the privileges a page requires.
        return self.render('airflow/noaccess.html')
    @expose('/headers')
    def headers(self):
        """Echo request headers plus the current user's auth flags (JSON)."""
        d = {k: v for k, v in request.headers}
        if hasattr(current_user, 'is_superuser'):
            d['is_superuser'] = current_user.is_superuser()
            d['data_profiling'] = current_user.data_profiling()
            d['is_anonymous'] = current_user.is_anonymous()
            d['is_authenticated'] = current_user.is_authenticated()
        return Response(
            response=json.dumps(d, indent=4),
            status=200, mimetype="application/json")
    @expose('/login')
    def login(self):
        # Delegate to the active login backend chosen at module import
        # (airflow_login or default_login).
        return login.login(self, request)
    @expose('/logout')
    def logout(self):
        # End the session and land on the DAG model list.
        logout_user()
        return redirect('/admin/dagmodel/')
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    def task(self):
        """Show a task's plain attributes and rendered code-like fields."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        # Work on a copy so resolving template files does not mutate
        # the shared DAG object.
        task = copy.copy(task)
        task.resolve_template_files()
        attributes = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                # ``type(self.task)`` is the bound-method type (self.task
                # is this very view method), so this filters out methods
                # and keeps plain data attributes.
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    attributes.append((attr_name, str(attr)))
        title = "Task Details"
        # Color coding the special attributes that are code
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        return self.render(
            'airflow/task.html',
            attributes=attributes,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
    @expose('/action')
    @login_required
    def action(self):
        """Dispatch a UI action on a task: 'run' (queue via Celery),
        'clear' (reset task instances) or 'success' (force-mark)."""
        action = request.args.get('action')
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        if action == "run":
            from airflow.executors import DEFAULT_EXECUTOR as executor
            from airflow.executors import CeleryExecutor
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
            force = request.args.get('force') == "true"
            deps = request.args.get('deps') == "true"
            ti = models.TaskInstance(task=task, execution_date=execution_date)
            executor.start()
            executor.queue_task_instance(
                ti, force=force, ignore_dependencies=deps)
            executor.heartbeat()
            flash(
                "Sent {} to the message queue, "
                "it should start any moment now.".format(ti))
            return redirect(origin)
        elif action == 'clear':
            future = request.args.get('future') == "true"
            past = request.args.get('past') == "true"
            # Restrict the DAG to the selected task plus the requested
            # relatives before clearing.
            dag = dag.sub_dag(
                task_regex=r"^{0}$".format(task_id),
                include_downstream=downstream,
                include_upstream=upstream)
            end_date = execution_date if not future else None
            start_date = execution_date if not past else None
            if confirmed:
                count = dag.clear(
                    start_date=start_date,
                    end_date=end_date)
                flash("{0} task instances have been cleared".format(count))
                return redirect(origin)
            else:
                # Dry run first so the user can confirm the victim list.
                tis = dag.clear(
                    start_date=start_date,
                    end_date=end_date,
                    dry_run=True)
                if not tis:
                    flash("No task instances to clear", 'error')
                    response = redirect(origin)
                else:
                    details = "\n".join([str(t) for t in tis])
                    response = self.render(
                        'airflow/confirm.html',
                        message=(
                            "Here's the list of task instances you are about "
                            "to clear:"),
                        details=details,)
                return response
        elif action == 'success':
            # Flagging tasks as successful
            session = settings.Session()
            task_ids = [task_id]
            if downstream:
                task_ids += [
                    t.task_id
                    for t in task.get_flat_relatives(upstream=False)]
            if upstream:
                task_ids += [
                    t.task_id
                    for t in task.get_flat_relatives(upstream=True)]
            TI = models.TaskInstance
            tis = session.query(TI).filter(
                TI.dag_id == dag_id,
                TI.execution_date == execution_date,
                TI.task_id.in_(task_ids)).all()
            if confirmed:
                updated_task_ids = []
                for ti in tis:
                    updated_task_ids.append(ti.task_id)
                    ti.state = State.SUCCESS
                session.commit()
                # Create SUCCESS rows for task ids with no existing TI.
                # NOTE(review): this loop rebinds ``task_id``; harmless
                # since it is not read again, but worth confirming.
                to_insert = list(set(task_ids) - set(updated_task_ids))
                for task_id in to_insert:
                    ti = TI(
                        task=dag.get_task(task_id),
                        execution_date=execution_date,
                        state=State.SUCCESS)
                    session.add(ti)
                    session.commit()
                session.commit()
                session.close()
                flash("Marked success on {} task instances".format(
                    len(task_ids)))
                return redirect(origin)
            else:
                if not task_ids:
                    flash("No task instances to mark as successful", 'error')
                    response = redirect(origin)
                else:
                    tis = []
                    for task_id in task_ids:
                        tis.append(TI(
                            task=dag.get_task(task_id),
                            execution_date=execution_date,
                            state=State.SUCCESS))
                    details = "\n".join([str(t) for t in tis])
                    response = self.render(
                        'airflow/confirm.html',
                        message=(
                            "Here's the list of task instances you are about "
                            "to mark as successful:"),
                        details=details,)
                return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    def tree(self):
        """Tree view: one row per task, one cell per schedule date."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Keep only the selected task and everything upstream of it.
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        session = settings.Session()
        base_date = request.args.get('base_date')
        if not base_date:
            base_date = datetime.now()
        else:
            base_date = dateutil.parser.parse(base_date)
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        from_time = datetime.min.time()
        if dag.start_date:
            from_time = dag.start_date.time()
        from_date = (base_date-(num_runs * dag.schedule_interval)).date()
        from_date = datetime.combine(from_date, from_time)
        dates = utils.date_range(
            from_date, base_date, dag.schedule_interval)
        task_instances = {}
        for ti in dag.get_task_instances(session, from_date):
            task_instances[(ti.task_id, ti.execution_date)] = ti
        expanded = []
        def recurse_nodes(task):
            children = [recurse_nodes(t) for t in task.upstream_list]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            return {
                'name': task.task_id,
                'instances': [
                    utils.alchemy_to_dict(
                        task_instances.get((task.task_id, d))) or {
                            'execution_date': d.isoformat(),
                            'task_id': task.task_id
                        }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        if len(dag.roots) > 1:
            # d3 likes a single root
            data = {
                'name': 'root',
                'instances': [],
                'children': [recurse_nodes(t) for t in dag.roots]
            }
        elif len(dag.roots) == 1:
            data = recurse_nodes(dag.roots[0])
        else:
            flash("No tasks found.", "error")
            data = []
        data = json.dumps(data, indent=4, default=utils.json_ser)
        session.commit()
        session.close()
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    def graph(self):
        """Graph view: dagre-d3 nodes/edges plus instance state per task."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        arrange = request.args.get('arrange', "LR")
        dag = dagbag.get_dag(dag_id)
        # get_dag above returns None for unknown ids; the redirect below
        # fires before dag is dereferenced.
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/dagmodel/')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        def get_upstream(task):
            # Walk upstream recursively, de-duplicating edges.
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                    get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.now().date()
        form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
        task_instances = {
            ti.task_id: utils.alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)
        }
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks
        }
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        session.close()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2),)
    @expose('/duration')
    @login_required
    def duration(self):
        """Chart of task durations (hours) over the last ``days`` days."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        days = int(request.args.get('days', 30))
        dag = dagbag.get_dag(dag_id)
        from_date = (datetime.today()-timedelta(days)).date()
        from_date = datetime.combine(from_date, datetime.min.time())
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        all_data = []
        for task in dag.tasks:
            data = []
            for ti in task.get_task_instances(session, from_date):
                if ti.duration:
                    # Duration is stored in seconds; chart in hours.
                    data.append([
                        ti.execution_date.isoformat(),
                        float(ti.duration) / (60*60)
                    ])
            if data:
                all_data.append({'data': data, 'name': task.task_id})
        session.commit()
        session.close()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            data=all_data,
            chart_options={'yAxis': {'title': {'text': 'hours'}}},
            height="700px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
        )
    @expose('/landing_times')
    @login_required
    def landing_times(self):
        """Chart of landing times: hours between the end of a schedule
        interval and when each task instance actually finished."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        days = int(request.args.get('days', 30))
        dag = dagbag.get_dag(dag_id)
        from_date = (datetime.today()-timedelta(days)).date()
        from_date = datetime.combine(from_date, datetime.min.time())
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        all_data = []
        for task in dag.tasks:
            data = []
            for ti in task.get_task_instances(session, from_date):
                if ti.end_date:
                    # end - (execution_date + interval): delay past the
                    # moment the interval's data became available.
                    data.append([
                        ti.execution_date.isoformat(), (
                            ti.end_date - (
                                ti.execution_date + task.schedule_interval)
                        ).total_seconds()/(60*60)
                    ])
            all_data.append({'data': data, 'name': task.task_id})
        session.commit()
        session.close()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            data=all_data,
            height="700px",
            chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
        )
    @expose('/refresh')
    @login_required
    def refresh(self):
        """Mark one DAG expired so the dagbag re-parses it, then go home."""
        DagModel = models.DagModel
        dag_id = request.args.get('dag_id')
        session = settings.Session()
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag_id).first()
        if orm_dag:
            orm_dag.last_expired = datetime.now()
            session.merge(orm_dag)
            session.commit()
        session.close()
        # Triggers the actual re-parse now that last_expired was bumped.
        dagbag.get_dag(dag_id)
        flash("DAG [{}] is now fresh as a daisy".format(dag_id))
        return redirect('/')
    @expose('/refresh_all')
    @login_required
    def refresh_all(self):
        # Re-parse every DAG file regardless of modification time.
        dagbag.collect_dags(only_if_updated=False)
        flash("All DAGs are now up to date")
        return redirect('/')
    @expose('/gantt')
    @login_required
    def gantt(self):
        """Gantt chart of task start/end times for one execution date."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.now().date()
        form = DateTimeForm(data={'execution_date': dttm})
        # Only instances that actually started can be drawn as bars.
        tis = [
            ti
            for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        data = []
        for i, ti in enumerate(tis):
            end_date = ti.end_date or datetime.now()
            tasks += [ti.task_id]
            color = State.color(ti.state)
            # NOTE(review): strftime('%s') (epoch seconds) is a
            # platform-specific extension -- confirm on non-Linux hosts.
            data.append({
                'x': i,
                'low': int(ti.start_date.strftime('%s')) * 1000,
                'high': int(end_date.strftime('%s')) * 1000,
                'color': color,
            })
        height = (len(tis) * 25) + 50
        session.commit()
        session.close()
        hc = {
            'chart': {
                'type': 'columnrange',
                'inverted': True,
                'height': height,
            },
            'xAxis': {'categories': tasks},
            'yAxis': {'type': 'datetime'},
            'title': {
                'text': None
            },
            'plotOptions': {
                'series': {
                    'cursor': 'pointer',
                    'minPointLength': 4,
                },
            },
            'legend': {
                'enabled': False
            },
            'series': [{
                'data': data
            }]
        }
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            hc=json.dumps(hc, indent=4),
            height=height,
            demo_mode=demo_mode,
            root=root,
        )
    @expose('/variables/<form>', methods=["GET", "POST"])
    @login_required
    def variables(self, form):
        """GET: render airflow/variables/<form>.html.
        POST: store the JSON request body as a Variable keyed by *form*."""
        try:
            if request.method == 'POST':
                data = request.json
                if data:
                    # NOTE(review): this session is never closed, and the
                    # bare except below reports any failure here as a
                    # template-not-found 404 -- worth revisiting.
                    session = settings.Session()
                    var = models.Variable(key=form, val=json.dumps(data))
                    session.add(var)
                    session.commit()
                return ""
            else:
                return self.render(
                    'airflow/variables/{}.html'.format(form)
                )
        except:
            return ("Error: form airflow/variables/{}.html "
                    "not found.").format(form), 404
# Register the main Airflow view (hidden from the menu) with the admin UI.
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad-hoc SQL page: run a statement against any connection that
    exposes a hook and show the result as a table or CSV download."""
    @expose('/')
    @wwwutils.gzipped
    def query(self):
        session = settings.Session()
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        # Only connections whose type provides a hook can be queried.
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.args.get('conn_id')
        csv = request.args.get('csv') == "true"
        sql = request.args.get('sql')
        class QueryForm(Form):
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT))
                # df = hook.get_pandas_df(sql)
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes="table table-bordered table-striped no-wrap",
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True
            if has_data and len(df) == QUERY_LIMIT:
                flash(
                    "Query output truncated at " + str(QUERY_LIMIT) +
                    " rows", 'info')
            if not has_data and error:
                flash('No data', 'error')
            # Bug fix: when the query raised, ``df`` was never bound and
            # the CSV branch crashed with NameError; only stream a CSV
            # when the query actually succeeded.
            if csv and not error:
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
        form = QueryForm(request.form, data=data)
        session.commit()
        session.close()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
# The ad-hoc query page lives under the "Data Profiling" menu.
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
    """Base ModelView using Airflow's templates and a larger page size."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """
    Modifying the base ModelView class for non edit, browse only operations
    """
    named_filter_urls = True
    # Read-only: every mutating admin operation is disabled.
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
def log_link(v, c, m, p):
    """Flask-Admin column formatter: book icon linking to the TI's log."""
    url = url_for(
        'airflow.log',
        dag_id=m.dag_id,
        task_id=m.task_id,
        execution_date=m.execution_date.isoformat())
    template = (
        '<a href="{url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>')
    # Markup.format escapes the interpolated url.
    return Markup(template).format(url=url)
def task_instance_link(v, c, m, p):
    """Render task_id as two links: the task detail page, and the graph view
    filtered to this task and its upstream tasks."""
    url = url_for(
        'airflow.task',
        dag_id=m.dag_id,
        task_id=m.task_id,
        execution_date=m.execution_date.isoformat())
    url_root = url_for(
        'airflow.graph',
        dag_id=m.dag_id,
        root=m.task_id,
        execution_date=m.execution_date.isoformat())
    # Note: .format(**locals()) fills {url}, {url_root} and {m.task_id}
    # before Markup() is applied, so the values are not HTML-escaped.
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{m.task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
            aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))
def state_f(v, c, m, p):
    """Render the task state as a colored label badge."""
    badge = (
        '<span class="label" style="background-color:{color};">'
        '{m.state}</span>')
    return Markup(badge.format(color=State.color(m.state), m=m))
def duration_f(v, c, m, p):
    """Format a finished task's duration as a timedelta.

    Returns None (renders empty) when the task has no end_date or duration.
    """
    if not (m.end_date and m.duration):
        return None
    return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Render a datetime column compactly, dropping the year when current."""
    value = getattr(m, p)
    text = value.isoformat() if value else ''
    current_year = datetime.now().isoformat()[:4]
    if current_year == text[:4]:
        # Strip "YYYY-" from timestamps in the current year.
        text = text[5:]
    return Markup("<nobr>{}</nobr>".format(text))
def nobr_f(v, c, m, p):
    """Render the attribute named p inside <nobr> tags (prevents wrapping)."""
    value = getattr(m, p)
    return Markup("<nobr>{}</nobr>".format(value))
class JobModelView(ModelViewOnly):
    """Read-only browser for scheduler/backfill job records."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    # Newest jobs first.
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)

mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
    """Read-only browser for audit-log entries."""
    verbose_name_plural = "logs"
    verbose_name = "log"
    # Newest entries first.
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    column_formatters = dict(
        dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)

mv = LogModelView(
    models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
    """Browser for task instances; read-only except for deletion."""
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool')
    named_filter_urls = True
    column_formatters = dict(
        log=log_link, task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        dag_id=dag_link, duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    column_default_sort = ('start_date', True)
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'log')
    # Overrides the ModelViewOnly default: instances may be deleted
    # (e.g. to clear state for a re-run).
    can_delete = True
    page_size = 500

mv = TaskInstanceModelView(
    models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
# Register the DAG model view unnamed; it needs to be routable but should
# not appear in the navigation menu.
mv = DagModelView(
    models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over connection credentials (superusers only)."""
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port')
    # NOTE(review): VisiblePasswordField appears to expose the stored
    # password in clear text in the edit form -- confirm this is intended.
    form_overrides = dict(password=VisiblePasswordField)
    form_choices = {
        'conn_type': [
            ('ftp', 'FTP',),
            ('hdfs', 'HDFS',),
            ('http', 'HTTP',),
            ('hive_cli', 'Hive Client Wrapper',),
            ('hive_metastore', 'Hive Metastore Thrift',),
            ('hiveserver2', 'Hive Server 2 Thrift',),
            ('mysql', 'MySQL',),
            ('postgres', 'Postgres',),
            ('oracle', 'Oracle',),
            ('presto', 'Presto',),
            ('s3', 'S3',),
            ('samba', 'Samba',),
            ('sqlite', 'Sqlite',),
        ]
    }

mv = ConnectionModelView(
    models.Connection, Session,
    name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin view over user accounts (superusers only)."""
    verbose_name = "User"
    verbose_name_plural = "Users"
    column_default_sort = 'username'

mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Displays the contents of airflow.cfg (superusers only)."""

    @expose('/')
    def conf(self):
        """Render the configuration file, raw or syntax-highlighted.

        Pass ``?raw=true`` to get the plain text instead of the HTML page.
        """
        from airflow import configuration
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = configuration.AIRFLOW_CONFIG
        # Context manager guarantees the handle is closed even if read()
        # raises; the previous open()/read()/close() sequence leaked the
        # file descriptor on error.
        with open(configuration.AIRFLOW_CONFIG, 'r') as f:
            config = f.read()
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # airflow.cfg uses INI syntax
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/code.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle)

admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
    """Render a chart label as a link to the chart view.

    ``default_params`` is stored as a Python-dict string and evaluated so
    its keys become query-string arguments; a malformed value falls back
    to no extra arguments.
    """
    try:
        # SECURITY NOTE: eval() of a DB-stored string is dangerous if chart
        # editing is exposed to untrusted users. The model's own docs state
        # the field must "eval" as a dict, so it is flagged, not replaced.
        default_params = eval(m.default_params)
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(url=url, m=m))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for ad-hoc SQL charts (Data Profiling menu)."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',)
    column_list = (
        'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    # Help text shown next to each form field.
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
                    "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('para', 'Parallel Coordinates'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('heatmap', 'Heatmap'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        # NOTE(review): this query runs once at class-definition (import)
        # time, so connections added later won't appear until restart, and
        # the Session() opened here is never explicitly closed -- verify.
        'conn_id': [
            (c.conn_id, c.conn_id)
            for c in (
                Session().query(models.Connection.conn_id)
                .group_by(models.Connection.conn_id)
            )
        ]
    }

    def on_model_change(self, form, model, is_created=True):
        """Bump the iteration counter and stamp ownership/mtime on save."""
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        if AUTHENTICATE and not model.user_id and current_user:
            model.user_id = current_user.id
        model.last_modified = datetime.now()

mv = ChartModelView(
    models.Chart, Session,
    name="Charts", category="Data Profiling")
admin.add_view(mv)
# Static external links shown under the "Docs" menu.
admin.add_link(
    base.MenuLink(
        category='Docs',
        name='Documentation',
        url='http://pythonhosted.org/airflow/'))
admin.add_link(
    base.MenuLink(
        category='Docs',
        name='Github',
        url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for known events (e.g. outages, holidays).

    NOTE(review): class name is missing an "n" (KnownEventView); kept as-is
    since renaming would change the public name.
    """
    verbose_name = "known event"
    verbose_name_plural = "known events"
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description')
    column_list = (
        'label', 'event_type', 'start_date', 'end_date', 'reported_by')
    column_default_sort = ("start_date", True)

mv = KnowEventView(
    models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    # Intentionally empty: only referenced by the commented-out debug
    # registration kept in the string literal below.
    pass

'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
    models.KnownEventType,
    Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
    pass
mv = DagPickleView(
    models.DagPickle,
    Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
    """CRUD view for Airflow Variables (key/value store)."""
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    # List only keys; values can be large and are edited in a textarea.
    column_list = ('key',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    form_widget_args = {
        'val': {
            'rows': 20,
        }
    }

mv = VariableView(
    models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
    """Render the pool name as a link filtering task instances by that pool."""
    filter_url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    return Markup("<a href='{url}'>{m.pool}</a>".format(url=filter_url, m=m))
def fused_slots(v, c, m, p):
    """Link to this pool's running task instances, labelled with the count."""
    filter_url = (
        '/admin/taskinstance/'
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(filter_url, m.used_slots()))
def fqueued_slots(v, c, m, p):
    """Link to this pool's queued task instances, labelled with the count."""
    filter_url = (
        '/admin/taskinstance/'
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(filter_url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Manage task pools; slot columns link to filtered task instance lists."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True

mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Browser for recorded SLA misses."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    # System-maintained notification fields: visible but not editable.
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }

mv = SlaMissModelView(
    models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
    """Integrate plugins to the context.

    Registers plugin-provided admin views, Flask blueprints and menu links
    with the running webserver.
    """
    from airflow.plugins_manager import (
        admin_views, flask_blueprints, menu_links)
    for v in admin_views:
        admin.add_view(v)
    for bp in flask_blueprints:
        # A stray debug ``print bp`` was removed here; it wrote every
        # blueprint object to stdout on webserver start.
        app.register_blueprint(bp)
    for ml in menu_links:
        admin.add_link(ml)

integrate_plugins()
| apache-2.0 |
pgleeson/TestArea | models/Parallel/pythonScripts/perf_ml.py | 1 | 2956 | import matplotlib.pyplot as plt
from pylab import *
print "Going to plot performance of simulations on multiple machines"

# Simulated time of the benchmark run, in ms; every timing dict below is
# rescaled to "seconds of wall clock per 1 s of simulated activity".
T_time = 1000

# Wall-clock run times in seconds, keyed by processor count, one dict per
# machine. Suffixes map to the legend labels assigned near the end of the
# script: L=Legion, E=PadraigPC, B=Bernal, M=Matthau, Lm=Lemmon.
times_L = {}
times_L[1] = 1.8823334*60.
times_L[2] = 53.07
times_L[4] = 26.95
times_L[8] = 13.98
times_L[16] = 7.31
times_L[32] = 4.36
# Normalise to per-second-of-simulation figures.
for i in times_L.keys():
    times_L[i] = 1000 * times_L[i] / T_time
times_E = {}
times_E[1]=1.5285*60.
times_E[2]=51.31
times_E[3]=44.57
times_E[4]=40.7
for i in times_E.keys():
    times_E[i] = 1000 * times_E[i] / T_time
times_B = {}
times_B[1] = 3.35*60.
times_B[2] = 1.354*60.
times_B[3] = 51.49
times_B[4] = 40.37
for i in times_B.keys():
    times_B[i] = 1000 * times_B[i] / T_time
times_M = {}
times_M[1] = 82.73
times_M[4] = 19.87
times_M[8] = 9.65
for i in times_M.keys():
    times_M[i] = 1000 * times_M[i] / T_time
times_Lm = {}
times_Lm[1] = 107.62
times_Lm[4] = 25.7
times_Lm[8] = 12.62
for i in times_Lm.keys():
    times_Lm[i] = 1000 * times_Lm[i] / T_time

# Ideal (linear) scaling curves: extrapolate the smallest-processor-count
# measurement assuming time ~ 1/nproc, for comparison with the data.
times_L_ideal = {}
for i in times_L.keys():
    proc_norm = min(times_L.keys())
    times_L_ideal[i] = proc_norm * times_L[proc_norm]/i
times_B_ideal = {}
for i in times_B.keys():
    proc_norm = min(times_B.keys())
    times_B_ideal[i] = proc_norm * times_B[proc_norm]/i
times_E_ideal = {}
for i in times_E.keys():
    proc_norm = min(times_E.keys())
    times_E_ideal[i] = proc_norm * times_E[proc_norm]/i
times_M_ideal = {}
for i in times_M.keys():
    proc_norm = min(times_M.keys())
    times_M_ideal[i] = proc_norm * times_M[proc_norm]/i
times_Lm_ideal = {}
for i in times_Lm.keys():
    proc_norm = min(times_Lm.keys())
    times_Lm_ideal[i] = proc_norm * times_Lm[proc_norm]/i
def getXvals(times):
    """Return the processor counts (dict keys) in ascending order.

    Uses sorted() so the function works on both Python 2 and 3; the
    previous ``x = times.keys(); x.sort()`` relied on keys() returning a
    list, which is Python 2 only.
    """
    return sorted(times)
def getYvals(times):
    """Return the timing values ordered by ascending processor count.

    Rewritten as a comprehension over sorted(times); the previous
    ``times.keys(); x.sort()`` form relied on Python 2's list-returning
    keys() and breaks on Python 3.
    """
    return [times[key] for key in sorted(times)]
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

fig = plt.figure()
p = fig.add_subplot(111)
# Log-log plot: measured times as solid dot-lines, ideal linear scaling as
# dotted lines, one colour per machine.
lines = p.loglog(getXvals(times_L), getYvals(times_L), 'ro-', getXvals(times_L_ideal), getYvals(times_L_ideal), 'r:', \
                 getXvals(times_E), getYvals(times_E), 'bo-', getXvals(times_E_ideal), getYvals(times_E_ideal), 'b:', \
                 getXvals(times_B), getYvals(times_B), 'go-', getXvals(times_B_ideal), getYvals(times_B_ideal), 'g:', \
                 getXvals(times_M), getYvals(times_M), 'ko-', getXvals(times_M_ideal), getYvals(times_M_ideal), 'k:', \
                 getXvals(times_Lm), getYvals(times_Lm), 'mo-', getXvals(times_Lm_ideal), getYvals(times_Lm_ideal), 'm:')
p.set_ylabel('Simulation time for 1 sec of net activity (sec)', fontsize=14)
p.set_xlabel('Number of processors', fontsize=14)
# Even indices are the measured-data lines; odd ones are the ideal curves.
lines[0].set_label('Legion')
lines[2].set_label('PadraigPC')
lines[4].set_label('Bernal')
lines[6].set_label('Matthau')
lines[8].set_label('Lemmon')
legend()
fig.set_figheight(8)
fig.set_figwidth(12)
#plt.print_figure()
canvas = FigureCanvas(fig)
canvas.print_eps('Performance.eps')
# NOTE(review): leftover debug output -- consider removing.
print dir(fig)
plt.show()
| gpl-2.0 |
walterreade/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

# Figure 2: 2-D scatter of the first two features, coloured by class.
plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
# Hide ticks; exact values are not the point of the illustration.
plt.xticks(())
plt.yticks(())

# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
| bsd-3-clause |
MartinSavc/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data: 8x8 handwritten-digit images flattened to 64 features
digits = load_digits()
X, y = digits.data, digits.target

# build a classifier (estimator count fixed to keep the comparison fair)
clf = RandomForestClassifier(n_estimators=20)
def report(grid_scores, n_top=3):
    """Print the n_top best-scoring parameter settings from a search."""
    # Element 1 of each score tuple is the mean validation score.
    ranked = sorted(grid_scores, key=itemgetter(1), reverse=True)
    for rank, score in enumerate(ranked[:n_top], start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
              score.mean_validation_score,
              np.std(score.cv_validation_scores)))
        print("Parameters: {0}".format(score.parameters))
        print("")
# specify parameters and distributions to sample from
# NOTE(review): this uses sklearn.grid_search and grid_scores_, both of
# which were deprecated in later scikit-learn releases (moved to
# model_selection / cv_results_) -- verify against the pinned version.
param_dist = {"max_depth": [3, None],
              "max_features": sp_randint(1, 11),
              "min_samples_split": sp_randint(1, 11),
              "min_samples_leaf": sp_randint(1, 11),
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run randomized search: 20 sampled candidates from the space above
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
                                   n_iter=n_iter_search)

start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
      " parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)

# use a full grid over all parameters (same space, exhaustively enumerated)
param_grid = {"max_depth": [3, None],
              "max_features": [1, 3, 10],
              "min_samples_split": [1, 3, 10],
              "min_samples_leaf": [1, 3, 10],
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
      % (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
danny200309/BuildingMachineLearningSystemsWithPython | ch02/figure1.py | 22 | 1199 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from matplotlib import pyplot as plt
# We load the data with load_iris from sklearn
from sklearn.datasets import load_iris
# load_iris returns an object with several fields
data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names

# One subplot per unordered pair of the four iris features (6 pairs).
fig,axes = plt.subplots(2, 3)
pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]

# Set up 3 different pairs of (color, marker)
color_markers = [
    ('r', '>'),
    ('g', 'o'),
    ('b', 'x'),
]
for i, (p0, p1) in enumerate(pairs):
    ax = axes.flat[i]
    for t in range(3):
        # Use a different color/marker for each class `t`
        c,marker = color_markers[t]
        ax.scatter(features[target == t, p0], features[
            target == t, p1], marker=marker, c=c)
    ax.set_xlabel(feature_names[p0])
    ax.set_ylabel(feature_names[p1])
    # Hide ticks; the figure illustrates class separation, not values.
    ax.set_xticks([])
    ax.set_yticks([])

fig.tight_layout()
fig.savefig('figure1.png')
| mit |
toastedcornflakes/scikit-learn | doc/sphinxext/gen_rst.py | 27 | 40192 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """File-like object that duplicates every write to two streams."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        """Write data to both underlying streams."""
        for stream in (self.file1, self.file2):
            stream.write(data)

    def flush(self):
        """Flush both underlying streams."""
        for stream in (self.file1, self.file2):
            stream.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
# Disk-backed memoization: repeated documentation builds reuse previously
# downloaded pages and files from the _build cache directory.
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Scan forward balancing the (single-character) open/close tags,
        # e.g. '{' and '}', until the opening tag is matched.
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        # Content between (exclusive of) the delimiters.
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        # Walk "key:value" pairs; values are either [...] lists or nested
        # {...} dicts, handled recursively.
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')

    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)

    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx

    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """

    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # Per-object resolved-link cache and per-page downloaded-HTML cache.
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)

        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False

        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)

        self._searchindex = dict(filenames=filenames, objects=objects)

    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        # First try the fully-qualified name, then fall back to looking up
        # the object name inside its module's entry.
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]

        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)

            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')

            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html

            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    # Anchor to the documented object within the page.
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False

        return link

    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found

        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobi['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).

        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link

        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')

            # for some reason, the relative link goes one directory too high up
            link = link[3:]

        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gpr_co2_001.png': (1, 350),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any

    Returns (docstring, first_paragraph, line_after_docstring); with
    ignore_heading=True the second paragraph is returned (truncated to
    95 chars) so the heading paragraph can be skipped.
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    # Skip a shebang line so tokenization starts at real code.
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
        # Stop at the first significant token: either the docstring was
        # just handled, or the module has no docstring.
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """Generate the gallery index and per-example rst files.

    Creates ``auto_examples/index.rst`` plus one rst file per example by
    walking the flat ``examples/`` directory and its immediate
    subdirectories (recursion is deliberately two levels only).
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))

    # plot_gallery may be a string such as "True" coming from the command
    # line; eval it when possible, otherwise fall back to truthiness.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)

    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)

    # we create an index.rst with all examples
    # Fix: use a context manager so the index file is closed (the original
    # only flushed it, leaking the handle).
    with open(os.path.join(root_dir, 'index.rst'), 'w') as fhindex:
        # Note: The sidebar button has been removed from the examples page
        # for now due to how it messes up the layout. Will be fixed at a
        # later point
        fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
        # Here we don't use an os.walk, but we recurse only twice: flat is
        # better than nested.
        seen_backrefs = set()
        generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
        for directory in sorted(os.listdir(example_dir)):
            if os.path.isdir(os.path.join(example_dir, directory)):
                generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
        fhindex.flush()
def extract_line_count(filename, target_dir):
    """Return ``(docstring_end_line, total_line_count)`` for a Python file.

    Both values are 1-based and account for a leading shebang line.
    """
    example_file = os.path.join(target_dir, filename)
    # Fix: close the file deterministically (original leaked the handle).
    if six.PY2:
        with open(example_file) as fh:
            lines = fh.readlines()
    else:
        with open(example_file, encoding='utf-8') as fh:
            lines = fh.readlines()

    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            # First meaningful STRING token is the module docstring.
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
def line_count_sort(file_list, target_dir):
    """Sort example ``.py`` files by effective code length, then by name.

    The effective length excludes the module docstring, so examples are
    ordered by how much actual code they contain.
    """
    py_files = [name for name in file_list if name.endswith('.py')]
    # Check for emptiness up front instead of after lexsort.
    if not py_files:
        return []
    # dtype=object lets file names and integer counts share one array.
    # Fix: np.object / np.str / np.float are deprecated aliases that were
    # removed in numpy 1.24 -- use the builtins instead.
    table = np.zeros(shape=(len(py_files), 2), dtype=object)
    for row, fname in enumerate(py_files):
        docstr_lines, total_lines = extract_line_count(fname, target_dir)
        table[row][0] = fname
        table[row][1] = total_lines - docstr_lines
    # lexsort: last key is primary, so sort by code length, then name.
    order = np.lexsort((table[:, 0].astype(str),
                        table[:, 1].astype(float)))
    return np.array(table[order][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Writes thumbnail divs and a hidden toctree for every example into
    `fhindex`, generates one rst file per example via generate_file_rst,
    and appends backreference stubs under modules/generated/.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        # '.' means the flat top-level examples directory.
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)

    # README.txt content becomes the section header for this directory.
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Shortest examples first (sorted by code length, then name).
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    # NOTE(review): path built from `directory`, i.e. relative to the
    # current working directory, not `target_dir` -- confirm intended.
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            # Render the example and collect the sklearn names it uses.
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
            for backref in backrefs:
                # One .examples stub per sklearn object; append after the
                # first sighting, create (with heading) on the first.
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""")  # clear at the end of the section
# modules for which we embed links into example code
# NOTE(review): embed_code_links builds its resolvers from a separate
# mapping -- confirm this list is still consumed anywhere.
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height.

    The source image is scaled to fit inside ``width`` x ``height`` and
    pasted centered on a white canvas of exactly that size.
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)

    # Pick the scale that keeps the image inside the target box.
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # Fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its modern
    # name (an alias since Pillow 2.7). Fall back for very old PIL.
    resample = getattr(Image, 'LANCZOS', None)
    if resample is None:
        resample = Image.ANTIALIAS
    # resize the image in place
    img.thumbnail((width_sc, height_sc), resample)

    # insert centered
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)

    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """Return the shortest dotted module path that still exposes `obj_name`.

    Walks prefixes of `module_name` from shortest to the full path and
    returns the shortest prefix from which
    ``from <prefix> import <obj_name>`` would succeed.
    """
    # Replaces the original string-built exec('from %s import %s') with
    # importlib, which avoids executing formatted code strings.
    import importlib

    parts = module_name.split('.')
    short_name = module_name
    for i in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:i])
        try:
            module = importlib.import_module(short_name)
            try:
                getattr(module, obj_name)
            except AttributeError:
                # `from X import Y` also succeeds when Y is an importable
                # submodule of X, which getattr alone does not cover.
                importlib.import_module(short_name + '.' + obj_name)
        except ImportError:
            # get the last working module name
            short_name = '.'.join(parts[:(i + 1)])
            break
    return short_name
class NameFinder(ast.NodeVisitor):
    """Finds the longest form of variable names and their imports in code.

    Only retains names that originate from imported modules.
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        # local alias -> full dotted import path
        self.imported_names = {}
        # every dotted name accessed in the code
        self.accessed_names = set()

    def visit_Import(self, node, prefix=''):
        for alias in node.names:
            local_name = alias.asname or alias.name
            self.imported_names[local_name] = prefix + alias.name

    def visit_ImportFrom(self, node):
        # Fix: node.module is None for relative imports such as
        # "from . import x"; the original crashed on None + '.'.
        self.visit_Import(node, (node.module or '') + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        attrs = []
        while isinstance(node, ast.Attribute):
            attrs.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            # This is a.b, not e.g. a().b
            attrs.append(node.id)
            self.accessed_names.add('.'.join(reversed(attrs)))
        else:
            # need to get a in a().b
            self.visit(node)

    def get_mapping(self):
        """Yield (name_as_written, fully_resolved_name) pairs."""
        for name in self.accessed_names:
            local_name = name.split('.', 1)[0]
            remainder = name[len(local_name):]
            if local_name in self.imported_names:
                # Join import path to relative path
                full_name = self.imported_names[local_name] + remainder
                yield name, full_name
def identify_names(code):
    """Build a summary of the imported names used in *code*.

    Each accessed name maps to a dict holding the attribute name, its full
    module path and the shortest module path that still resolves it.

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    visitor = NameFinder()
    visitor.visit(ast.parse(code))

    summary = {}
    for written, resolved in visitor.get_mapping():
        # `written` is the name as typed (e.g. np.asarray);
        # `resolved` carries the full import path (e.g. numpy.asarray).
        module_path, attribute = resolved.rsplit('.', 1)
        summary[written] = {
            'name': attribute,
            'module': module_path,
            'module_short': get_short_module_name(module_path, attribute),
        }
    return summary
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Copies the example into `target_dir`, optionally executes it to
    produce figures/stdout/time caches, builds thumbnails, and renders
    the rst page from `rst_template` / `plot_rst_template`.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    # %%03d survives the first substitution so figures can be numbered later.
    image_fname = '%s_%%03d.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    # Cached stdout and wall-time from a previous run of this example.
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        # Re-execute only when the source is newer than the cached image.
        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                # Tee: echo to the console while capturing for the rst page.
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't with to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    # Preserve non-default face/edge colors the example set.
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr
                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                # Deliberately broad: a failing example must not abort the
                # whole documentation build.
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # Reuse previously generated figures.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                                 '[0-9][0-9][0-9]'))]
            figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally the should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    # The rst templates are filled from the local namespace.
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code.

    Runs as a Sphinx 'build-finished' handler: walks the generated HTML
    examples, loads the pickled name->object summaries written by
    generate_file_rst, and rewrites highlighted identifiers into links.
    """
    if exception is not None:
        return
    print('Embedding documentation hyperlinks in examples..')

    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return

    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)

    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            # Keep building without this package's links.
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)

    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))

    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'

    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            # fname[:-5] strips '.html' to recover the example base name.
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')

            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                # Redundant: the with-statement already closed fid.
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]

                    if this_module not in doc_resolvers:
                        continue

                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue

                    if link is not None:
                        # Pygments renders a.b.c as name spans joined by
                        # operator spans; rebuild that exact HTML as the key.
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file

                # ensure greediness
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))

                def substitute_link(match):
                    return str_repl[match.group()]

                if len(str_repl) > 0:
                    # Read/write as bytes and decode per line to keep the
                    # file's utf-8 encoding intact.
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Register the example-gallery generators with Sphinx."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')

    # embed links after build is finished
    app.connect('build-finished', embed_code_links)

    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made.  If the desired image name already
    # exists, it appends a digit to prevent overwrites.  The problem is,
    # the directory is never cleared.  This means that each time you build
    # the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built.  If sphinx
    # changes their layout between versions, this will not work (though
    # it should probably not cause a crash).  Tested successfully
    # on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        for filename in os.listdir(build_image_dir):
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    """Do nothing; exists only so nosetests does not run setup() above."""
| bsd-3-clause |
pschella/scipy | scipy/signal/filter_design.py | 14 | 135076 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
# Public API of this module: frequency-response helpers, representation
# transforms, and the classic IIR design/order-selection functions.
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
           'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
           'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
           'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
           'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
           'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
           'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
           'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
    """Warning emitted when filter coefficients are badly conditioned."""
# Intentionally shadows the builtin `abs` with numpy's elementwise
# absolute for the rest of this module.
abs = absolute
def findfreqs(num, den, N, kind='ba'):
    """
    Find array of frequencies for computing the response of an analog filter.

    Parameters
    ----------
    num, den : array_like, 1-D
        Numerator and denominator of the transfer function expressed either
        as polynomial coefficients, highest degree first (``kind='ba'``), or
        directly as the zeros and poles (``kind='zp'``).
    N : int
        Number of frequencies to return.
    kind : str {'ba', 'zp'}, optional
        Interpretation of `num` and `den` (coefficients or roots).

    Returns
    -------
    w : (N,) ndarray
        Logarithmically spaced angular frequencies spanning the interesting
        part of the response.

    Examples
    --------
    Nine frequencies for H(s) = s / (s^2 + 8s + 25):

    >>> from scipy import signal
    >>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
    """
    if kind == 'ba':
        poles = atleast_1d(roots(den)) + 0j
        zeros_ = atleast_1d(roots(num)) + 0j
    elif kind == 'zp':
        poles = atleast_1d(den) + 0j
        zeros_ = atleast_1d(num) + 0j
    else:
        raise ValueError("input must be one of {'ba', 'zp'}")

    # A system without poles still needs a reference point far to the left.
    if len(poles) == 0:
        poles = atleast_1d(-1000) + 0j

    # Keep upper-half-plane singularities; discard zeros too far out to
    # matter for the plotted range.
    keep_zero = (abs(zeros_) < 1e5) & (zeros_.imag >= 0)
    features = np.concatenate((np.compress(poles.imag >= 0, poles, axis=-1),
                               np.compress(keep_zero, zeros_, axis=-1)))

    # Features at (numerically) the origin get their magnitude bumped to ~1.
    integ = abs(features) < 1e-10
    hfreq = np.around(np.log10(np.max(3 * abs(features.real + integ) +
                                      1.5 * features.imag)) + 0.5)
    lfreq = np.around(np.log10(0.1 * np.min(abs(real(features + integ)) +
                                            2 * features.imag)) - 0.5)

    return logspace(lfreq, hfreq, N)
def freqs(b, a, worN=None, plot=None):
    """
    Compute frequency response of analog filter.

    Given the M-order numerator `b` and N-order denominator `a` of an analog
    filter, compute its frequency response::

             b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
     H(w) = ----------------------------------------------
             a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]

    Parameters
    ----------
    b : array_like
        Numerator of a linear filter.
    a : array_like
        Denominator of a linear filter.
    worN : {None, int, array_like}, optional
        If None, then compute at 200 frequencies around the interesting parts
        of the response curve (determined by pole-zero locations). If a single
        integer (Python or NumPy integer), then compute at that many
        frequencies. Otherwise, compute the response at the angular
        frequencies (e.g. rad/s) given in `worN`.
    plot : callable, optional
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqs`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, this plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.

    Examples
    --------
    >>> from scipy.signal import freqs, iirfilter

    >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')

    >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))

    >>> import matplotlib.pyplot as plt
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.xlabel('Frequency')
    >>> plt.ylabel('Amplitude response [dB]')
    >>> plt.grid()
    >>> plt.show()
    """
    if worN is None:
        w = findfreqs(b, a, 200)
    elif isinstance(worN, (int, np.integer)):
        # Fix: also accept NumPy integer scalars. Previously np.int32(N)
        # fell through to the array branch and was silently treated as a
        # single frequency instead of a point count.
        N = int(worN)
        w = findfreqs(b, a, N)
    else:
        w = worN
    w = atleast_1d(w)
    s = 1j * w
    h = polyval(b, s) / polyval(a, s)
    if plot is not None:
        plot(w, h)
    return w, h
def freqs_zpk(z, p, k, worN=None):
    """
    Compute frequency response of analog filter.

    Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
    frequency response::

                (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
     H(w) = k * ----------------------------------------
                (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If None, then compute at 200 frequencies around the interesting parts
        of the response curve (determined by pole-zero locations). If a single
        integer (Python or NumPy integer), then compute at that many
        frequencies. Otherwise, compute the response at the angular
        frequencies (e.g. rad/s) given in `worN`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqz : Compute the frequency response of a digital filter in TF form
    freqz_zpk : Compute the frequency response of a digital filter in ZPK form

    Notes
    -----
    .. versionadded: 0.19.0

    Examples
    --------
    >>> from scipy.signal import freqs_zpk, iirfilter

    >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
    ...                     output='zpk')

    >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))

    >>> import matplotlib.pyplot as plt
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.xlabel('Frequency')
    >>> plt.ylabel('Amplitude response [dB]')
    >>> plt.grid()
    >>> plt.show()
    """
    k = np.asarray(k)
    if k.size > 1:
        raise ValueError('k must be a single scalar gain')

    if worN is None:
        w = findfreqs(z, p, 200, kind='zp')
    elif isinstance(worN, (int, np.integer)):
        # Fix: also accept NumPy integer scalars (consistent with `freqs`).
        N = int(worN)
        w = findfreqs(z, p, N, kind='zp')
    else:
        w = worN

    w = atleast_1d(w)
    s = 1j * w
    num = polyvalfromroots(s, z)
    den = polyvalfromroots(s, p)
    h = k * num/den
    return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
    """
    Compute the frequency response of a digital filter.

    Given the M-order numerator `b` and N-order denominator `a` of a digital
    filter, compute its frequency response::

                 jw               -jw              -jwM
        jw    B(e  )    b[0] + b[1]e    + .... + b[M]e
     H(e  ) = ---- = -----------------------------------
                 jw               -jw              -jwN
              A(e  )    a[0] + a[1]e    + .... + a[N]e

    Parameters
    ----------
    b : array_like
        numerator of a linear filter
    a : array_like
        denominator of a linear filter
    worN : {None, int, array_like}, optional
        If None (default), then compute at 512 frequencies equally spaced
        around the unit circle.
        If a single integer (Python or NumPy integer), then compute at that
        many frequencies.
        If an array_like, compute the response at the frequencies given (in
        radians/sample).
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        pi radians/sample (upper-half of unit-circle). If `whole` is True,
        compute frequencies from 0 to 2*pi radians/sample.
    plot : callable
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqz`.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which `h` was computed, in
        radians/sample.
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    sosfreqz

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, this plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.

    Examples
    --------
    >>> from scipy import signal
    >>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
    >>> w, h = signal.freqz(b)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.title('Digital filter frequency response')
    >>> ax1 = fig.add_subplot(111)

    >>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> plt.ylabel('Amplitude [dB]', color='b')
    >>> plt.xlabel('Frequency [rad/sample]')

    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> plt.plot(w, angles, 'g')
    >>> plt.ylabel('Angle (radians)', color='g')
    >>> plt.grid()
    >>> plt.axis('tight')
    >>> plt.show()
    """
    b, a = map(atleast_1d, (b, a))
    if whole:
        lastpoint = 2 * pi
    else:
        lastpoint = pi
    if worN is None:
        N = 512
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    elif isinstance(worN, (int, np.integer)):
        # Fix: also accept NumPy integer scalars. Previously np.int32(N)
        # fell through to the array branch and was silently interpreted as
        # a one-element frequency array instead of a point count.
        N = int(worN)
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    else:
        w = worN
    w = atleast_1d(w)
    zm1 = exp(-1j * w)
    h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
    if plot is not None:
        plot(w, h)
    return w, h
def freqz_zpk(z, p, k, worN=None, whole=False):
    r"""
    Compute the frequency response of a digital filter in ZPK form.

    Given the Zeros, Poles and Gain of a digital filter, compute its frequency
    response::

    :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`

    where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
    the `poles`.

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If None (default), then compute at 512 frequencies equally spaced
        around the unit circle.
        If a single integer (Python or NumPy integer), then compute at that
        many frequencies.
        If an array_like, compute the response at the frequencies given (in
        radians/sample).
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        pi radians/sample (upper-half of unit-circle). If `whole` is True,
        compute frequencies from 0 to 2*pi radians/sample.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which `h` was computed, in
        radians/sample.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqs_zpk : Compute the frequency response of an analog filter in ZPK form
    freqz : Compute the frequency response of a digital filter in TF form

    Notes
    -----
    .. versionadded: 0.19.0

    Examples
    --------
    >>> from scipy import signal
    >>> z, p, k = signal.butter(4, 0.2, output='zpk')
    >>> w, h = signal.freqz_zpk(z, p, k)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.title('Digital filter frequency response')
    >>> ax1 = fig.add_subplot(111)

    >>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> plt.ylabel('Amplitude [dB]', color='b')
    >>> plt.xlabel('Frequency [rad/sample]')

    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> plt.plot(w, angles, 'g')
    >>> plt.ylabel('Angle (radians)', color='g')
    >>> plt.grid()
    >>> plt.axis('tight')
    >>> plt.show()
    """
    # NOTE: docstring is now a raw string; the \prod sequences previously
    # produced invalid-escape DeprecationWarnings.
    z, p = map(atleast_1d, (z, p))
    if whole:
        lastpoint = 2 * pi
    else:
        lastpoint = pi
    if worN is None:
        N = 512
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    elif isinstance(worN, (int, np.integer)):
        # Fix: also accept NumPy integer scalars (consistent with `freqz`).
        N = int(worN)
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    else:
        w = worN
    w = atleast_1d(w)
    # Evaluate on the unit circle: z = e^{jw}.
    zm1 = exp(1j * w)
    h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)

    return w, h
def group_delay(system, w=None, whole=False):
    r"""Compute the group delay of a digital filter.

    The group delay is the negative derivative of the continuous
    (unwrapped) phase response with respect to frequency::

                   d        jw
        D(w) = - -- arg H(e  )
                  dw

    and measures by how many samples the amplitude envelopes of the
    various spectral components of a signal are delayed by the filter.

    Parameters
    ----------
    system : tuple of array_like (b, a)
        Numerator and denominator coefficients of a filter transfer
        function.
    w : {None, int, array-like}, optional
        If None (default), compute at 512 frequencies equally spaced
        around the unit circle.  If a single integer, compute at that
        many frequencies.  If an array, compute the delay at the given
        frequencies (in radians/sample).
    whole : bool, optional
        Normally frequencies are computed from 0 to the Nyquist frequency,
        pi radians/sample (upper half of the unit circle).  If `whole` is
        True, compute frequencies from 0 to ``2*pi`` radians/sample.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which the group delay was computed,
        in radians/sample.
    gd : ndarray
        The group delay, in samples.

    Notes
    -----
    The similar function in MATLAB is called `grpdelay`.

    If the transfer function :math:`H(z)` has zeros or poles on the unit
    circle, the group delay at the corresponding frequencies is undefined.
    When such a case arises a warning is raised and the group delay is set
    to 0 at those frequencies.

    For the details of the numerical computation of the group delay refer
    to [1]_.

    .. versionadded: 0.16.0

    See Also
    --------
    freqz : Frequency response of a digital filter

    References
    ----------
    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
           3rd edition", p. 830.
    """
    if w is None:
        w = 512
    if isinstance(w, int):
        # Build the frequency grid for an integer count of points.
        span = 2 * pi if whole else pi
        w = np.linspace(0, span, w, endpoint=False)
    w = np.atleast_1d(w)
    b, a = map(np.atleast_1d, system)
    # Combine numerator with the reversed denominator (method of [1]):
    # the group delay of b/a equals that of this product minus a constant.
    conv = np.convolve(b, a[::-1])
    ramped = conv * np.arange(conv.size)
    zvals = np.exp(-1j * w)
    numer = np.polyval(ramped[::-1], zvals)
    denom = np.polyval(conv[::-1], zvals)
    # Frequencies where the denominator (nearly) vanishes are undefined.
    bad = np.absolute(denom) < 10 * EPSILON
    if bad.any():
        warnings.warn(
            "The group delay is singular at frequencies [{0}], setting to 0".
            format(", ".join("{0:.3f}".format(ws) for ws in w[bad]))
        )
    gd = np.zeros_like(w)
    ok = ~bad
    gd[ok] = np.real(numer[ok] / denom[ok]) - a.size + 1
    return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
    """
    Compute the frequency response of a digital filter in SOS format.

    Given `sos`, an array with shape (n, 6) of second order sections of
    a digital filter, compute the frequency response of the cascaded
    system function::

                B0(z)   B1(z)         B{n-1}(z)
        H(z) = ----- * ----- * ... * ---------
                A0(z)   A1(z)         A{n-1}(z)

    for z = exp(omega*1j), where B{k}(z) and A{k}(z) are the numerator and
    denominator of the transfer function of the k-th second order section.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``.  Each row corresponds to a second-order
        section: the first three columns are the numerator coefficients,
        the last three the denominator coefficients.
    worN : {None, int, array_like}, optional
        If None (default), compute at 512 frequencies equally spaced
        around the unit circle.  If a single integer, compute at that
        many frequencies.  If array_like, compute the response at the
        given frequencies (in radians/sample).
    whole : bool, optional
        Normally frequencies run from 0 to the Nyquist frequency, pi
        radians/sample (upper half of the unit circle).  If `whole` is
        True, compute frequencies from 0 to 2*pi radians/sample.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which `h` was computed, in
        radians/sample.
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqz, sosfilt

    Notes
    -----
    .. versionadded:: 0.19.0
    """
    sos, n_sections = _validate_sos(sos)
    if n_sections == 0:
        raise ValueError('Cannot compute frequencies with no sections')
    # The cascade's response is the product of every section's response.
    # All freqz calls use the same frequency grid, so the last w is valid.
    h = 1.
    for b, a in ((section[:3], section[3:]) for section in sos):
        w, response = freqz(b, a, worN=worN, whole=whole)
        h = h * response
    return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print zc
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print zr
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
    """
    Sort into pairs of complex conjugates.

    Complex conjugates in `z` are sorted by increasing real part; within
    each pair, the member with the negative imaginary part comes first.
    Pairs with identical real parts are ordered by increasing imaginary
    magnitude.

    Two complex numbers count as a conjugate pair when their real and
    imaginary parts differ in magnitude by less than ``tol * abs(z)``;
    each pair is replaced by the average of its two members so the output
    holds exact conjugates.

    Purely real numbers (imaginary part smaller than `tol` times the
    number's magnitude) are sorted and placed after the conjugate pairs.

    Parameters
    ----------
    z : array_like
        1-dimensional input array to be sorted.
    tol : float, optional
        Relative tolerance for testing realness and conjugate equality.
        Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
        float64)

    Returns
    -------
    y : ndarray
        Complex conjugate pairs followed by real numbers.

    Raises
    ------
    ValueError
        If there are any complex numbers in `z` for which a conjugate
        cannot be found.

    See Also
    --------
    _cplxreal
    """
    z = np.atleast_1d(z)
    if z.size == 0 or np.isrealobj(z):
        return np.sort(z)
    if z.ndim != 1:
        raise ValueError('z must be 1-dimensional')
    zc, zr = _cplxreal(z, tol)
    # Place the conjugate of each pair representative directly before it,
    # producing [c0*, c0, c1*, c1, ...], then append the sorted reals.
    interleaved = np.column_stack((zc.conj(), zc)).ravel()
    return np.append(interleaved, zr)
def tf2zpk(b, a):
    r"""Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    If some values of `b` are too close to 0, they are removed. In that
    case, a BadCoefficients warning is emitted.

    The `b` and `a` arrays are interpreted as coefficients for positive,
    descending powers of the transfer function variable (the common
    controls-engineering convention).  When the numerator and denominator
    have equal order — true for all filters produced by the bilinear
    transform — this coincides with the "negative powers" discrete-time
    form preferred in DSP; otherwise the coefficients must be converted to
    the positive-powers form before the poles and zeros are found.
    """
    b, a = normalize(b, a)
    leading = a[0]
    b = (b + 0.0) / leading
    a = (a + 0.0) / leading
    # The gain is the (normalized) leading numerator coefficient.
    k = b[0]
    return roots(b / k), roots(a), k
def zpk2tf(z, p, k):
    """
    Return polynomial transfer function representation from zeros and poles

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    """
    def _real_if_conjugate_roots(coeffs, root_values):
        # Use real output if possible.  Copied from numpy.poly, since
        # we can't depend on a specific version of numpy: if the complex
        # roots all occur in conjugate pairs, the coefficients are real.
        if issubclass(coeffs.dtype.type, numpy.complexfloating):
            croots = numpy.asarray(root_values, complex)
            pos_roots = numpy.compress(croots.imag > 0, croots)
            neg_roots = numpy.conjugate(numpy.compress(croots.imag < 0,
                                                       croots))
            if len(pos_roots) == len(neg_roots):
                if numpy.all(numpy.sort_complex(neg_roots) ==
                             numpy.sort_complex(pos_roots)):
                    coeffs = coeffs.real.copy()
        return coeffs

    z = atleast_1d(z)
    k = atleast_1d(k)
    if len(z.shape) > 1:
        # 2-D `z`: one numerator polynomial per row (SIMO systems).
        temp = poly(z[0])
        b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
        if len(k) == 1:
            k = [k[0]] * z.shape[0]
        for i in range(z.shape[0]):
            b[i] = k[i] * poly(z[i])
    else:
        b = k * poly(z)
    a = atleast_1d(poly(p))

    b = _real_if_conjugate_roots(b, z)
    a = _real_if_conjugate_roots(a, p)
    return b, a
def tf2sos(b, a, pairing='nearest'):
    """
    Return second-order sections from transfer function representation

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    pairing : {'nearest', 'keep_odd'}, optional
        The method used to combine pairs of poles and zeros into sections.
        See `zpk2sos`.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    zpk2sos, sosfilt

    Notes
    -----
    Conversion goes TF -> ZPK -> SOS.  Converting from TF format is
    generally discouraged, since it usually will not improve numerical
    precision errors; prefer designing filters in ZPK format and
    converting directly to SOS.

    .. versionadded:: 0.16.0
    """
    z, p, k = tf2zpk(b, a)
    return zpk2sos(z, p, k, pairing=pairing)
def sos2tf(sos):
    """
    Return a single transfer function from a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    b = [1.]
    a = [1.]
    # Cascading sections multiplies their transfer functions, i.e.
    # convolves the numerator and denominator polynomials.
    for row in sos:
        b = np.polymul(b, row[:3])
        a = np.polymul(a, row[3:])
    return b, a
def sos2zpk(sos):
    """
    Return zeros, poles, and gain of a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    n_sections = sos.shape[0]
    # Each biquad contributes two zeros, two poles, and a gain factor.
    z = np.empty(2 * n_sections, np.complex128)
    p = np.empty(2 * n_sections, np.complex128)
    k = 1.
    for idx, row in enumerate(sos):
        z_sec, p_sec, k_sec = tf2zpk(row[:3], row[3:])
        z[2 * idx:2 * idx + 2] = z_sec
        p[2 * idx:2 * idx + 2] = p_sec
        k *= k_sec
    return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
    """
    Return second-order sections from zeros, poles, and gain of a system

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {'nearest', 'keep_odd'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        See Notes below.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    sosfilt

    Notes
    -----
    The algorithm used to convert ZPK to SOS format is designed to
    minimize errors due to numerical precision issues. The pairing
    algorithm attempts to minimize the peak gain of each biquadratic
    section. This is done by pairing poles with the nearest zeros, starting
    with the poles closest to the unit circle.

    *Algorithms*

    The current algorithms are designed specifically for use with digital
    filters. (The output coefficients are not correct for analog filters.)

    The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
    algorithms are mostly shared. The ``nearest`` algorithm attempts to
    minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
    the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:

    As a pre-processing step, add poles or zeros to the origin as
    necessary to obtain the same number of poles and zeros for pairing.
    If ``pairing == 'nearest'`` and there are an odd number of poles,
    add an additional pole and a zero at the origin.

    The following steps are then iterated over until no more poles or
    zeros remain:

    1. Take the (next remaining) pole (complex or real) closest to the
       unit circle to begin a new filter section.

    2. If the pole is real and there are no other remaining real poles [#]_,
       add the closest real zero to the section and leave it as a first
       order section. Note that after this step we are guaranteed to be
       left with an even number of real poles, complex poles, real zeros,
       and complex zeros for subsequent pairing iterations.

    3. Else:

        1. If the pole is complex and the zero is the only remaining real
           zero*, then pair the pole with the *next* closest zero
           (guaranteed to be complex). This is necessary to ensure that
           there will be a real zero remaining to eventually create a
           first-order section (thus keeping the odd order).

        2. Else pair the pole with the closest remaining zero (complex or
           real).

        3. Proceed to complete the second-order section by adding another
           pole and zero to the current pole and zero in the section:

            1. If the current pole and zero are both complex, add their
               conjugates.

            2. Else if the pole is complex and the zero is real, add the
               conjugate pole and the next closest real zero.

            3. Else if the pole is real and the zero is complex, add the
               conjugate zero and the real pole closest to those zeros.

            4. Else (we must have a real pole and real zero) add the next
               real pole closest to the unit circle, and then add the real
               zero closest to that pole.

    .. [#] This conditional can only be met for specific odd-order inputs
       with the ``pairing == 'keep_odd'`` method.

    .. versionadded:: 0.16.0

    Examples
    --------
    Design a 6th order low-pass elliptic digital filter for a system with a
    sampling rate of 8000 Hz that has a pass-band corner frequency of
    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
    the attenuation in the stop-band should be at least 90 dB.

    In the following call to `signal.ellip`, we could use ``output='sos'``,
    but for this example, we'll use ``output='zpk'``, and then convert to SOS
    format with `zpk2sos`:

    >>> from scipy import signal
    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')

    Now convert to SOS format.

    >>> sos = signal.zpk2sos(z, p, k)

    The coefficients of the numerators of the sections:

    >>> sos[:, :3]
    array([[ 0.0014154 ,  0.00248707,  0.0014154 ],
           [ 1.        ,  0.72965193,  1.        ],
           [ 1.        ,  0.17594966,  1.        ]])

    The symmetry in the coefficients occurs because all the zeros are on the
    unit circle.

    The coefficients of the denominators of the sections:

    >>> sos[:, 3:]
    array([[ 1.        , -1.32543251,  0.46989499],
           [ 1.        , -1.26117915,  0.6262586 ],
           [ 1.        , -1.25707217,  0.86199667]])

    The next example shows the effect of the `pairing` option. We have a
    system with three poles and three zeros, so the SOS array will have
    shape (2, 6). The means there is, in effect, an extra pole and an extra
    zero at the origin in the SOS representation.

    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])

    With ``pairing='nearest'`` (the default), we obtain

    >>> signal.zpk2sos(z1, p1, 1)
    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])

    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
    have been assigned to different sections.

    With ``pairing='keep_odd'``, we obtain:

    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])

    The extra pole and zero at the origin are in the same section.
    The first section is, in effect, a first-order section.
    """
    # TODO in the near future:
    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
    # 3. Make sosfilt automatically simplify sections to first order
    #    when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
    # 4. Further optimizations of the section ordering / pole-zero pairing.
    # See the wiki for other potential issues.
    valid_pairings = ['nearest', 'keep_odd']
    if pairing not in valid_pairings:
        raise ValueError('pairing must be one of %s, not %s'
                         % (valid_pairings, pairing))
    if len(z) == len(p) == 0:
        # Pure gain: a single trivial section.
        return array([[k, 0., 0., 1., 0., 0.]])
    # ensure we have the same number of poles and zeros, and make copies
    p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
    z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
    n_sections = (max(len(p), len(z)) + 1) // 2
    sos = zeros((n_sections, 6))
    if len(p) % 2 == 1 and pairing == 'nearest':
        # Odd order with 'nearest': pad with an extra pole/zero at the
        # origin so everything can be grouped into biquads.
        p = np.concatenate((p, [0.]))
        z = np.concatenate((z, [0.]))
    assert len(p) == len(z)
    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))
    p_sos = np.zeros((n_sections, 2), np.complex128)
    z_sos = np.zeros_like(p_sos)
    for si in range(n_sections):
        # Select the next "worst" pole (the one closest to the unit circle)
        p1_idx = np.argmin(np.abs(1 - np.abs(p)))
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)
        # Pair that pole with a zero
        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # Special case to set a first-order section
            z1_idx = _nearest_real_complex_idx(z, p1, 'real')
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            p2 = z2 = 0
        else:
            if not np.isreal(p1) and np.isreal(z).sum() == 1:
                # Special case to ensure we choose a complex zero to pair
                # with so later (setting up a first-order section)
                z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
                assert not np.isreal(z[z1_idx])
            else:
                # Pair the pole with the closest zero (real or complex)
                z1_idx = np.argmin(np.abs(p1 - z))
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            # Now that we have p1 and z1, figure out what p2 and z2 need to be
            if not np.isreal(p1):
                if not np.isreal(z1):  # complex pole, complex zero
                    p2 = p1.conj()
                    z2 = z1.conj()
                else:  # complex pole, real zero
                    p2 = p1.conj()
                    z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
            else:
                if not np.isreal(z1):  # real pole, complex zero
                    z2 = z1.conj()
                    p2_idx = _nearest_real_complex_idx(p, z1, 'real')
                    p2 = p[p2_idx]
                    assert np.isreal(p2)
                else:  # real pole, real zero
                    # pick the next "worst" pole to use
                    idx = np.where(np.isreal(p))[0]
                    assert len(idx) > 0
                    p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
                    p2 = p[p2_idx]
                    # find a real zero to match the added pole
                    assert np.isreal(p2)
                    z2_idx = _nearest_real_complex_idx(z, p2, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
                p = np.delete(p, p2_idx)
        p_sos[si] = [p1, p2]
        z_sos[si] = [z1, z2]
    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z
    # Construct the system, reversing order so the "worst" are last
    p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
    z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
    gains = np.ones(n_sections)
    gains[0] = k
    for si in range(n_sections):
        x = zpk2tf(z_sos[si], p_sos[si], gains[si])
        sos[si] = np.concatenate(x)
    return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zero's so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a single digit and another
# is array-like e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
    """Normalize numerator/denominator of a continuous-time transfer function.

    If values of `b` are too close to 0, they are removed. In that case, a
    BadCoefficients warning is emitted.

    Parameters
    ----------
    b : array_like
        Numerator of the transfer function. Can be a 2d array to normalize
        multiple transfer functions.
    a : array_like
        Denominator of the transfer function. At most 1d.

    Returns
    -------
    num : array
        The numerator of the normalized transfer function. At least a 1d
        array. A 2d-array if the input `num` is a 2d array.
    den : 1d-array
        The denominator of the normalized transfer function.

    Raises
    ------
    ValueError
        If the denominator is not 1-D, the numerator is more than 2-D, or
        the denominator is identically zero.

    Notes
    -----
    Coefficients for both the numerator and denominator should be specified
    in descending exponent order (e.g., ``s^2 + 3s + 5`` would be
    represented as ``[1, 3, 5]``).
    """
    num, den = b, a

    den = np.atleast_1d(den)
    num = np.atleast_2d(_align_nums(num))

    if den.ndim != 1:
        raise ValueError("Denominator polynomial must be rank-1 array.")
    if num.ndim > 2:
        raise ValueError("Numerator polynomial must be rank-1 or"
                         " rank-2 array.")
    if np.all(den == 0):
        # NOTE: fixed typo in the error message ("on" -> "one").
        raise ValueError("Denominator must have at least one nonzero "
                         "element.")

    # Trim leading zeros in denominator, leave at least one.
    den = np.trim_zeros(den, 'f')

    # Normalize transfer function so the denominator is monic.
    num, den = num / den[0], den / den[0]

    # Count numerator columns that are all zero
    leading_zeros = 0
    for col in num.T:
        if np.allclose(col, 0, atol=1e-14):
            leading_zeros += 1
        else:
            break

    # Trim leading zeros of numerator
    if leading_zeros > 0:
        warnings.warn("Badly conditioned filter coefficients (numerator): the "
                      "results may be meaningless", BadCoefficients)
        # Make sure at least one column remains
        if leading_zeros == num.shape[1]:
            leading_zeros -= 1
        num = num[:, leading_zeros:]

    # Squeeze first dimension if singular
    if num.shape[0] == 1:
        num = num[0, :]

    return num, den
def lp2lp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a different frequency.

    Return an analog low-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency,
    in transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the prototype.
    a : array_like
        Denominator polynomial coefficients of the prototype.
    wo : float, optional
        Desired cutoff frequency (angular, e.g. rad/s).  Defaults to 1.0.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the transformed filter.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # wo may arrive as a length-1 sequence; take its scalar value.
        wo = float(wo[0])
    n_a = len(a)
    n_b = len(b)
    longest = max(n_a, n_b)
    # Descending powers of wo matching the polynomial coefficient order.
    powers = wo ** numpy.arange(longest - 1, -1, -1)
    off_b = max(n_b - n_a, 0)
    off_a = max(n_a - n_b, 0)
    scale = powers[off_b]
    b = b * scale / powers[off_a:]
    a = a * scale / powers[off_b:]
    return normalize(b, a)
def lp2hp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the low-pass prototype.
    a : array_like
        Denominator polynomial coefficients of the low-pass prototype.
    wo : float, optional
        Desired cutoff frequency of the high-pass filter
        (angular, e.g. rad/s).  Defaults to 1.0.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the transformed high-pass filter.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # wo may arrive as a length-1 sequence; take its scalar value.
        wo = float(wo[0])
    d = len(a)
    n = len(b)
    # The LP->HP substitution reverses each polynomial's coefficient
    # order and scales terms by ascending powers of wo.
    if wo != 1:
        pwo = pow(wo, numpy.arange(max((d, n))))
    else:
        pwo = numpy.ones(max((d, n)), b.dtype.char)
    if d >= n:
        # Denominator at least as long: reverse both, zero-padding the
        # shorter (numerator) out to the denominator's length.
        outa = a[::-1] * pwo
        outb = resize(b, (d,))
        outb[n:] = 0.0
        outb[:n] = b[::-1] * pwo[:n]
    else:
        # Numerator longer: pad the reversed denominator instead.
        outb = b[::-1] * pwo
        outa = resize(a, (n,))
        outa[d:] = 0.0
        outa[:d] = a[::-1] * pwo[:d]
    return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the low-pass prototype.
    a : array_like
        Denominator polynomial coefficients of the low-pass prototype.
    wo : float, optional
        Desired center frequency (angular, e.g. rad/s).  Defaults to 1.0.
    bw : float, optional
        Desired passband width (angular, e.g. rad/s).  Defaults to 1.0.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the transformed band-pass filter.
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = mintypecode((a, b))
    ma = max([N, D])
    # The LP->BP substitution doubles the filter order:
    Np = N + ma
    Dp = D + ma
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    # Expand each original term b[N - i] * s**i under the substitution via
    # the binomial theorem (the comb(i, k) * wosq**(i - k) / bw**i terms),
    # collecting every contribution to the coefficient of s**(Np - j).
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
        bprime[Np - j] = val
    # Same expansion for the denominator coefficients.
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
        aprime[Dp - j] = val
    return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the low-pass prototype.
    a : array_like
        Denominator polynomial coefficients of the low-pass prototype.
    wo : float, optional
        Desired stopband center frequency (angular, e.g. rad/s).
        Defaults to 1.0.
    bw : float, optional
        Desired stopband width (angular, e.g. rad/s).  Defaults to 1.0.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the transformed band-stop filter.
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = mintypecode((a, b))
    M = max([N, D])
    # The LP->BS substitution doubles the filter order:
    Np = M + M
    Dp = M + M
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    # Expand each original term under the substitution via the binomial
    # theorem (the comb(M - i, k) * wosq**(M - i - k) * bw**i terms),
    # collecting every contribution to the coefficient of s**(Np - j).
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * b[N - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        bprime[Np - j] = val
    # Same expansion for the denominator coefficients.
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * a[D - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        aprime[Dp - j] = val
    return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
    """Return a digital filter from an analog one using a bilinear transform.

    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``
    (the ``2*fs`` factor appears below as ``pow(2 * fs, i)``).

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the analog filter.
    a : array_like
        Denominator polynomial coefficients of the analog filter.
    fs : float, optional
        Sample rate, as ordinary frequency (e.g. hertz).  Defaults to 1.0.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the transformed digital filter.
    """
    fs = float(fs)
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = float
    M = max([N, D])
    # The transformed polynomials in z have the same (maximum) degree M.
    Np = M
    Dp = M
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    # Expand each analog term b[N - i] * s**i under the substitution:
    # (2*fs)**i * (z-1)**i * (z+1)**(M-i), using two binomial expansions
    # (comb(i, k) for (z-1)**i with sign (-1)**k, comb(M - i, l) for
    # (z+1)**(M-i)), and collect the coefficient of z**(M - j).
    for j in range(Np + 1):
        val = 0.0
        for i in range(N + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * b[N - i] *
                                pow(2 * fs, i) * (-1) ** k)
        bprime[j] = real(val)
    # Same expansion for the denominator coefficients.
    for j in range(Dp + 1):
        val = 0.0
        for i in range(D + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * a[D - i] *
                                pow(2 * fs, i) * (-1) ** k)
        aprime[j] = real(val)
    return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
    """Complete IIR digital and analog filter design.

    Given passband and stopband frequencies and gains, construct an analog
    or digital IIR filter of minimum order for a given basic type.  Return
    the output in numerator, denominator ('ba'), pole-zero ('zpk') or
    second order sections ('sos') form.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 for
        digital filters (1 is the Nyquist frequency, pi radians/sample) or
        given as angular frequencies (e.g. rad/s) for analog filters.
        For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design: 'butter', 'cheby1', 'cheby2',
        'ellip' (default) or 'bessel'.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    See Also
    --------
    iirfilter : General filter design using order and critical frequencies
    buttord, cheb1ord, cheb2ord, ellipord
    """
    try:
        order_selector = filter_dict[ftype][1]
    except KeyError:
        raise ValueError("Invalid IIR filter type: %s" % ftype)
    except IndexError:
        raise ValueError(("%s does not have order selection. Use "
                          "iirfilter function.") % ftype)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Infer band type: one edge pair -> low/high pass (1 or 2), two edge
    # pairs -> band stop/pass (3 or 4); the ordering of the first edges
    # distinguishes within each pair.
    band_kind = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        band_kind += 1
    btype = {1: 'lowpass', 2: 'highpass',
             3: 'bandstop', 4: 'bandpass'}[band_kind]
    # Find the minimum order meeting the spec, then design that filter.
    N, Wn = order_selector(wp, ws, gpass, gstop, analog=analog)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
                     ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
              ftype='butter', output='ba'):
    """
    IIR digital and analog filter design given order and critical points.
    Design an Nth-order digital or analog filter and return the filter
    coefficients.
    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample.  (`Wn` is thus in
        half-cycles / sample.)
        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
    rp : float, optional
        For Chebyshev and elliptic filters, provides the maximum ripple
        in the passband. (dB)
    rs : float, optional
        For Chebyshev and elliptic filters, provides the minimum attenuation
        in the stop band. (dB)
    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
        The type of filter.  Default is 'bandpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:
            - Butterworth   : 'butter'
            - Chebyshev I   : 'cheby1'
            - Chebyshev II  : 'cheby2'
            - Cauer/elliptic: 'ellip'
            - Bessel/Thomson: 'bessel'
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output=='sos'``.
    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirdesign : General filter design using passband and stopband spec
    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.
    Examples
    --------
    Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
    response:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
    ...                         analog=True, ftype='cheby2')
    >>> w, h = signal.freqs(b, a, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.semilogx(w, 20 * np.log10(abs(h)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [radians / second]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    """
    # Canonicalize the string selectors to lowercase before any lookups.
    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
    Wn = asarray(Wn)
    # Resolve btype aliases through band_dict; unknown names raise.
    try:
        btype = band_dict[btype]
    except KeyError:
        raise ValueError("'%s' is an invalid bandtype for filter." % btype)
    # filter_dict maps ftype -> [prototype function, ...]; element 0 is the
    # analog lowpass prototype generator used below.
    try:
        typefunc = filter_dict[ftype][0]
    except KeyError:
        raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
    if output not in ['ba', 'zpk', 'sos']:
        raise ValueError("'%s' is not a valid output form." % output)
    # rp/rs are dB magnitudes; negative values are rejected here, and
    # missing values are checked per filter type below.
    if rp is not None and rp < 0:
        raise ValueError("passband ripple (rp) must be positive")
    if rs is not None and rs < 0:
        raise ValueError("stopband attenuation (rs) must be positive")
    # Get analog lowpass prototype
    if typefunc == buttap:
        z, p, k = typefunc(N)
    elif typefunc == besselap:
        # bessel_norms maps the 'bessel_*' ftype to its normalization mode.
        z, p, k = typefunc(N, norm=bessel_norms[ftype])
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to "
                             "design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp)
    elif typefunc == cheb2ap:
        if rs is None:
            raise ValueError("stopband attenuation (rs) must be provided to "
                             "design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs)
    elif typefunc == ellipap:
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an "
                             "elliptic filter.")
        z, p, k = typefunc(N, rp, rs)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
    # Pre-warp frequencies for digital filter design
    if not analog:
        if numpy.any(Wn < 0) or numpy.any(Wn > 1):
            raise ValueError("Digital filter critical frequencies "
                             "must be 0 <= Wn <= 1")
        fs = 2.0
        # tan() warping compensates for the frequency compression of the
        # bilinear transform applied at the end.
        warped = 2 * fs * tan(pi * Wn / fs)
    else:
        warped = Wn
    # transform to lowpass, bandpass, highpass, or bandstop
    if btype in ('lowpass', 'highpass'):
        if numpy.size(Wn) != 1:
            raise ValueError('Must specify a single critical frequency Wn')
        if btype == 'lowpass':
            z, p, k = _zpklp2lp(z, p, k, wo=warped)
        elif btype == 'highpass':
            z, p, k = _zpklp2hp(z, p, k, wo=warped)
    elif btype in ('bandpass', 'bandstop'):
        try:
            # Band filters are parameterized by center frequency (geometric
            # mean of the edges) and bandwidth.
            bw = warped[1] - warped[0]
            wo = sqrt(warped[0] * warped[1])
        except IndexError:
            raise ValueError('Wn must specify start and stop frequencies')
        if btype == 'bandpass':
            z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
        elif btype == 'bandstop':
            z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
    # Find discrete equivalent if necessary
    if not analog:
        z, p, k = _zpkbilinear(z, p, k, fs=fs)
    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return z, p, k
    elif output == 'ba':
        return zpk2tf(z, p, k)
    elif output == 'sos':
        return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
    """
    Butterworth digital and analog filter design.

    Design an Nth-order digital or analog Butterworth filter (maximally
    flat passband magnitude response) and return the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies (the
        "-3 dB point", where the gain drops to 1/sqrt(2) of the passband).
        For digital filters, normalized from 0 to 1 where 1 is the Nyquist
        frequency; for analog filters, an angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos').  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    See Also
    --------
    buttord, buttap

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.

    Examples
    --------
    Plot the filter's frequency response, showing the critical points:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.butter(4, 100, 'low', analog=True)
    >>> w, h = signal.freqs(b, a)
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.axvline(100, color='green')  # cutoff frequency
    >>> plt.show()
    """
    # Thin wrapper: all of the work happens in the generic designer.
    return iirfilter(N, Wn, ftype='butter', btype=btype,
                     analog=analog, output=output)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
    """
    Chebyshev type I digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type I filter
    (equiripple passband, monotonic stopband) and return the filter
    coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies (the
        point in the transition band at which the gain first drops below
        -`rp`).  For digital filters, normalized from 0 to 1 where 1 is
        the Nyquist frequency; for analog filters, an angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos').  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    See Also
    --------
    cheb1ord, cheb1ap

    Notes
    -----
    Type I filters roll off faster than Type II (`cheby2`), but Type II
    filters do not have any ripple in the passband.  The equiripple
    passband has N maxima or minima; consequently, the DC gain is unity
    for odd-order filters, or -rp dB for even-order filters.

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the Chebyshev I prototype.
    return iirfilter(N, Wn, rp=rp, ftype='cheby1', btype=btype,
                     analog=analog, output=output)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
    """
    Chebyshev type II digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type II filter
    (monotonic passband, equiripple stopband) and return the filter
    coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies (the
        point in the transition band at which the gain first reaches
        -`rs`).  For digital filters, normalized from 0 to 1 where 1 is
        the Nyquist frequency; for analog filters, an angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos').  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    See Also
    --------
    cheb2ord, cheb2ap

    Notes
    -----
    Type II filters do not roll off as fast as Type I (`cheby1`).

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the Chebyshev II prototype.
    return iirfilter(N, Wn, rs=rs, ftype='cheby2', btype=btype,
                     analog=analog, output=output)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
    """
    Elliptic (Cauer) digital and analog filter design.

    Design an Nth-order digital or analog elliptic filter and return the
    filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies (the
        point in the transition band at which the gain first drops below
        -`rp`).  For digital filters, normalized from 0 to 1 where 1 is
        the Nyquist frequency; for analog filters, an angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos').  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    See Also
    --------
    ellipord, ellipap

    Notes
    -----
    Also known as Cauer or Zolotarev filters, the elliptical filter
    maximizes the rate of transition between passband and stopband, at the
    expense of ripple in both and increased ringing in the step response.
    As `rp` approaches 0 it becomes a Chebyshev type II filter (`cheby2`);
    as `rs` approaches 0 it becomes a Chebyshev type I filter (`cheby1`);
    as both approach 0 it becomes a Butterworth filter (`butter`).

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the elliptic prototype.
    return iirfilter(N, Wn, rp=rp, rs=rs, ftype='elliptic', btype=btype,
                     analog=analog, output=output)
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
    """
    Bessel/Thomson digital and analog filter design.

    Design an Nth-order digital or analog Bessel filter and return the
    filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies
        (defined by the `norm` parameter).  For digital filters,
        normalized from 0 to 1 where 1 is the Nyquist frequency; for
        analog filters, an angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.  (See Notes.)
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos').  Default is 'ba'.
    norm : {'phase', 'delay', 'mag'}, optional
        Critical frequency normalization:

        ``phase``
            The filter is normalized such that the phase response reaches
            its midpoint at angular frequency `Wn` ("phase-matched"); the
            magnitude asymptotes match a Butterworth of the same order.
            This is the default, and matches MATLAB's implementation.
        ``delay``
            The filter is normalized such that the group delay in the
            passband is 1/`Wn` (e.g. seconds) -- the "natural" type
            obtained by solving Bessel polynomials.
        ``mag``
            The filter is normalized such that the gain magnitude is -3 dB
            at angular frequency `Wn`.

        .. versionadded:: 0.18.0

    Returns
    -------
    The designed filter in the representation selected by `output`; see
    `iirfilter` for the exact return values.

    Notes
    -----
    Also known as a Thomson filter, the analog Bessel filter has maximally
    flat group delay and maximally linear phase response, with very little
    ringing in the step response. [1]_

    The Bessel is inherently an analog filter.  This function generates
    digital Bessel filters using the bilinear transform, which does not
    preserve the phase response of the analog filter.  As such, it is only
    approximately correct at frequencies below about fs/4.  See `besselap`
    for implementation details and references.

    The ``'sos'`` output parameter was added in 0.16.0.

    References
    ----------
    .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
    """
    # The norm mode is folded into the ftype string; iirfilter dispatches
    # on it to select the Bessel normalization.
    return iirfilter(N, Wn, ftype='bessel_' + norm, btype=btype,
                     analog=analog, output=output)
def maxflat():
    """Placeholder — not implemented.

    NOTE(review): presumably intended for maximally-flat filter design
    (MATLAB-style generalized Butterworth) — confirm before implementing.
    """
    pass
def yulewalk():
    """Placeholder — not implemented.

    NOTE(review): presumably intended for Yule-Walker IIR design (fit a
    filter to a desired frequency response) — confirm before implementing.
    """
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """
    Band Stop Objective Function for order minimization.

    Returns the non-integer order of an analog band-stop filter obtained
    when passband edge number `ind` is moved to the candidate value `wp`.

    Parameters
    ----------
    wp : scalar
        Candidate value for the varied edge of passband `passb`.
    ind : int, {0, 1}
        Index specifying which `passb` edge to vary (0 or 1).
    passb : ndarray
        Two element sequence of fixed passband edges.
    stopb : ndarray
        Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in the stopband in dB.
    type : {'butter', 'cheby', 'ellip'}
        Type of filter.

    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).
    """
    edges = passb.copy()
    edges[ind] = wp
    # Selectivity of the equivalent lowpass prototype for these edges.
    nat = (stopb * (edges[0] - edges[1]) /
           (stopb ** 2 - edges[0] * edges[1]))
    nat = min(abs(nat))

    if type == 'butter':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))
    elif type == 'cheby':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
    elif type == 'ellip':
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        # Cauer order estimate from ratios of complete elliptic integrals.
        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = d0[0] * d1[1] / (d0[1] * d1[0])
    else:
        raise ValueError("Incorrect type: %s" % type)
    return n
def buttord(wp, ws, gpass, gstop, analog=False):
    """Butterworth filter order selection.

    Return the order of the lowest order digital or analog Butterworth
    filter that loses no more than `gpass` dB in the passband and has at
    least `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (so
        `wp` and `ws` are in half-cycles / sample); for analog filters they
        are angular frequencies (e.g. rad/s).  Scalars give lowpass or
        highpass, two-element sequences give bandpass or bandstop:

        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency").
        Should be used with `butter` to give filter results.

    See Also
    --------
    butter : Filter design using order and critical points
    cheb1ord, cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1: lowpass, 2: highpass, 3: bandstop, 4: bandpass
    ftype = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        ftype += 1

    # Pre-warp the band edges for digital design (bilinear transform).
    if analog:
        passb, stopb = wp * 1.0, ws * 1.0
    else:
        passb, stopb = tan(pi * wp / 2.0), tan(pi * ws / 2.0)

    # Selectivity of the equivalent lowpass prototype.
    if ftype == 1:            # lowpass
        nat = stopb / passb
    elif ftype == 2:          # highpass
        nat = passb / stopb
    elif ftype == 3:          # bandstop
        # Move each passband edge independently to minimize the order.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'butter'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'butter'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif ftype == 4:          # bandpass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    order = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) /
                     (2 * log10(nat))))

    # Find the Butterworth natural frequency W0 (the "3dB frequency") that
    # gives exactly gpass at the passband edge of the lowpass prototype.
    try:
        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * order))
    except ZeroDivisionError:
        W0 = 1.0
        print("Warning, order is zero...check input parameters.")

    # Convert this prototype frequency back to the original analog filter.
    if ftype == 1:            # lowpass
        WN = W0 * passb
    elif ftype == 2:          # highpass
        WN = passb / W0
    elif ftype == 3:          # bandstop
        WN = numpy.zeros(2, float)
        discr = sqrt((passb[1] - passb[0]) ** 2 +
                     4 * W0 ** 2 * passb[0] * passb[1])
        WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
        WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
        WN = numpy.sort(abs(WN))
    elif ftype == 4:          # bandpass
        W0 = numpy.array([-W0, W0], float)
        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
              sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
                   passb[0] * passb[1]))
        WN = numpy.sort(abs(WN))
    else:
        raise ValueError("Bad type: %s" % ftype)

    # Un-warp back to normalized digital frequency if needed.
    wn = WN if analog else (2.0 / pi) * arctan(WN)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type I filter order selection.

    Return the order of the lowest order digital or analog Chebyshev Type
    I filter that loses no more than `gpass` dB in the passband and has at
    least `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (so
        `wp` and `ws` are in half-cycles / sample); for analog filters they
        are angular frequencies (e.g. rad/s).  Scalars give lowpass or
        highpass, two-element sequences give bandpass or bandstop:

        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type I filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby1` to give filter results.

    See Also
    --------
    cheby1 : Filter design using order and critical points
    buttord, cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1: lowpass, 2: highpass, 3: bandstop, 4: bandpass
    ftype = 2 * (len(wp) - 1)
    if wp[0] < ws[0]:
        ftype += 1
    else:
        ftype += 2

    # Pre-warp the band edges for digital design (bilinear transform).
    if analog:
        passb, stopb = wp * 1.0, ws * 1.0
    else:
        passb, stopb = tan(pi * wp / 2.0), tan(pi * ws / 2.0)

    # Selectivity of the equivalent lowpass prototype.
    if ftype == 1:            # lowpass
        nat = stopb / passb
    elif ftype == 2:          # highpass
        nat = passb / stopb
    elif ftype == 3:          # bandstop
        # Move each passband edge independently to minimize the order.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif ftype == 4:          # bandpass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    order = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
                     arccosh(nat)))

    # For Chebyshev I the natural frequencies are just the passband edges
    # (un-warped back to normalized digital frequency if needed).
    wn = passb if analog else (2.0 / pi) * arctan(passb)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type II filter order selection.

    Return the order of the lowest order digital or analog Chebyshev Type
    II filter that loses no more than `gpass` dB in the passband and has
    at least `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (so
        `wp` and `ws` are in half-cycles / sample); for analog filters they
        are angular frequencies (e.g. rad/s).  Scalars give lowpass or
        highpass, two-element sequences give bandpass or bandstop:

        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type II filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby2` to give filter results.

    See Also
    --------
    cheby2 : Filter design using order and critical points
    buttord, cheb1ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1: lowpass, 2: highpass, 3: bandstop, 4: bandpass
    ftype = 2 * (len(wp) - 1)
    if wp[0] < ws[0]:
        ftype += 1
    else:
        ftype += 2

    # Pre-warp the band edges for digital design (bilinear transform).
    if analog:
        passb, stopb = wp * 1.0, ws * 1.0
    else:
        passb, stopb = tan(pi * wp / 2.0), tan(pi * ws / 2.0)

    # Selectivity of the equivalent lowpass prototype.
    if ftype == 1:            # lowpass
        nat = stopb / passb
    elif ftype == 2:          # highpass
        nat = passb / stopb
    elif ftype == 3:          # bandstop
        # Move each passband edge independently to minimize the order.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'cheby'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif ftype == 4:          # bandpass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    order = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
                     arccosh(nat)))

    # Find the frequency where the analog response is -gpass dB, then
    # convert back from the low-pass prototype to the original filter.
    new_freq = cosh(1.0 / order * arccosh(sqrt((GSTOP - 1.0) /
                                               (GPASS - 1.0))))
    new_freq = 1.0 / new_freq

    if ftype == 1:            # lowpass
        nat = passb / new_freq
    elif ftype == 2:          # highpass
        nat = passb * new_freq
    elif ftype == 3:          # bandstop
        nat = numpy.zeros(2, float)
        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
                       passb[1] * passb[0]))
        nat[1] = passb[1] * passb[0] / nat[0]
    elif ftype == 4:          # bandpass
        nat = numpy.zeros(2, float)
        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
                       passb[1] * passb[0]))
        nat[1] = passb[0] * passb[1] / nat[0]

    # Un-warp back to normalized digital frequency if needed.
    wn = nat if analog else (2.0 / pi) * arctan(nat)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
    """Elliptic (Cauer) filter order selection.

    Return the order of the lowest order digital or analog elliptic filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (so
        `wp` and `ws` are in half-cycles / sample); for analog filters they
        are angular frequencies (e.g. rad/s).  Scalars give lowpass or
        highpass, two-element sequences give bandpass or bandstop:

        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for an Elliptic (Cauer) filter that meets specs.
    wn : ndarray or float
        The elliptic natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.

    See Also
    --------
    ellip : Filter design using order and critical points
    buttord, cheb1ord, cheb2ord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # 1: lowpass, 2: highpass, 3: bandstop, 4: bandpass
    ftype = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        ftype += 1

    # Pre-warp the band edges for digital design (bilinear transform).
    if analog:
        passb, stopb = wp * 1.0, ws * 1.0
    else:
        passb, stopb = tan(pi * wp / 2.0), tan(pi * ws / 2.0)

    # Selectivity of the equivalent lowpass prototype.
    if ftype == 1:            # lowpass
        nat = stopb / passb
    elif ftype == 2:          # highpass
        nat = passb / stopb
    elif ftype == 3:          # bandstop
        # Move each passband edge independently to minimize the order.
        passb[0] = optimize.fminbound(band_stop_obj, passb[0],
                                      stopb[0] - 1e-12,
                                      args=(0, passb, stopb, gpass, gstop,
                                            'ellip'),
                                      disp=0)
        passb[1] = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12,
                                      passb[1],
                                      args=(1, passb, stopb, gpass, gstop,
                                            'ellip'),
                                      disp=0)
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif ftype == 4:          # bandpass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * gstop)
    GPASS = 10 ** (0.1 * gpass)
    # Cauer order estimate from ratios of complete elliptic integrals.
    arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
    arg0 = 1.0 / nat
    d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
    d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
    order = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))

    # Natural frequencies are the passband edges (un-warped if digital).
    wn = passb if analog else arctan(passb) * 2.0 / pi
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def buttap(N):
    """Return (z,p,k) for analog prototype of Nth-order Butterworth filter.

    The filter will have an angular (e.g. rad/s) cutoff frequency of 1.

    See Also
    --------
    butter : Filter design function using this prototype
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    # All-pole filter: no zeros, unit gain.
    zeros = numpy.array([])
    # Odd integers symmetric about zero; the middle value 0 guarantees one
    # exactly real pole for odd orders.
    indices = numpy.arange(-N + 1, N, 2)
    poles = -numpy.exp(1j * pi * indices / (2 * N))
    return zeros, poles, 1
def cheb1ap(N, rp):
    """
    Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.

    The returned filter prototype has `rp` decibels of ripple in the
    passband.  The filter's angular (e.g. rad/s) cutoff frequency is
    normalized to 1, defined as the point at which the gain first drops
    below ``-rp``.

    See Also
    --------
    cheby1 : Filter design function using this prototype
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    if N == 0:
        # Avoid divide-by-zero; even-order filters have DC gain of -rp dB.
        return numpy.array([]), numpy.array([]), 10**(-rp/20)

    eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)   # passband ripple factor
    mu = 1.0 / N * arcsinh(1 / eps)

    # Poles lie on an ellipse in the left half of the S-plane.
    indices = numpy.arange(-N + 1, N, 2)
    poles = -sinh(mu + 1j * pi * indices / (2 * N))

    gain = numpy.prod(-poles, axis=0).real
    if N % 2 == 0:
        # Even orders ripple downward at DC; scale gain accordingly.
        gain = gain / sqrt((1 + eps * eps))
    return numpy.array([]), poles, gain
def cheb2ap(N, rs):
    """
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.

    The returned filter prototype has `rs` decibels of ripple in the
    stopband.  The filter's angular (e.g. rad/s) cutoff frequency is
    normalized to 1, defined as the point at which the gain first reaches
    ``-rs``.

    See Also
    --------
    cheby2 : Filter design function using this prototype
    """
    # Fixed docstring: this is the type II (inverse Chebyshev) prototype;
    # the old docstring incorrectly said "type I".
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        return numpy.array([]), numpy.array([]), 1
    # Ripple factor (epsilon)
    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
    mu = arcsinh(1.0 / de) / N
    if N % 2:
        # Odd order: skip m == 0, which would put a zero at infinity.
        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
                               numpy.arange(2, N, 2)))
    else:
        m = numpy.arange(-N+1, N, 2)
    z = -conjugate(1j / sin(m * pi / (2.0 * N)))
    # Poles around the unit circle like Butterworth
    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
    # Warp into Chebyshev II (scale onto an ellipse, then invert)
    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
    p = 1.0 / p
    # Gain normalizes the DC response given the pole/zero products.
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    return z, p, k
# Small tolerance (~double-precision machine epsilon) used as a numerical
# guard in the elliptic/Bessel helper routines below (e.g. degenerate limits
# of the elliptic parameter m in _kratio, significance tests in ellipap).
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
    """
    Objective |K(m)/K(1-m) - k_ratio| for solving the complete-elliptic-
    integral ratio equation; `m` is clamped into [0, 1].
    """
    m = float(m)
    # Clamp the elliptic parameter into its valid range.
    if m < 0:
        m = 0.0
    if m > 1:
        m = 1.0
    if m <= EPSILON:
        # Degenerate m -> 0 limit: return a huge residual.
        residual = 1e20
    elif m + EPSILON >= 1:
        # Degenerate m -> 1 limit.
        residual = -k_ratio
    else:
        K = special.ellipk([m, 1 - m])
        residual = K[0] / K[1] - k_ratio
    return abs(residual)
def ellipap(N, rp, rs):
    """Return (z,p,k) of Nth-order elliptic analog lowpass filter.
    The filter is a normalized prototype that has `rp` decibels of ripple
    in the passband and a stopband `rs` decibels down.
    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.
    See Also
    --------
    ellip : Filter design function using this prototype
    References
    ----------
    .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
           Chapters 5 and 12.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        # Even order filters have DC gain of -rp dB
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    elif N == 1:
        # Single real pole; |p| chosen so the gain is exactly -rp dB at the
        # normalized cutoff, with k = -p giving unit DC gain.
        p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
        k = -p
        z = []
        return asarray(z), asarray(p), k
    # Passband ripple factor eps and the ratio ck1 that encodes the
    # stopband depth relative to it.
    eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
    ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
    ck1p = numpy.sqrt(1 - ck1 * ck1)
    if ck1p == 1:
        raise ValueError("Cannot design a filter with given rp and rs"
                         " specifications.")
    val = special.ellipk([ck1 * ck1, ck1p * ck1p])
    if abs(1 - ck1p * ck1p) < EPSILON:
        krat = 0
    else:
        krat = N * val[0] / val[1]
    # Solve K(m)/K(1-m) = krat for the elliptic parameter m (_kratio is the
    # scalar objective); fall back to a bounded search if fmin wanders.
    m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
                      disp=0)
    if m < 0 or m > 1:
        m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
                               maxiter=250, disp=0)
    capk = special.ellipk(m)
    # Transfer-function zeros from the Jacobi elliptic sine at equally
    # spaced arguments (even orders include all, odd orders skip zero).
    j = numpy.arange(1 - N % 2, N, 2)
    jj = len(j)
    [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
    snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
    z = 1.0 / (sqrt(m) * snew)
    z = 1j * z
    z = numpy.concatenate((z, conjugate(z)))
    # Locate v0 where sn/cn = 1/eps (via _vratio), then form the poles.
    r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
                      maxfun=250, maxiter=250, disp=0)
    v0 = capk * r / (N * val[0])
    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
    if N % 2:
        # Odd order: only conjugate the genuinely complex poles so the
        # single real pole is not duplicated.
        newp = numpy.compress(abs(p.imag) > EPSILON *
                              numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
                                                   axis=0).real),
                              p, axis=-1)
        p = numpy.concatenate((p, conjugate(newp)))
    else:
        p = numpy.concatenate((p, conjugate(p)))
    # Gain from the pole/zero products; even orders are rescaled since
    # their DC gain sits at the bottom of the passband ripple (-rp dB).
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    if N % 2 == 0:
        k = k / numpy.sqrt((1 + eps * eps))
    return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
    """
    Return the coefficients of the Bessel polynomial of degree `n`.

    If `reverse` is true, a reverse Bessel polynomial is output.

    Output is a list of coefficients, highest power first:
    [1]                    = 1
    [1, 1]                 = 1*s + 1
    [1, 3, 3]              = 1*s^2 + 3*s + 3
    [1, 6, 15, 15]         = 1*s^3 + 6*s^2 + 15*s + 15
    [1, 10, 45, 105, 105]  = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
    etc.

    Coefficients are arbitrary-precision Python ints, so `n` is limited
    only by available memory.  The sequence is http://oeis.org/A001498 .
    """
    if abs(int(n)) != n:
        raise ValueError("Polynomial order must be a nonnegative integer")
    n = int(n)  # np.int32 etc. would overflow; plain int is unbounded

    coeffs = []
    for k in range(n + 1):
        numerator = _falling_factorial(2*n - k, n)
        denominator = 2**(n - k) * factorial(k, exact=True)
        coeffs.append(numerator // denominator)
    return coeffs[::-1] if reverse else coeffs
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
    """
    Find the zeros of the ordinary Bessel polynomial of order `N` by
    root-finding on a modified Bessel function of the second kind.
    """
    if N == 0:
        return asarray([])

    # Approximate starting points for the zeros.
    x0 = _campos_zeros(N)

    # exp(1/x)*K_{N+0.5}(1/x) has the same zeros as the Nth-order ordinary
    # Bessel polynomial y_N(x); kve is the exponentially scaled K function.
    def f(x):
        return special.kve(N+0.5, 1/x)

    # First derivative of f, via the K-function recurrence.
    def fp(x):
        return (special.kve(N-0.5, 1/x)/(2*x**2) -
                special.kve(N+0.5, 1/x)/(x**2) +
                special.kve(N+1.5, 1/x)/(2*x**2))

    # Refine all starting points simultaneously ...
    x = _aberth(f, fp, x0)

    # ... then polish each root individually with Newton's method.
    for i in range(len(x)):
        x[i] = optimize.newton(f, x[i], fp, tol=1e-15)

    # Average with the reversed conjugate so conjugate pairs are exactly
    # symmetrical.
    x = np.mean((x, x[::-1].conj()), 0)

    # The zeros must sum to -1; use that as a sanity check.
    if abs(np.sum(x) + 1) > 1e-15:
        raise RuntimeError('Generated zeros are inaccurate')
    return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
    """
    Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
    Parameters
    ----------
    N : int
        The order of the filter.
    norm : {'phase', 'delay', 'mag'}, optional
        Frequency normalization:
        ``phase``
            The filter is normalized such that the phase response reaches its
            midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
            happens for both low-pass and high-pass filters, so this is the
            "phase-matched" case. [6]_
            The magnitude response asymptotes are the same as a Butterworth
            filter of the same order with a cutoff of `Wn`.
            This is the default, and matches MATLAB's implementation.
        ``delay``
            The filter is normalized such that the group delay in the passband
            is 1 (e.g. 1 second). This is the "natural" type obtained by
            solving Bessel polynomials
        ``mag``
            The filter is normalized such that the gain magnitude is -3 dB at
            angular frequency 1. This is called "frequency normalization" by
            Bond. [1]_
        .. versionadded:: 0.18.0
    Returns
    -------
    z : ndarray
        Zeros of the transfer function. Is always an empty array.
    p : ndarray
        Poles of the transfer function.
    k : scalar
        Gain of the transfer function. For phase-normalized, this is always 1.
    See Also
    --------
    bessel : Filter design function using this prototype
    Notes
    -----
    To find the pole locations, approximate starting points are generated [2]_
    for the zeros of the ordinary Bessel polynomial [3]_, then the
    Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
    calculate more accurate zeros, and these locations are then inverted about
    the unit circle.
    References
    ----------
    .. [1] C.R. Bond, "Bessel Filter Constants",
           http://www.crbond.com/papers/bsf.pdf
    .. [2] Campos and Calderon, "Approximate closed-form formulas for the
           zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
    .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
    .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
           Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
           April 1973
    .. [5] Ehrlich, "A modified Newton method for polynomials", Communications
           of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
           :DOI:`10.1145/363067.363115`
    .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
           Others", RaneNote 147, 1998, http://www.rane.com/note147.html
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    if N == 0:
        # Degenerate zeroth order: unity gain, no poles.
        p = []
        k = 1
    else:
        # Find roots of reverse Bessel polynomial
        p = 1/_bessel_zeros(N)
        # Constant coefficient of the reverse Bessel polynomial, computed as
        # an exact integer via the falling factorial.
        a_last = _falling_factorial(2*N, N) // 2**N
        # Shift them to a different normalization if required
        if norm in ('delay', 'mag'):
            # Normalized for group delay of 1
            k = a_last
            if norm == 'mag':
                # -3 dB magnitude point is at 1 rad/sec
                norm_factor = _norm_factor(p, k)
                p /= norm_factor
                k = norm_factor**-N * a_last
        elif norm == 'phase':
            # Phase-matched (1/2 max phase shift at 1 rad/sec)
            # Asymptotes are same as Butterworth filter
            p *= 10**(-math.log10(a_last)/N)
            k = 1
        else:
            raise ValueError('normalization not understood')
    return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q):
    """
    Design second-order IIR notch digital filter.
    A notch filter is a band-stop filter with a narrow bandwidth
    (high quality factor). It rejects a narrow frequency band and
    leaves the rest of the spectrum little changed.
    Parameters
    ----------
    w0 : float
        Normalized frequency to remove from a signal. It is a
        scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
        corresponding to half of the sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        notch filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.
    See Also
    --------
    iirpeak
    Notes
    -----
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996
    Examples
    --------
    Design and plot filter to remove the 60Hz component from a
    signal sampled at 200Hz, using a quality factor Q = 30
    >>> from scipy import signal
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fs = 200.0  # Sample frequency (Hz)
    >>> f0 = 60.0  # Frequency to be removed from signal (Hz)
    >>> Q = 30.0  # Quality factor
    >>> w0 = f0/(fs/2)  # Normalized Frequency
    >>> # Design notch filter
    >>> b, a = signal.iirnotch(w0, Q)
    >>> # Frequency response
    >>> w, h = signal.freqz(b, a)
    >>> # Generate frequency axis
    >>> freq = w*fs/(2*np.pi)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
    >>> ax[0].set_xlim([0, 100])
    >>> ax[0].set_ylim([-25, 10])
    >>> ax[0].grid()
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
    >>> ax[1].set_xlabel("Frequency (Hz)")
    >>> ax[1].set_xlim([0, 100])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid()
    >>> plt.show()
    """
    # All the design work happens in the shared notch/peak helper; the
    # "notch" argument selects the band-stop variant.
    return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
    """
    Design second-order IIR peak (resonant) digital filter.

    A peak filter is a band-pass filter with a narrow bandwidth
    (high quality factor). It rejects components outside a narrow
    frequency band.

    Parameters
    ----------
    w0 : float
        Normalized frequency to be retained in a signal. It is a
        scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
        to half of the sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        peak filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.

    See Also
    --------
    iirnotch

    Notes
    -----
    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996

    Examples
    --------
    Design and plot filter to remove the frequencies other than the 300Hz
    component from a signal sampled at 1000Hz, using a quality factor Q = 30

    >>> from scipy import signal
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    >>> fs = 1000.0  # Sample frequency (Hz)
    >>> f0 = 300.0  # Frequency to be retained (Hz)
    >>> Q = 30.0  # Quality factor
    >>> w0 = f0/(fs/2)  # Normalized Frequency
    >>> # Design peak filter
    >>> b, a = signal.iirpeak(w0, Q)

    >>> # Frequency response
    >>> w, h = signal.freqz(b, a)
    >>> # Generate frequency axis
    >>> freq = w*fs/(2*np.pi)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
    >>> ax[0].set_xlim([0, 500])
    >>> ax[0].set_ylim([-50, 10])
    >>> ax[0].grid()
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
    >>> ax[1].set_xlabel("Frequency (Hz)")
    >>> ax[1].set_xlim([0, 500])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid()
    >>> plt.show()
    """
    # Shared designer handles both responses; "peak" selects the band-pass form.
    return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
# Compute -3dB atenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
# Map user-facing filter-type aliases onto [analog prototype, order-selection]
# helper pairs. The Bessel variants list only a prototype function because no
# closed-form order-selection helper exists for them.
filter_dict = {'butter': [buttap, buttord],
               'butterworth': [buttap, buttord],
               'cauer': [ellipap, ellipord],
               'elliptic': [ellipap, ellipord],
               'ellip': [ellipap, ellipord],
               'bessel': [besselap],
               'bessel_phase': [besselap],
               'bessel_delay': [besselap],
               'bessel_mag': [besselap],
               'cheby1': [cheb1ap, cheb1ord],
               'chebyshev1': [cheb1ap, cheb1ord],
               'chebyshevi': [cheb1ap, cheb1ord],
               'cheby2': [cheb2ap, cheb2ord],
               'chebyshev2': [cheb2ap, cheb2ord],
               'chebyshevii': [cheb2ap, cheb2ord],
               }
# Map every accepted band-type alias onto its canonical btype name.
band_dict = dict(
    [(alias, 'bandpass') for alias in ('band', 'bandpass', 'pass', 'bp')] +
    [(alias, 'bandstop') for alias in ('bs', 'bandstop', 'bands', 'stop')] +
    [(alias, 'lowpass') for alias in ('l', 'low', 'lowpass', 'lp')] +
    [(alias, 'highpass') for alias in ('high', 'highpass', 'h', 'hp')])
# Normalization used by each Bessel filter variant; plain 'bessel' defaults
# to phase normalization.
bessel_norms = dict(bessel='phase',
                    bessel_phase='phase',
                    bessel_delay='delay',
                    bessel_mag='mag')
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/utils/random.py | 8 | 7444 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
from .deprecation import deprecated
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
@deprecated("sklearn.utils.random.choice was deprecated in version 0.19 "
            "and will be removed in 0.21. Use np.random.choice or "
            "np.random.RandomState.choice instead.")
def choice(a, size=None, replace=True, p=None, random_state=None):
    """Draw a random sample from a 1-D array (backport of np.random.choice).

    Parameters
    ----------
    a : 1-D array-like or int
        Population to sample from; an int ``n`` is treated as
        ``np.arange(n)``.
    size : int or tuple of ints, optional
        Output shape. ``None`` (default) yields a single value.
    replace : boolean, optional
        Whether to sample with replacement.
    p : 1-D array-like, optional
        Probability associated with each entry of ``a``; uniform when
        omitted.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, the seed for a new random number generator; if a
        RandomState instance, that generator is used; if None, the global
        ``np.random`` state is used.

    Returns
    -------
    samples : 1-D ndarray, shape (size,)
        The generated random samples.

    Raises
    ------
    ValueError
        If ``a`` is a negative int, ``a`` or ``p`` is not 1-dimensional,
        ``a`` is empty, ``p`` is not a probability vector, ``a`` and ``p``
        differ in length, or ``replace=False`` with a sample size larger
        than the population.
    """
    # Without an explicit seed/generator, defer to the global RNG.
    if random_state is None:
        return np.random.choice(a, size, replace, p)
    rng = check_random_state(random_state)
    return rng.choice(a, size, replace, p)
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions.

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    Raises
    ------
    ValueError
        If a class array has a non-integer dtype, a probability array does
        not sum to one, or a classes/probabilities pair differs in length.
    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = classes[j].astype(np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # BUGFIX: compare the probability sum within floating-point
        # tolerance; exact equality (!= 1.0) spuriously rejected valid
        # distributions affected by rounding (e.g. three thirds).
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        # NOTE: rng is deliberately re-created per column so that an int
        # random_state reproduces the historical per-column draws.
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| bsd-3-clause |
huletlab/PyAbel | doc/conf.py | 2 | 10541 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyAbel documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 13 17:11:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import six
import os
import shlex
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# Scipy and Numpy packages cannot be installed in on readthedocs.io
# https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
if six.PY3:
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
    # Read the Docs cannot build C extensions, so heavy scientific modules are
    # replaced in sys.modules with instances of this mock (standard RTD
    # recipe). Any attribute access falls back to a fresh Mock so chained
    # lookups like ``scipy.ndimage.interpolation`` resolve during doc builds.
    @classmethod
    def __getattr__(cls, name):
        return Mock()
# MOCK_MODULES = ['numpy', 'scipy', 'scipy.special', 'numpy.linalg', 'scipy.ndimage', 'scipy.ndimage.interpolation',
#                 'scipy.linalg', 'scipy.integrate', 'scipy.optimize']
#
# No modules are mocked at the moment; re-populate the list above if the
# Read the Docs build starts failing on C-extension imports.
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)

# Route Markdown sources through the CommonMark parser.
# NOTE(review): ``source_parsers`` was deprecated in Sphinx 1.8 -- confirm the
# Sphinx version pinned for this project before modernizing.
source_parsers = {
    '.md': CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyAbel'
copyright = '2016, PyAbel team'
author = 'PyAbel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyAbeldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyAbel.tex', 'PyAbel Documentation',
'PyAbel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyabel', 'PyAbel Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyAbel', 'PyAbel Documentation',
author, 'PyAbel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
required_symlinks = [
('examples', '../examples/')
]
autodoc_member_order = 'bysource'
# supress the "nonlocal image URI found" warnings
import sphinx.environment
from docutils.utils import get_source_line
def _warn_node(self, msg, node):
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
| mit |
boada/HETDEXCluster | testing/snippets/rejectOutliers_group.py | 2 | 4157 | import glob
import pandas as pd
import numpy as np
from astLib import astCoords as aco
from astLib import astStats as ast
from astLib import astCalc as aca
c = 2.99E5 # speed of light in km/s
def findSeperationSpatial(data, center):
    ''' Finds the distance to all of the galaxies from the center of the
    cluster in the spatial plane. Returns values in Mpc.

    Parameters
    ----------
    data : pandas.DataFrame
        Galaxy catalog with 'ra', 'dec' (degrees) and 'redshift' columns.
    center : tuple of (ra, dec)
        Cluster center in degrees.

    Returns
    -------
    The same DataFrame with a new 'seperation' column (Mpc).
    '''
    # Add a new column to the dataframe
    data['seperation'] = 0.0
    for idx, row in data.iterrows():
        sepDeg = aco.calcAngSepDeg(center[0], center[1], row['ra'],
                                   row['dec'])
        # 57.2957795131 converts the angular-diameter distance from
        # per-radian to per-degree.
        sepMpc = sepDeg * aca.da(row['redshift']) / 57.2957795131
        # BUGFIX: chained indexing (data['seperation'][idx] = ...) assigns
        # through a temporary copy and can silently fail; .loc writes to
        # the frame itself.
        data.loc[idx, 'seperation'] = sepMpc
    return data
def findClusterCenterRedshift(data):
    ''' Finds the center of the cluster in redshift space using the
    biweightlocation estimator.
    '''
    # Work on a copy so the estimator cannot mutate the catalog column.
    redshifts = np.copy(data['redshift'].values)
    return ast.biweightLocation(redshifts, tuningConstant=6.0)
def findLOSV(data):
    ''' Finds the line of sight velocity for each of the galaxies.

    Adds a 'LOSV' column (km/s) measured relative to the cluster's mean
    redshift and returns the same DataFrame.
    '''
    c = 2.99E5  # speed of light in km/s
    avgz = findClusterCenterRedshift(data)
    # Add a new column to the dataframe
    data['LOSV'] = 0.0
    for idx, row in data.iterrows():
        # BUGFIX: chained indexing (data['LOSV'][idx] = ...) assigns through
        # a temporary copy and can silently fail; .loc writes in place.
        data.loc[idx, 'LOSV'] = c * (row['redshift'] - avgz) / (1 + avgz)
    return data
def split_list(alist, wanted_parts=1):
    ''' Breaks a list into a number of parts. If it does not divide evenly
    then the later parts absorb the extra elements.
    '''
    total = len(alist)
    # Integer boundaries of each chunk: floor(i * total / parts).
    bounds = [i * total // wanted_parts for i in range(wanted_parts + 1)]
    return [alist[lo:hi] for lo, hi in zip(bounds[:-1], bounds[1:])]
def rejectInterlopers(data):
    ''' Does all of the work to figure out which galaxies don't belong.
    Applies the fixed-gapper method: galaxies are binned by cluster-centric
    separation (~15 per bin) and, within each bin, any velocity gap larger
    than 1000 km/s marks the more extreme galaxy as an interloper.

    Returns the input DataFrame with interlopers dropped.
    '''
    # BUGFIX: the separation-sorted frame must be the one that is binned;
    # previously sepSorted was computed but the unsorted frame was split.
    # (.sort(column) was also replaced by the non-deprecated sort_values.)
    sepSorted = data.sort_values('seperation', ascending=True)

    # How many parts to break into (~15 galaxies per bin)
    parts = len(data) // 15
    splitData = split_list(sepSorted, parts)

    # Now we sort the parts by LOSV and find the rejects
    interlopers = []
    for part in splitData:
        # sort by LOSV
        LOSVsorted = part.sort_values('LOSV', ascending=True)
        rejected = True
        while rejected:
            # Find the difference between all of the neighboring elements
            difference = np.diff(LOSVsorted['LOSV'])
            # Gaps larger than 1000 km/s flag an interloper pair
            rejects = abs(difference) > 1000
            gap_positions = np.where(rejects)[0]
            if len(gap_positions):
                # Of the two galaxies flanking each gap, reject the one with
                # the more extreme line-of-sight velocity.
                for pos, i in enumerate(gap_positions):
                    v_lower = abs(LOSVsorted['LOSV'][LOSVsorted.index[i]])
                    v_upper = abs(LOSVsorted['LOSV'][LOSVsorted.index[i + 1]])
                    if v_lower < v_upper:
                        gap_positions[pos] = i + 1
                dataframeIndex = list(LOSVsorted.index[list(gap_positions)])
                LOSVsorted = LOSVsorted.drop(dataframeIndex)
                interlopers += dataframeIndex
            else:
                rejected = False
    # BUGFIX: Python 3 print syntax (the print statement was a SyntaxError
    # under Python 3).
    print('interlopers', interlopers)
    return data.drop(interlopers)
def rejectInterlopers_group(data, sigmav=500):
    ''' This method is given in Wilman 2005 and Connelly 2012 and talked about
    in the draft of the cluster paper. It doesn't look like it is fully
    fleshed out; for now it computes and returns the membership thresholds.

    Parameters
    ----------
    data : pandas.DataFrame with a 'redshift' column.
    sigmav : float, assumed group velocity dispersion in km/s (default 500).

    Returns
    -------
    (deltaZmax, deltaRmax, deltaThetamax) : redshift offset limit,
    projected radius limit (1/Mpc) and its angular size (arcseconds).
    '''
    # BUGFIX: the original definition was missing the trailing colon
    # (SyntaxError) and used the misspelled name ``simgav`` (NameError);
    # the computed thresholds were also discarded -- return them.
    deltaZmax = 2 * sigmav / c
    avgz = findClusterCenterRedshift(data)
    deltaRmax = (c * deltaZmax) / (10 * (1 + avgz) * aca.H0 * aca.Ez(avgz))  # 1/Mpc
    deltaThetamax = 206265 * deltaRmax * aca.da(avgz)  # arcseconds
    return deltaZmax, deltaRmax, deltaThetamax
# Script driver: clean the catalog around a fixed cluster center.
# NOTE(review): ``matched`` is never defined in this module -- presumably the
# matched galaxy catalog produced elsewhere; running this file as-is raises
# NameError. Confirm where ``matched`` is meant to come from.
center = 191.1050125, 16.7966666667
seperated = findSeperationSpatial(matched, center)
losv = findLOSV(seperated)
cleaned = rejectInterlopers(losv)
| mit |
mmisamore/intro-data-science | chapter2/maxTemp.py | 1 | 1394 | import pandas
import pandasql
def max_temp_aggregate_by_fog(filename):
    '''
    Run a SQL query against the weather CSV at ``filename`` and return a
    two-row, two-column frame: the fog flag (0 or 1) and the maximum
    ``maxtempi`` observed for that flag (i.e. the hottest day with and
    without fog).

    Sample data:
    https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
    '''
    # pandasql resolves table names from locals(), so the frame must be
    # bound to the name used in the query ("weather_data").
    weather_data = pandas.read_csv(filename)

    query = """
    select fog, max(maxtempi)
    from weather_data
    group by fog
    """

    # Execute the SQL command against the pandas frame
    result = pandasql.sqldf(query.lower(), locals())
    return result
print max_temp_aggregate_by_fog('weather_underground.csv')
| mit |
pprett/scikit-learn | sklearn/exceptions.py | 50 | 5276 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
# Public API of this module: the custom error and warning classes below.
# Keeps ``from sklearn.exceptions import *`` limited to these names.
__all__ = ['NotFittedError',
           'ChangedBehaviorWarning',
           'ConvergenceWarning',
           'DataConversionWarning',
           'DataDimensionalityWarning',
           'EfficiencyWarning',
           'FitFailedWarning',
           'NonBLASDotWarning',
           'SkipTestWarning',
           'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
    """Raised when an estimator is used before it has been fitted.

    Derives from both ``ValueError`` and ``AttributeError`` so that
    exception handlers written against either type keep working.

    Examples
    --------
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import NotFittedError
    >>> try:
    ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
    ... except NotFittedError as e:
    ...     print(repr(e))
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    NotFittedError('This LinearSVC instance is not fitted yet',)

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class ChangedBehaviorWarning(UserWarning):
    """Warning that notifies the user of a change in behavior.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
class ConvergenceWarning(UserWarning):
    """Warning emitted when an iterative algorithm fails to converge.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class DataConversionWarning(UserWarning):
    """Warning raised when input data is implicitly converted.

    Emitted whenever input must be converted or reinterpreted in a way the
    caller may not expect, for example when

    - an integer array is passed to a function expecting floats and is
      converted;
    - a no-copy operation is requested but a copy is required to satisfy
      the implementation's dtype expectations;
    - an input's shape can be interpreted ambiguously.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class DataDimensionalityWarning(UserWarning):
    """Warning about potentially problematic data dimensionality.

    For instance, random projection raises this warning when the target
    projection space has more components than the source space has
    features, implying that the projection cannot reduce the problem's
    dimensionality.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class EfficiencyWarning(UserWarning):
    """Warning that a computation may be inefficient.

    Tells the user that efficiency may be suboptimal for a reason that is
    included in the warning message. May be subclassed to express more
    specific efficiency problems.

    .. versionadded:: 0.18
    """
class FitFailedWarning(RuntimeWarning):
    """Warning raised when fitting an estimator fails.

    Used by the meta-estimators ``GridSearchCV`` and ``RandomizedSearchCV``
    and by the cross-validation helper ``cross_val_score`` when the
    underlying estimator raises while fitting: the score for the failing
    train-test partition is replaced instead of propagating the error, and
    this warning reports the failure details.

    .. versionchanged:: 0.18
       Moved from sklearn.cross_validation.
    """
class NonBLASDotWarning(EfficiencyWarning):
    """Warning that a dot operation did not go through BLAS.

    Notifies the user that BLAS was not used for the dot product, so its
    efficiency may be reduced.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation, extends EfficiencyWarning.
    """
class SkipTestWarning(UserWarning):
    """Warning that a test was skipped rather than failed.

    For example, an estimator check that needs pandas is skipped -- and
    this warning emitted -- when pandas cannot be imported, instead of
    being registered as a failure.
    """
class UndefinedMetricWarning(UserWarning):
    """Warning raised when a metric is ill-defined for the given input.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
| bsd-3-clause |
# Plot one analysis frame of an STFT-based morph between two sounds: the
# magnitude spectrum of x2 (speech) is smoothed and blended into the spectrum
# of x1 (orchestra), and a single output frame is resynthesised and plotted.
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy.signal import resample
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import math

(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)                # analysis window for x1
N1 = 1024                            # FFT size for x1
H1 = 256                             # hop size (unused here: single frame only)
w2 = np.hamming(1024)                # analysis window for x2
N2 = 1024                            # FFT size for x2
smoothf = .1                         # smoothing factor applied to mX2 (fraction of bins kept)
balancef = .7                        # morph balance: 1 -> all x2, 0 -> all x1
M1 = w1.size                         # size of analysis window
hM1_1 = int(math.floor((M1+1)/2))    # half analysis window size by rounding
hM1_2 = int(math.floor(M1/2))        # half analysis window size by floor
M2 = w2.size                         # size of analysis window
hM2_1 = int(math.floor((M2+1)/2))    # half analysis window size by rounding
hM2_2 = int(math.floor(M2/2))        # half analysis window size by floor
loc1 = 14843                         # frame centre in x1 (samples)
loc2 = 9294                          # frame centre in x2 (samples)
x1 = x1[loc1-hM1_1:loc1+hM1_2]
x2 = x2[loc2-hM2_1:loc2+hM2_2]
mX1, pX1 = DFT.dftAnal(x1, w1, N1)   # compute dft of first frame
mX2, pX2 = DFT.dftAnal(x2, w2, N2)   # compute dft of second frame
# morph
# NOTE: resample() requires an integer sample count; the original code passed
# the float mX2.size*smoothf, which raises a TypeError on modern SciPy.
mX2smooth = resample(np.maximum(-200, mX2), int(mX2.size * smoothf))  # smooth spectrum of second sound
mX2 = resample(mX2smooth, mX2.size)                                   # stretch back to the original bin count
mY = balancef * mX2 + (1-balancef) * mX1                              # generate output spectrum
#-----synthesis-----
y = DFT.dftSynth(mY, pX1, M1) * sum(w1)   # synthesise one output frame (phase taken from x1)
mY1, pY1 = DFT.dftAnal(y, w1, M1)         # re-analyse the output frame for plotting
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N1)/float(fs), x1*w1, 'b', lw=1.5)
plt.axis([0, N1/float(fs), min(x1*w1), max(x1*w1)])
plt.title('x1 (orchestra.wav)')
plt.subplot(323)
plt.plot(fs*np.arange(mX1.size)/float(mX1.size), mX1-max(mX1), 'r', lw=1.5, label = 'mX1')
plt.plot(fs*np.arange(mX2.size)/float(mX2.size), mX2-max(mX2), 'k', lw=1.5, label='mX2')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-70,2])
plt.title('mX1 + mX2 (speech-male.wav)')
plt.subplot(325)
plt.plot(fs*np.arange(pX1.size)/float(pX1.size), pX1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX1),20])
plt.title('pX1')
plt.subplot(322)
plt.plot(np.arange(N1)/float(fs), y, 'b', lw=1.5)
plt.axis([0, float(N1)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1-max(mY1), 'r', lw=1.5)
plt.axis([0,fs/4.0,-70,2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY1.size)/float(pY1.size), pY1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY1),6])
plt.title('pY')
plt.tight_layout()
plt.savefig('stftMorph-frame.png')
plt.show()
| agpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_compat.py | 9 | 2357 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (
range, zip, map, filter,
lrange, lzip, lmap, lfilter,
builtins
)
import unittest
import nose
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
    """Check that pandas.compat's iterator builtins (range, zip, map, filter)
    are lazy, that the l-prefixed variants (lrange, lzip, lmap, lfilter)
    return lists, and that both agree with the stdlib ``builtins`` versions.
    """

    def check_result(self, actual, expected, lengths):
        """Shared assertion helper.

        ``actual`` is a tuple of ``[iterator_result, list_result]`` pairs;
        ``expected`` and ``lengths`` are parallel tuples of the expected
        contents and lengths (built with the trailing-comma 1-tuple idiom
        used in the tests below).
        """
        for (iter_res, list_res), exp, length in zip(actual, expected, lengths):
            # The compat builtin must be lazy (not already a list) ...
            self.assertNotIsInstance(iter_res, list)
            # ... while the l-prefixed variant must be an eager list.
            tm.assertIsInstance(list_res, list)
            iter_res = list(iter_res)  # materialize the lazy result
            self.assertEqual(len(list_res), length)
            self.assertEqual(len(iter_res), length)
            self.assertEqual(iter_res, exp)
            self.assertEqual(list_res, exp)

    def test_range(self):
        # Trailing commas make each assignment a 1-tuple, so `+=` appends
        # additional test cases to the same parallel sequences.
        actual1 = range(10)
        actual2 = lrange(10)
        actual = [actual1, actual2],
        expected = list(builtins.range(10)),
        lengths = 10,
        actual1 = range(1, 10, 2)
        actual2 = lrange(1, 10, 2)
        actual += [actual1, actual2],
        lengths += 5,
        expected += list(builtins.range(1, 10, 2)),
        self.check_result(actual, expected, lengths)

    def test_map(self):
        func = lambda x, y, z: x + y + z
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual1 = map(func, *lst)
        actual2 = lmap(func, *lst)
        actual = [actual1, actual2],
        expected = list(builtins.map(func, *lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)

    def test_filter(self):
        func = lambda x: x
        lst = list(builtins.range(10))
        actual1 = filter(func, lst)
        actual2 = lfilter(func, lst)
        actual = [actual1, actual2],
        lengths = 9,  # 0 is falsy, so one element is filtered out
        expected = list(builtins.filter(func, lst)),
        self.check_result(actual, expected, lengths)

    def test_zip(self):
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual = [zip(*lst), lzip(*lst)],
        expected = list(builtins.zip(*lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)
if __name__ == '__main__':
    # Run this module's tests directly through nose when executed as a script.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   # '--with-coverage', '--cover-package=pandas.core'],
                   exit=False)
| apache-2.0 |
trungnt13/dnntoolkit | tests/trainer.py | 1 | 6911 | from __future__ import print_function, division
import os
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32"
import theano
from theano import tensor
import numpy as np
import scipy as sp
import dnntoolkit
import lasagne
from matplotlib import pyplot as plt
np.random.seed(dnntoolkit.MAGIC_SEED)  # deterministic runs across executions
# ======================================================================
# Global
# ======================================================================
# Weight snapshots filled in by the callbacks below: W_saved holds the
# weights at the best validation point, W_rollbacked the weights restored
# by m.rollback() when testing starts.
W_saved = None
W_rollbacked = None
dnntoolkit.logger.set_save_path('tmp/log.txt')
# ======================================================================
# data
# ======================================================================
ds = dnntoolkit.dataset.load_mnist()
dnntoolkit.logger.log(ds)
# ======================================================================
# Model
# ======================================================================
def ffnet(indim, outdim):
    """Build a small feed-forward softmax classifier with lasagne.

    Architecture: input -> flatten -> dropout(0.3) -> dense(256, relu)
    -> dropout(0.3) -> dense(outdim, softmax).

    Parameters
    ----------
    indim : tuple
        Shape of one input sample (without the batch axis).
    outdim : int-like
        Number of output classes; coerced to int.

    Returns
    -------
    The output lasagne layer of the network.
    """
    outdim = int(outdim)
    l_in = lasagne.layers.InputLayer(shape=(None,) + indim)
    l_in = lasagne.layers.FlattenLayer(l_in)
    l_in = lasagne.layers.DropoutLayer(l_in, p=0.3)
    l_hid = lasagne.layers.DenseLayer(l_in, num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)
    l_hid = lasagne.layers.DropoutLayer(l_hid, p=0.3)
    return lasagne.layers.DenseLayer(l_hid, num_units=outdim,
        nonlinearity=lasagne.nonlinearities.softmax)
# ======================================================================
# Build model and compile theano functions
# ======================================================================
m = dnntoolkit.model('tmp/tmp.ai')
m.set_model(ffnet, api='lasagne',
            indim=ds['X_train'].shape[1:],
            outdim=ds['y_train'].shape[1])
net = m.create_model()
y = tensor.matrix(name='y', dtype=theano.config.floatX)
input_var = [l.input_var for l in lasagne.layers.find_layers(net, types=lasagne.layers.InputLayer)]
# NOTE(review): `dropout` is collected but never used below.
dropout = lasagne.layers.find_layers(net, types=lasagne.layers.DropoutLayer)
# ====== Create prediction (deterministic: dropout disabled) ====== #
y_pred_deter = lasagne.layers.get_output(net, deterministic=True)
f_pred = theano.function(
    inputs=input_var,
    outputs=y_pred_deter,
    allow_input_downcast=True
)
dnntoolkit.logger.info('Built prediction function!')
# ====== Create accuracy (monitoring cost on valid/test) ====== #
cost_monitor = lasagne.objectives.categorical_accuracy(y_pred_deter, y).mean()
f_cost = theano.function(
    inputs=input_var + [y],
    outputs=cost_monitor,
    allow_input_downcast=True
)
dnntoolkit.logger.info('Built cost function!')
# ====== Create training (stochastic output + SGD updates) ====== #
y_pred_stoch = lasagne.layers.get_output(net, deterministic=False)
cost_train = lasagne.objectives.categorical_crossentropy(y_pred_stoch, y).mean()
params = lasagne.layers.get_all_params(net)
lr = dnntoolkit.tensor.shared_scalar(name='lr', val=0.001)
updates = lasagne.updates.sgd(cost_train, params, lr)
f_updates = theano.function(
    inputs=input_var + [y],
    outputs=cost_train,
    updates=updates,
    allow_input_downcast=True
)
dnntoolkit.logger.info('Built updates function!')
# ======================================================================
# Train
# ======================================================================
trainer = dnntoolkit.trainer()
trainer.set_dataset(ds,
                    train=['X_train', 'y_train'],
                    valid=['X_valid', 'y_valid'],
                    test=['X_test', 'y_test'])
trainer.set_model(cost_func=f_cost, updates_func=f_updates)
trainer.set_strategy(
    task='train',
    data={'train': ['X_train', 'y_train'], 'valid': ['X_valid', 'y_valid']},
    epoch=50,
    batch=256,
    shuffle=True,
    validfreq=0.6,
    seed=dnntoolkit.MAGIC_SEED
).set_strategy(
    task='test',
    batch=256
)
# ==================== Callback ==================== #
def epoch_end(trainer):
    """Record the mean epoch cost and dump a grid plot of all layer weights."""
    m.record(np.mean(trainer.cost), trainer.task, 'epoch_end')
    # ====== Visual weights ====== #
    plt.close('all')
    weights = m.get_weights()
    nrows = int(np.ceil(np.sqrt(len(weights))))
    ncols = nrows
    fig = plt.figure()
    for i, w in enumerate(weights[:-1]):
        ax = fig.add_subplot(nrows, ncols, i + 1)
        dnntoolkit.visual.plot_weights(w, ax)
    # NOTE(review): relies on loop variable `i` surviving the loop; fails with
    # a NameError if `weights` has fewer than two entries.
    ax = fig.add_subplot(nrows, ncols, i + 2)
    dnntoolkit.visual.plot_weights(weights[-1], ax, colorbar='all',
                                   path='img/W_%d.png' % trainer.epoch)
def batch_end(trainer):
    # Record the mean cost of the batch that just finished.
    m.record(np.mean(trainer.cost), trainer.task, 'batch_end')
def valid_end(trainer):
    """Early-stopping callback: save on improvement, stop when patience runs out."""
    m.record(np.mean(trainer.cost), 'valid_end')
    # The recorded values are accuracies; convert to costs (1 - accuracy)
    # before handing them to the early-stopping criterion.
    cost = [1 - i for i in m.select('valid_end')]
    shouldSave, shoudlStop = dnntoolkit.dnn.earlystop(
        cost, generalization_loss=True, threshold=3)
    if shouldSave:
        # dnntoolkit.logger.info('\nShould save!')
        m.save()
        global W_saved
        W_saved = [i.astype(np.float32) for i in m.get_weights()]
    # NOTE(review): 'shoudlStop' is a typo but is used consistently here.
    if shoudlStop:
        # dnntoolkit.logger.info('\nShould stop!')
        trainer.stop()
def train_end(trainer):
    # Record the final mean training cost.
    m.record(np.mean(trainer.cost), 'train_end')
def test_start(trainer):
    """Before testing: roll back to the best saved weights and snapshot them."""
    m.rollback()  # rollback to best saved version of AI
    global W_rollbacked
    W_rollbacked = [i.astype(np.float32) for i in m.get_weights()]
def test_end(trainer):
    # Record the mean test cost.
    m.record(np.mean(trainer.cost), 'test_end')
trainer.set_callback(epoch_end=epoch_end, batch_end=batch_end,
                     train_end=train_end, valid_end=valid_end,
                     test_start=test_start, test_end=test_end)
# ==================== Start now ==================== #
print(trainer)
trainer.run()
# trainer.set_strategy(yaml='tmp.yaml')
# print(trainer)
# trainer.run()
# ======================================================================
# Test load model: reload from disk and verify the weights round-trip
# ======================================================================
m = dnntoolkit.model.load('tmp/tmp.ai')
net = m.create_model()
W = m.get_weights()
# A difference of 0.0 means the reloaded weights match the saved snapshot.
dnntoolkit.logger.critical('******* Compare to best saved weights: ********')
for i, j in zip(W, W_saved):
    dnntoolkit.logger.critical('W differences: %.4f' % (np.sum(i - j)))
if W_rollbacked is not None:
    dnntoolkit.logger.critical('******* Compare to rollbacked weights: ********')
    for i, j in zip(W, W_rollbacked):
        dnntoolkit.logger.critical('W differences: %.4f' % (np.sum(i - j)))
# ====== Test prediction ====== #
test_pred = m.pred(ds['X_test'][:])
test_pred = np.argmax(test_pred, axis=1)
test_true = np.argmax(ds['y_test'][:], axis=1)
hit = np.sum(test_pred == test_true)
# True division is safe here thanks to the __future__ import at file top.
dnntoolkit.logger.critical('Test accuracy: %.4f' % (hit / len(test_true)))
# ====== Some training information ====== #
dnntoolkit.logger.info('Epoch cost:')
dnntoolkit.visual.print_bar(m.select(['epoch_end', 'train']), bincount=50)
dnntoolkit.logger.info('Validation accuracy:')
dnntoolkit.visual.print_bar(m.select(['valid_end']), bincount=50)
# ======================================================================
# End
# ======================================================================
ds.close()
| apache-2.0 |
nixingyang/Kaggle-Face-Verification | Gene Expression Prediction/solution_LightGBM.py | 1 | 5773 | import os
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV, StratifiedShuffleSplit
# Dataset locations: raw CSVs live under DATASET_FOLDER_PATH; trained LightGBM
# models and generated submissions are cached in sibling folders.
DATASET_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset/Gene Expression Prediction")
MODEL_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "model")
SUBMISSION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "submission")
def load_dataset():
    """Load the gene-expression matrices from DATASET_FOLDER_PATH.

    Each gene is stored as a 100x5 block of rows in the CSVs; the block is
    flattened into one 500-length feature vector per gene.

    Returns
    -------
    x_train : np.ndarray, shape (num_train_genes, 500)
    y_train : np.ndarray, shape (num_train_genes,)
        Flattened label column.
    x_test : np.ndarray, shape (num_test_genes, 500)
    """
    # Read csv files. `.values` replaces `.as_matrix()`, which was deprecated
    # and removed in pandas 1.0.
    x_train = pd.read_csv(os.path.join(DATASET_FOLDER_PATH, "train/x_train.csv")).values
    x_test = pd.read_csv(os.path.join(DATASET_FOLDER_PATH, "test/x_test.csv")).values
    y_train = pd.read_csv(os.path.join(DATASET_FOLDER_PATH, "train/y_train.csv")).values
    # Remove the first (ID) column
    x_train = x_train[:, 1:]
    x_test = x_test[:, 1:]
    y_train = y_train[:, 1:]
    # Every 100 rows correspond to one gene. Integer division keeps the
    # section count an int, as np.split expects.
    num_genes_train = x_train.shape[0] // 100
    num_genes_test = x_test.shape[0] // 100
    x_train = np.split(x_train, num_genes_train)
    x_test = np.split(x_test, num_genes_test)
    # Reshape by raveling each 100x5 array into a 500-length vector
    x_train = [g.ravel() for g in x_train]
    x_test = [g.ravel() for g in x_test]
    # Convert data from list to array
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_train = np.ravel(y_train)
    return x_train, y_train, x_test
def run():
    """Train a 100-fold LightGBM ensemble and write submission files.

    Each CV split trains (or reloads from MODEL_FOLDER_PATH) one booster,
    predicts on the test set, and caches a per-split submission CSV; the
    per-split predictions are then combined with max/min/mean/median
    ensembles. All artifacts are cached on disk so the script is resumable.
    """
    print("Creating folder ...")
    os.makedirs(MODEL_FOLDER_PATH, exist_ok=True)
    os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
    print("Loading dataset ...")
    x_train, y_train, x_test = load_dataset()
    print("Performing parameter optimization ...")
    # estimator = lgb.LGBMClassifier()
    # param_grid = {
    #     "num_leaves": [31, 63, 127, 255],
    #     "learning_rate": [0.01, 0.05, 0.1],
    #     "n_estimators": [50, 100, 200],
    #     "subsample" : [0.8, 0.9, 1],
    #     "colsample_bytree" : [0.8, 0.9, 1],
    #     "reg_alpha" : [0, 0.1, 0.5],
    #     "objective" : ["binary"]
    # }
    # randomizedsearch_object = RandomizedSearchCV(estimator, param_grid, n_iter=100, cv=5, scoring="roc_auc", refit=False, verbose=3)
    # randomizedsearch_object.fit(x_train, y_train)
    # print("Best score is: {}".format(randomizedsearch_object.best_score_))
    # print("Best parameters are: {}".format(randomizedsearch_object.best_params_))
    # Best score is: 0.9176406673636928
    # Best parameters are: {'subsample': 0.9, 'reg_alpha': 0, 'objective': 'binary', 'num_leaves': 255, 'n_estimators': 200, 'learning_rate': 0.05, 'colsample_bytree': 0.9}
    # Parameters fixed from the (commented-out) randomized search above.
    best_params = {"num_leaves": 255, "learning_rate": 0.05, "subsample": 0.9, "colsample_bytree": 0.9, "reg_alpha": 0, "objective":"binary", "metric":"auc"}
    prediction_array_list = []
    cv_object = StratifiedShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
    for cv_index, (train_index, valid_index) in enumerate(cv_object.split(x_train, y_train), start=1):
        submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "submission_{}.csv".format(cv_index))
        if not os.path.isfile(submission_file_path):
            model_file_path = os.path.join(MODEL_FOLDER_PATH, "model_{}.txt".format(cv_index))
            if not os.path.isfile(model_file_path):
                # Train with early stopping on the held-out fold.
                train_data = lgb.Dataset(x_train[train_index], label=y_train[train_index])
                validation_data = lgb.Dataset(x_train[valid_index], label=y_train[valid_index], reference=train_data)
                model = lgb.train(params=best_params, train_set=train_data, num_boost_round=1000000, valid_sets=[validation_data], early_stopping_rounds=100)
                model.save_model(model_file_path, num_iteration=model.best_iteration)
            assert os.path.isfile(model_file_path)
            print("Loading weights from {}".format(os.path.basename(model_file_path)))
            model = lgb.Booster(model_file=model_file_path)
            # Generate prediction
            prediction_array = np.expand_dims(model.predict(x_test), axis=-1)
            prediction_array_list.append(prediction_array)
            # Save prediction to disk (GeneId is 1-based)
            submission_file_content = pd.DataFrame(np.hstack((np.expand_dims(np.arange(len(prediction_array)), axis=-1) + 1, prediction_array)), columns=["GeneId", "Prediction"])
            submission_file_content.GeneId = submission_file_content.GeneId.astype(int)
            submission_file_content.to_csv(submission_file_path, index=False)
            print("Submission saved at {}".format(submission_file_path))
        else:
            # Load cached prediction for this split.
            # NOTE(review): `.as_matrix()` was removed in pandas 1.0; this
            # line needs `.values` on modern pandas — confirm target version.
            prediction_array = np.expand_dims(pd.read_csv(submission_file_path).as_matrix()[:, 1], axis=-1)
            prediction_array_list.append(prediction_array)
    # Ensemble predictions across all splits with several reducers.
    for ensemble_func, ensemble_func_name in zip([np.max, np.min, np.mean, np.median], ["max", "min", "mean", "median"]):
        submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "submission_{}.csv".format(ensemble_func_name))
        if not os.path.isfile(submission_file_path):
            prediction_array = ensemble_func(prediction_array_list, axis=0)
            # Save prediction to disk
            submission_file_content = pd.DataFrame(np.hstack((np.expand_dims(np.arange(len(prediction_array)), axis=-1) + 1, prediction_array)), columns=["GeneId", "Prediction"])
            submission_file_content.GeneId = submission_file_content.GeneId.astype(int)
            submission_file_content.to_csv(submission_file_path, index=False)
            print("Submission saved at {}".format(submission_file_path))
    print("All done!")
if __name__ == "__main__":
    # Script entry point.
    run()
| mit |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/core/series.py | 1 | 98095 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import types
import warnings
from collections import MutableMapping
from numpy import nan, ndarray
import numpy as np
import numpy.ma as ma
from pandas.core.common import (isnull, notnull, is_bool_indexer,
_default_index, _maybe_upcast,
_asarray_tuplesafe, _infer_dtype_from_scalar,
is_list_like, _values_from_object,
is_categorical_dtype, needs_i8_conversion,
i8_boxer, _possibly_cast_to_datetime,
_possibly_castable, _possibly_convert_platform,
_try_sort, is_internal_type, is_datetimetz,
_maybe_match_name, ABCSparseArray,
_coerce_to_dtype, SettingWithCopyError,
_maybe_box_datetimelike, ABCDataFrame,
_dict_compat)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
Float64Index, _ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical, CategoricalAccessor
import pandas.core.strings as strings
from pandas.tseries.common import (maybe_to_datetimelike,
CombinedDatetimelikeProperties)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex, Period
from pandas import compat
from pandas.util.terminal import get_terminal_size
from pandas.compat import zip, u, OrderedDict, StringIO
import pandas.core.ops as ops
from pandas.core import algorithms
import pandas.core.common as com
import pandas.core.datetools as datetools
import pandas.core.format as fmt
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, deprecate_kwarg
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from numpy import percentile as _quantile
from pandas.core.config import get_option
from pandas import _np_version_under1p9
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index', klass='Series', axes_single_arg="{0, 'index'}",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
duplicated='Series')
def _coerce_method(converter):
    """Build a scalar coercion method (e.g. ``__int__``) for Series.

    The returned function applies ``converter`` to the single element of a
    length-1 Series; any other length raises ``TypeError``.
    """
    def wrapper(self):
        if len(self) != 1:
            raise TypeError("cannot convert the series to "
                            "{0}".format(str(converter)))
        return converter(self.iloc[0])
    return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, strings.StringAccessorMixin,
generic.NDFrame,):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be any hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN)
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
Values must be unique and hashable, same length as data. Index
object (or other iterable of same length as data) Will default to
RangeIndex(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_allow_index_ops = True
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = _ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError("initializing a Series from a "
"MultiIndex is not supported")
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
name = data.name
data = data._to_embed(keep_tz=True)
copy = True
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, Series):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
if index is None:
if isinstance(data, OrderedDict):
index = Index(data)
else:
index = Index(_try_sort(data))
try:
if isinstance(index, DatetimeIndex):
if len(data):
# coerce back to datetime objects for lookup
data = _dict_compat(data)
data = lib.fast_multiget(data, index.astype('O'),
default=np.nan)
else:
data = np.nan
# GH #12169
elif isinstance(index, (PeriodIndex, TimedeltaIndex)):
data = ([data.get(i, nan) for i in index]
if data else np.nan)
else:
data = lib.fast_multiget(data, index.values,
default=np.nan)
except TypeError:
data = ([data.get(i, nan) for i in index]
if data else np.nan)
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
elif isinstance(data, Categorical):
if dtype is not None:
raise ValueError("cannot specify a dtype with a "
"Categorical")
elif (isinstance(data, types.GeneratorType) or
(compat.PY3 and isinstance(data, map))):
data = list(data)
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered"
"".format(data.__class__.__name__))
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = _default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, raise_on_error=False)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy,
raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
object.__setattr__(self, 'name', name)
self._set_axis(0, index, fastpath=True)
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
# return a sparse series here
if isinstance(arr, ABCSparseArray):
from pandas.sparse.series import SparseSeries
cls = SparseSeries
return cls(arr, index=index, name=name, dtype=dtype, copy=copy,
fastpath=fastpath)
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._data._can_hold_na
@property
def is_time_series(self):
warnings.warn("is_time_series is deprecated. Please use "
"Series.index.is_all_dates", FutureWarning, stacklevel=2)
# return self._subtyp in ['time_series', 'sparse_time_series']
return self.index.is_all_dates
_index = None
def _set_axis(self, axis, labels, fastpath=False):
""" override generic, we want to set the _typ here """
if not fastpath:
labels = _ensure_index(labels)
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
labels = DatetimeIndex(labels)
# need to set here becuase we changed the index
if fastpath:
self._data.set_axis(axis, labels)
self._set_subtyp(is_all_dates)
object.__setattr__(self, '_index', labels)
if not fastpath:
self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
# ndarray compatibility
@property
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def dtypes(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def ftype(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def ftypes(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def values(self):
"""
Return Series as ndarray or ndarray-like
depending on the dtype
Returns
-------
arr : numpy.ndarray or ndarray-like
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
[a, a, b, c]
Categories (3, object): [a, b, c]
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
tz='US/Eastern')).values
array(['2013-01-01T00:00:00.000000000-0500',
'2013-01-02T00:00:00.000000000-0500',
'2013-01-03T00:00:00.000000000-0500'], dtype='datetime64[ns]')
"""
return self._data.external_values()
@property
def _values(self):
""" return the internal repr of this data """
return self._data.internal_values()
def get_values(self):
""" same as values (but handles sparseness conversions); is a view """
return self._data.get_values()
# ops
def ravel(self, order='C'):
"""
Return the flattened underlying data as an ndarray
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
def compress(self, condition, axis=0, out=None, **kwargs):
"""
Return selected slices of an array along given axis as a Series
See also
--------
numpy.ndarray.compress
"""
return self[condition]
def nonzero(self):
"""
Return the indices of the elements that are non-zero
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatability with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
See Also
--------
numpy.nonzero
"""
return self._values.nonzero()
def put(self, *args, **kwargs):
"""
return a ndarray with the values put
See also
--------
numpy.ndarray.put
"""
self._values.put(*args, **kwargs)
def __len__(self):
"""
return the length of the Series
"""
return len(self._data)
def view(self, dtype=None):
return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self)
def __array__(self, result=None):
"""
the array interface, return my values
"""
return self.get_values()
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._constructor(result, index=self.index,
copy=False).__finalize__(self)
def __array_prepare__(self, result, context=None):
"""
Gets called prior to a ufunc
"""
# nice error message for non-ufunc types
if context is not None and not isinstance(self._values, np.ndarray):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(
obj=type(obj).__name__,
dtype=getattr(obj, 'dtype', None),
op=context[0].__name__))
return result
# complex
@property
def real(self):
return self.values.real
@real.setter
def real(self, v):
self.values.real = v
@property
def imag(self):
return self.values.imag
@imag.setter
def imag(self, v):
self.values.imag = v
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state['_data']
self.name = state['name']
self.index = self._data.index
elif isinstance(state, tuple):
# < 0.12 series pickle
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
name = own_state[1]
# recreate
self._data = SingleBlockManager(data, index, fastpath=True)
self._index = index
self.name = name
else:
raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
@property
def axes(self):
"""Return a list of the row axis labels"""
return [self.index]
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the Series by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
try:
# dispatch to the values if we need
values = self._values
if isinstance(values, np.ndarray):
return _index.get_value_at(values, i)
else:
return values[i]
except IndexError:
raise
except:
if isinstance(i, slice):
indexer = self.index._convert_slice_indexer(i, kind='iloc')
return self._get_values(indexer)
else:
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return _index.get_value_at(self, i)
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj,
kind=kind or 'getitem')
return self._get_values(slobj)
def __getitem__(self, key):
try:
result = self.index.get_value(self, key)
if not lib.isscalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if we have a non-unique index here
# otherwise have inline ndarray/lists
if not self.index.is_unique:
result = self._constructor(
result, index=[key] * len(result),
dtype=self.dtype).__finalize__(self)
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key,
kind='getitem')
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
if is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError('Indexing a Series with DataFrame is not '
'supported, use the appropriate DataFrame column')
else:
if isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
return self.reindex(key)
else:
return self._get_values(key)
elif key_type == 'boolean':
return self._get_values(key)
else:
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.ix[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
# mpl hackaround
if any(k is None for k in key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError('Can only tuple-index with a MultiIndex')
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer],
index=new_index).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self._values[indexer]
    def __setitem__(self, key, value):
        """Set value(s) by label, boolean mask, or positional fallback.

        The actual assignment is done in a nested helper so that
        chained-assignment cache bookkeeping can wrap it.
        """

        def setitem(key, value):
            # try the fast index-engine path first, then fall back to
            # progressively more general setting strategies
            try:
                self._set_with_engine(key, value)
                return
            except (SettingWithCopyError):
                # chained-assignment violation: always propagate
                raise
            except (KeyError, ValueError):
                values = self._values
                if (com.is_integer(key) and
                        not self.index.inferred_type == 'integer'):
                    # integer key on a non-integer index: positional set
                    values[key] = value
                    return
                elif key is Ellipsis:
                    self[:] = value
                    return
                elif is_bool_indexer(key):
                    # handled by the boolean-mask path below
                    pass
                elif com.is_timedelta64_dtype(self.dtype):
                    # reassign a null value to iNaT
                    if isnull(value):
                        value = tslib.iNaT

                        try:
                            self.index._engine.set_value(self._values, key,
                                                         value)
                            return
                        except TypeError:
                            pass

                # generic label-based fallback
                self.loc[key] = value
                return

            except TypeError as e:
                if (isinstance(key, tuple) and
                        not isinstance(self.index, MultiIndex)):
                    raise ValueError("Can only tuple-index with a MultiIndex")

                # python 3 type errors should be raised
                if 'unorderable' in str(e):  # pragma: no cover
                    raise IndexError(key)
                # otherwise fall through to the mask/_set_with paths below

            if is_bool_indexer(key):
                key = check_bool_indexer(self.index, key)
                try:
                    # keep existing values where ~key, assign where key
                    self.where(~key, value, inplace=True)
                    return
                except InvalidIndexError:
                    pass

            self._set_with(key, value)

        # do the setitem
        cacher_needs_updating = self._check_is_chained_assignment_possible()
        setitem(key, value)
        if cacher_needs_updating:
            self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self._values
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if not isinstance(key, (list, Series, np.ndarray, Series)):
try:
key = list(key)
except:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.inferred_type == 'integer':
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == 'boolean':
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = _asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index' % str(key[mask]))
self._set_values(indexer, value)
    def _set_values(self, key, value):
        """Positionally set values, updating the underlying block manager."""
        if isinstance(key, Series):
            # use the raw positional values of a Series key
            key = key._values
        self._data = self._data.setitem(indexer=key, value=value)
        self._maybe_update_cacher()
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def repeat(self, reps):
"""
return a new Series with the values repeated reps times
See also
--------
numpy.ndarray.repeat
"""
new_index = self.index.repeat(reps)
new_values = self._values.repeat(reps)
return self._constructor(new_values,
index=new_index).__finalize__(self)
    def reshape(self, *args, **kwargs):
        """
        return an ndarray with the values shape
        if the specified shape matches exactly the current shape, then
        return self (for compat)

        See also
        --------
        numpy.ndarray.reshape
        """
        # accept either reshape((a, b)) or reshape(a, b)
        if len(args) == 1 and hasattr(args[0], '__iter__'):
            shape = args[0]
        else:
            shape = args

        if tuple(shape) == self.shape:
            # XXX ignoring the "order" keyword.
            return self

        return self._values.reshape(shape, **kwargs)
def iget_value(self, i, axis=0):
"""
DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
"""
warnings.warn("iget_value(i) is deprecated. Please use .iloc[i] or "
".iat[i]", FutureWarning, stacklevel=2)
return self._ixs(i)
def iget(self, i, axis=0):
"""
DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
"""
warnings.warn("iget(i) is deprecated. Please use .iloc[i] or .iat[i]",
FutureWarning, stacklevel=2)
return self._ixs(i)
def irow(self, i, axis=0):
"""
DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
"""
warnings.warn("irow(i) is deprecated. Please use .iloc[i] or .iat[i]",
FutureWarning, stacklevel=2)
return self._ixs(i)
def get_value(self, label, takeable=False):
"""
Quickly retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
if takeable is True:
return _maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
series : Series
If label is contained, will be reference to calling Series,
otherwise a new object
"""
try:
if takeable:
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
return self
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return self
    def reset_index(self, level=None, drop=False, name=None, inplace=False):
        """
        Analogous to the :meth:`pandas.DataFrame.reset_index` function, see
        docstring there.

        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default
        drop : boolean, default False
            Do not try to insert index into dataframe columns
        name : object, default None
            The name of the column corresponding to the Series values
        inplace : boolean, default False
            Modify the Series in place (do not create a new object)

        Returns
        ----------
        resetted : DataFrame, or Series if drop == True
        """
        if drop:
            # discard the index entirely, replacing it with a default one
            new_index = _default_index(len(self))
            if level is not None and isinstance(self.index, MultiIndex):
                if not isinstance(level, (tuple, list)):
                    level = [level]
                level = [self.index._get_level_number(lev) for lev in level]
                if len(level) < len(self.index.levels):
                    # only some levels dropped: keep the remaining ones
                    new_index = self.index.droplevel(level)

            if inplace:
                self.index = new_index
                # set name if it was passed, otherwise, keep the previous name
                # NOTE(review): ``name or self.name`` also discards falsy
                # names such as '' or 0 -- confirm this is intended
                self.name = name or self.name
            else:
                return self._constructor(self._values.copy(),
                                         index=new_index).__finalize__(self)
        elif inplace:
            raise TypeError('Cannot reset_index inplace on a Series '
                            'to create a DataFrame')
        else:
            # drop=False: promote the index to a column of a new DataFrame
            df = self.to_frame(name)
            return df.reset_index(level=level, drop=drop)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
width, height = get_terminal_size()
max_rows = (height if get_option("display.max_rows") == 0 else
get_option("display.max_rows"))
self.to_string(buf=buf, name=self.name, dtype=self.dtype,
max_rows=max_rows)
result = buf.getvalue()
return result
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header: boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
"""
the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,
header=header, index=index, length=length,
dtype=dtype, name=name, max_rows=max_rows)
# catch contract violations
if not isinstance(the_repr, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(the_repr.__class__.__name__))
if buf is None:
return the_repr
else:
try:
buf.write(the_repr)
except AttributeError:
with open(buf, 'w') as f:
f.write(the_repr)
    def _get_repr(self, name=False, header=True, index=True, length=True,
                  dtype=True, na_rep='NaN', float_format=None, max_rows=None):
        """
        Internal function, should always return unicode string

        Delegates all formatting to ``fmt.SeriesFormatter``.
        """
        formatter = fmt.SeriesFormatter(self, name=name, length=length,
                                        header=header, index=index,
                                        dtype=dtype, na_rep=na_rep,
                                        float_format=float_format,
                                        max_rows=max_rows)
        result = formatter.to_string()

        # TODO: following check prob. not neces.
        if not isinstance(result, compat.text_type):
            raise AssertionError("result must be of type unicode, type"
                                 " of result is {0!r}"
                                 "".format(result.__class__.__name__))
        return result
def __iter__(self):
""" provide iteration over the values of the Series
box values if necessary """
if needs_i8_conversion(self.dtype):
boxer = i8_boxer(self)
return (boxer(x) for x in self._values)
else:
return iter(self._values)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return zip(iter(self.index), iter(self))
    if compat.PY3:  # pragma: no cover
        # Python 3 has no iteritems protocol; expose ``items`` as an alias
        items = iteritems
# ----------------------------------------------------------------------
# Misc public methods
def keys(self):
"""Alias for index"""
return self.index
def tolist(self):
""" Convert Series to a nested list """
return list(self)
    def to_dict(self):
        """
        Convert Series to {label -> value} dict

        Returns
        -------
        value_dict : dict
        """
        # compat.iteritems yields (index, value) pairs on both py2 and py3;
        # duplicate index labels collapse to the last value seen
        return dict(compat.iteritems(self))
def to_frame(self, name=None):
"""
Convert Series to DataFrame
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
data_frame : DataFrame
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind,
fill_value=fill_value).__finalize__(self)
def _set_name(self, name, inplace=False):
'''
Set the Series name.
Parameters
----------
name : str
inplace : bool
whether to modify `self` directly or return a copy
'''
ser = self if inplace else self.copy()
ser.name = name
return ser
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
    def count(self, level=None):
        """
        Return number of non-NA/null observations in the Series

        Parameters
        ----------
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a smaller Series

        Returns
        -------
        nobs : int or Series (if level specified)
        """
        from pandas.core.index import _get_na_value

        if level is None:
            # simple case: count the non-null values directly
            return notnull(_values_from_object(self)).sum()

        if isinstance(level, compat.string_types):
            level = self.index._get_level_number(level)

        lev = self.index.levels[level]
        lab = np.array(self.index.labels[level], subok=False, copy=True)

        mask = lab == -1
        if mask.any():
            # -1 labels mark NA entries in the level; give them their own
            # bucket at the end so bincount can count them too
            lab[mask] = cnt = len(lev)
            lev = lev.insert(cnt, _get_na_value(lev.dtype.type))

        # keep only the level codes of non-null values, then count per code
        obs = lab[notnull(self.values)]
        out = np.bincount(obs, minlength=len(lev) or None)
        return self._constructor(out, index=lev,
                                 dtype='int64').__finalize__(self)
    def mode(self):
        """Returns the mode(s) of the dataset.

        Empty if nothing occurs at least 2 times.  Always returns Series even
        if only one value.

        Returns
        -------
        modes : Series (sorted)
        """
        # (the previous docstring documented a ``sort`` parameter that this
        # method does not accept)
        # TODO: Add option for bins like value_counts()
        return algorithms.mode(self)
    @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
                                                   False: 'first'})
    @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
    def drop_duplicates(self, keep='first', inplace=False):
        # docstring is injected by @Appender from base._shared_docs;
        # the deprecated take_last kwarg is remapped to keep by the decorator
        return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
    @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
                                                   False: 'first'})
    @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
    def duplicated(self, keep='first'):
        # docstring is injected by @Appender from base._shared_docs;
        # the deprecated take_last kwarg is remapped to keep by the decorator
        return super(Series, self).duplicated(keep=keep)
def idxmin(self, axis=None, out=None, skipna=True):
"""
Index of first occurrence of minimum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmin : Index of minimum of values
Notes
-----
This method is the Series version of ``ndarray.argmin``.
See Also
--------
DataFrame.idxmin
numpy.ndarray.argmin
"""
i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=None, out=None, skipna=True):
"""
Index of first occurrence of maximum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmax : Index of maximum of values
Notes
-----
This method is the Series version of ``ndarray.argmax``.
See Also
--------
DataFrame.idxmax
numpy.ndarray.argmax
"""
i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = idxmin
argmax = idxmax
def round(self, decimals=0):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
numpy.around
"""
result = _values_from_object(self).round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
    def quantile(self, q=0.5, interpolation='linear'):
        """
        Return value at the given quantile, a la numpy.percentile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            0 <= q <= 1, the quantile(s) to compute
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
            .. versionadded:: 0.18.0

            This optional parameter specifies the interpolation method to use,
            when the desired quantile lies between two data points `i` and `j`:

                * linear: `i + (j - i) * fraction`, where `fraction` is the
                  fractional part of the index surrounded by `i` and `j`.
                * lower: `i`.
                * higher: `j`.
                * nearest: `i` or `j` whichever is nearest.
                * midpoint: (`i` + `j`) / 2.

        Returns
        -------
        quantile : float or Series
            if ``q`` is an array, a Series will be returned where the
            index is ``q`` and the values are the quantiles.

        Examples
        --------
        >>> s = Series([1, 2, 3, 4])
        >>> s.quantile(.5)
        2.5
        >>> s.quantile([.25, .5, .75])
        0.25    1.75
        0.50    2.50
        0.75    3.25
        dtype: float64
        """
        self._check_percentile(q)

        if _np_version_under1p9:
            # the ``interpolation`` keyword needs numpy >= 1.9
            if interpolation != 'linear':
                raise ValueError("Interpolation methods other than linear "
                                 "are not supported in numpy < 1.9.")

        def multi(values, qs, **kwargs):
            # compute one quantile per entry of a list-like ``qs``
            if com.is_list_like(qs):
                # _quantile takes percentiles, hence the * 100
                values = [_quantile(values, x * 100, **kwargs) for x in qs]
                # let empty result to be Float64Index
                qs = Float64Index(qs)
                return self._constructor(values, index=qs, name=self.name)
            else:
                return _quantile(values, qs * 100, **kwargs)

        kwargs = dict()
        if not _np_version_under1p9:
            kwargs.update({'interpolation': interpolation})

        # _maybe_box drops NA values and boxes datetimelike results
        return self._maybe_box(lambda values: multi(values, q, **kwargs),
                               dropna=True)
def corr(self, other, method='pearson', min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values
Parameters
----------
other : Series
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
correlation : float
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancorr(this.values, other.values, method=method,
min_periods=min_periods)
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values
Parameters
----------
other : Series
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
covariance : float
Normalized by N-1 (unbiased estimator).
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods)
def diff(self, periods=1):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
Returns
-------
diffed : Series
"""
result = com.diff(_values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
"""
Lag-N autocorrelation
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
autocorr : float
"""
return self.corr(self.shift(lag))
    def dot(self, other):
        """
        Matrix multiplication with DataFrame or inner-product with Series
        objects

        Parameters
        ----------
        other : Series or DataFrame

        Returns
        -------
        dot_product : scalar or Series
        """
        from pandas.core.frame import DataFrame
        if isinstance(other, (Series, DataFrame)):
            # align on the union of the indexes; if the union is strictly
            # larger than either side, some labels have no counterpart
            common = self.index.union(other.index)
            if (len(common) > len(self.index) or
                    len(common) > len(other.index)):
                raise ValueError('matrices are not aligned')

            left = self.reindex(index=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            # array-like ``other``: use it positionally
            left = self
            lvals = self.values
            rvals = np.asarray(other)
        if lvals.shape[0] != rvals.shape[0]:
            raise Exception('Dot product shape mismatch, %s vs %s' %
                            (lvals.shape, rvals.shape))

        if isinstance(other, DataFrame):
            # Series . DataFrame -> Series indexed by the frame's columns
            return self._constructor(np.dot(lvals, rvals),
                                     index=other.columns).__finalize__(self)
        elif isinstance(other, Series):
            return np.dot(lvals, rvals)
        elif isinstance(rvals, np.ndarray):
            return np.dot(lvals, rvals)
        else:  # pragma: no cover
            raise TypeError('unsupported type: %s' % type(other))
def searchsorted(self, v, side='left', sorter=None):
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted Series `self` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `self` would be preserved.
Parameters
----------
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
Series.sort_values
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x.searchsorted([1, 2], side='right', sorter=[0, 2, 1])
array([1, 3])
"""
if sorter is not None:
sorter = com._ensure_platform_int(sorter)
return self._values.searchsorted(Series(v)._values, side=side,
sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.tools.merge import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=False,
verify_integrity=verify_integrity)
    def _binop(self, other, func, level=None, fill_value=None):
        """
        Perform generic binary operation with optional fill value

        Parameters
        ----------
        other : Series
        func : binary operator
        fill_value : float or object
            Value to substitute for NA/null values. If both Series are NA in a
            location, the result will be NA regardless of the passed fill value
        level : int or level name, default None
            Broadcast across a level, matching Index values on the
            passed MultiIndex level

        Returns
        -------
        combined : Series
        """
        if not isinstance(other, Series):
            raise AssertionError('Other operand must be Series')

        new_index = self.index
        this = self

        if not self.index.equals(other.index):
            # outer-align so both operands cover the same labels
            this, other = self.align(other, level=level, join='outer',
                                     copy=False)
            new_index = this.index

        this_vals = this.values
        other_vals = other.values

        if fill_value is not None:
            this_mask = isnull(this_vals)
            other_mask = isnull(other_vals)
            # copy before filling so the originals are not mutated
            this_vals = this_vals.copy()
            other_vals = other_vals.copy()

            # one but not both
            mask = this_mask ^ other_mask
            this_vals[this_mask & mask] = fill_value
            other_vals[other_mask & mask] = fill_value

        result = func(this_vals, other_vals)
        name = _maybe_match_name(self, other)
        result = self._constructor(result, index=new_index, name=name)
        result = result.__finalize__(self)
        if name is None:
            # When name is None, __finalize__ overwrites current name
            result.name = None
        return result
    def combine(self, other, func, fill_value=nan):
        """
        Perform elementwise binary operation on two Series using given function
        with optional fill value when an index is missing from one Series or
        the other

        Parameters
        ----------
        other : Series or scalar value
        func : function
        fill_value : scalar value

        Returns
        -------
        result : Series
        """
        if isinstance(other, Series):
            # element-by-element over the union of both indexes
            new_index = self.index.union(other.index)
            new_name = _maybe_match_name(self, other)
            # NOTE(review): the result buffer uses self.dtype -- func results
            # that do not fit this dtype may raise or be coerced; confirm
            new_values = np.empty(len(new_index), dtype=self.dtype)
            for i, idx in enumerate(new_index):
                # missing labels on either side see ``fill_value``
                lv = self.get(idx, fill_value)
                rv = other.get(idx, fill_value)
                new_values[i] = func(lv, rv)
        else:
            # scalar ``other``: apply func against the raw values directly
            new_index = self.index
            new_values = func(self._values, other)
            new_name = self.name
        return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
# TODO: do we need name?
name = _maybe_match_name(self, other) # noqa
rs_vals = com._where_compat(isnull(this), other._values, this._values)
return self._constructor(rs_vals, index=new_index).__finalize__(self)
    def update(self, other):
        """
        Modify Series in place using non-NA values from passed
        Series. Aligns on index

        Parameters
        ----------
        other : Series
        """
        other = other.reindex_like(self)
        # only overwrite positions where ``other`` has non-null values
        mask = notnull(other)
        self._data = self._data.putmask(mask=mask, new=other, inplace=True)
        self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
    @Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs)
    def sort_values(self, axis=0, ascending=True, inplace=False,
                    kind='quicksort', na_position='last'):
        # docstring is injected by @Appender from generic._shared_docs
        axis = self._get_axis_number(axis)

        # GH 5856/5853
        # sorting a cached view in place would de-synchronize it from its
        # parent, so refuse
        if inplace and self._is_cached:
            raise ValueError("This Series is a view of some other array, to "
                             "sort in-place you must create a copy")

        def _try_kind_sort(arr):
            # easier to ask forgiveness than permission
            try:
                # if kind==mergesort, it can fail for object dtype
                return arr.argsort(kind=kind)
            except TypeError:
                # stable sort not available for object dtype
                # uses the argsort default quicksort
                return arr.argsort(kind='quicksort')

        arr = self._values
        sortedIdx = np.empty(len(self), dtype=np.int32)

        bad = isnull(arr)

        good = ~bad
        idx = _default_index(len(self))

        # argsort only the non-NA values; NA positions are appended or
        # prepended afterwards according to na_position
        argsorted = _try_kind_sort(arr[good])

        if not ascending:
            argsorted = argsorted[::-1]

        if na_position == 'last':
            n = good.sum()
            sortedIdx[:n] = idx[good][argsorted]
            sortedIdx[n:] = idx[bad]
        elif na_position == 'first':
            n = bad.sum()
            sortedIdx[n:] = idx[good][argsorted]
            sortedIdx[:n] = idx[bad]
        else:
            raise ValueError('invalid na_position: {!r}'.format(na_position))

        result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])

        if inplace:
            self._update_inplace(result)
        else:
            return result.__finalize__(self)
    @Appender(generic._shared_docs['sort_index'] % _shared_doc_kwargs)
    def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
                   sort_remaining=True):
        # docstring is injected by @Appender from generic._shared_docs
        axis = self._get_axis_number(axis)
        index = self.index
        if level is not None:
            # sort by one MultiIndex level (optionally the remaining too)
            new_index, indexer = index.sortlevel(level, ascending=ascending,
                                                 sort_remaining=sort_remaining)
        elif isinstance(index, MultiIndex):
            # full lexicographic sort across all levels
            from pandas.core.groupby import _lexsort_indexer
            indexer = _lexsort_indexer(index.labels, orders=ascending)
            indexer = com._ensure_platform_int(indexer)
            new_index = index.take(indexer)
        else:
            new_index, indexer = index.sort_values(return_indexer=True,
                                                   ascending=ascending)

        new_values = self._values.take(indexer)
        result = self._constructor(new_values, index=new_index)

        if inplace:
            self._update_inplace(result)
        else:
            return result.__finalize__(self)
def sort(self, axis=0, ascending=True, kind='quicksort',
na_position='last', inplace=True):
"""
DEPRECATED: use :meth:`Series.sort_values(inplace=True)` for INPLACE
sorting
Sort values and index labels by value. This is an inplace sort by
default. Series.order is the equivalent but returns a new Series.
Parameters
----------
axis : int (can only be zero)
ascending : boolean, default True
Sort ascending. Passing False sorts descending
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
inplace : boolean, default True
Do operation in place.
See Also
--------
Series.sort_values
"""
warnings.warn("sort is deprecated, use sort_values(inplace=True) for "
"INPLACE sorting", FutureWarning, stacklevel=2)
return self.sort_values(ascending=ascending, kind=kind,
na_position=na_position, inplace=inplace)
def order(self, na_last=None, ascending=True, kind='quicksort',
na_position='last', inplace=False):
"""
DEPRECATED: use :meth:`Series.sort_values`
Sorts Series object, by value, maintaining index-value link.
This will return a new Series by default. Series.sort is the equivalent
but as an inplace method.
Parameters
----------
na_last : boolean (optional, default=True)--DEPRECATED; use na_position
Put NaN's at beginning or end
ascending : boolean, default True
Sort ascending. Passing False sorts descending
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
inplace : boolean, default False
Do operation in place.
Returns
-------
y : Series
See Also
--------
Series.sort_values
"""
warnings.warn("order is deprecated, use sort_values(...)",
FutureWarning, stacklevel=2)
return self.sort_values(ascending=ascending, kind=kind,
na_position=na_position, inplace=inplace)
    def argsort(self, axis=0, kind='quicksort', order=None):
        """
        Overrides ndarray.argsort. Argsorts the value, omitting NA/null
        values, and places the result in the same locations as the non-NA
        values

        Parameters
        ----------
        axis : int (can only be zero)
        kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
            Choice of sorting algorithm. See np.sort for more
            information. 'mergesort' is the only stable algorithm
        order : ignored

        Returns
        -------
        argsorted : Series, with -1 indicated where nan values are present

        See also
        --------
        numpy.ndarray.argsort
        """
        values = self._values
        mask = isnull(values)

        if mask.any():
            # NA positions keep the sentinel -1; the non-NA positions get
            # the argsort computed over the non-NA values only
            result = Series(-1, index=self.index, name=self.name,
                            dtype='int64')
            notmask = ~mask
            result[notmask] = np.argsort(values[notmask], kind=kind)
            return self._constructor(result,
                                     index=self.index).__finalize__(self)
        else:
            return self._constructor(
                np.argsort(values, kind=kind), index=self.index,
                dtype='int64').__finalize__(self)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def nlargest(self, n=5, keep='first'):
"""Return the largest `n` elements.
Parameters
----------
n : int
Return this many descending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
take_last : deprecated
Returns
-------
top_n : Series
The n largest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
See Also
--------
Series.nsmallest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nlargest(10) # only sorts up to the N requested
"""
return algorithms.select_n(self, n=n, keep=keep, method='nlargest')
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def nsmallest(self, n=5, keep='first'):
"""Return the smallest `n` elements.
Parameters
----------
n : int
Return this many ascending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
take_last : deprecated
Returns
-------
bottom_n : Series
The n smallest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
--------
Series.nlargest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nsmallest(10) # only sorts up to the N requested
"""
return algorithms.select_n(self, n=n, keep=keep, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort Series with MultiIndex by chosen level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int or level name, default None
ascending : bool, default True
Returns
-------
sorted : Series
See Also
--------
Series.sort_index(level=...)
"""
return self.sort_index(level=level, ascending=ascending,
sort_remaining=sort_remaining)
def swaplevel(self, i, j, copy=True):
"""
Swap levels i and j in a MultiIndex
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : Series
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self)
def reorder_levels(self, order):
    """
    Rearrange index levels using input order. May not drop or duplicate
    levels

    Parameters
    ----------
    order : list of int representing new level order.
        (reference level by number or key)

    Returns
    -------
    type of caller (new object)
    """
    if not isinstance(self.index, MultiIndex):  # pragma: no cover
        raise Exception('Can only reorder levels on a hierarchical axis.')

    # Work on a copy; only the index object is replaced.
    result = self.copy()
    result.index = result.index.reorder_levels(order)
    return result
def unstack(self, level=-1, fill_value=None):
    """
    Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
    The level involved will automatically get sorted.

    Parameters
    ----------
    level : int, string, or list of these, default last level
        Level(s) to unstack, can pass level name
    fill_value : replace NaN with this value if the unstack produces
        missing values

        .. versionadded: 0.18.0

    Examples
    --------
    >>> s
    one  a   1.
    one  b   2.
    two  a   3.
    two  b   4.

    >>> s.unstack(level=-1)
         a   b
    one  1.  2.
    two  3.  4.

    >>> s.unstack(level=0)
       one  two
    a  1.   2.
    b  3.   4.

    Returns
    -------
    unstacked : DataFrame
    """
    # Local import to avoid a circular dependency with pandas.core.reshape.
    from pandas.core.reshape import unstack
    return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
    """
    Map values of Series using input correspondence (which can be
    a dict, Series, or function)

    Parameters
    ----------
    arg : function, dict, or Series
    na_action : {None, 'ignore'}
        If 'ignore', propagate NA values

    Examples
    --------
    >>> x
    one   1
    two   2
    three 3

    >>> y
    1  foo
    2  bar
    3  baz

    >>> x.map(y)
    one   foo
    two   bar
    three baz

    Returns
    -------
    y : Series
        same index as caller
    """
    values = self._values
    if needs_i8_conversion(values.dtype):
        # datetime64/timedelta64 data is stored as raw i8; box each value
        # (e.g. to Timestamp) so the mapping sees proper scalars.
        boxer = i8_boxer(values)
        values = lib.map_infer(values, boxer)

    if na_action == 'ignore':
        # Masked variant: skips NA positions instead of calling f on them.
        mask = isnull(values)

        def map_f(values, f):
            return lib.map_infer_mask(values, f, mask.view(np.uint8))
    else:
        map_f = lib.map_infer

    if isinstance(arg, (dict, Series)):
        if isinstance(arg, dict):
            arg = self._constructor(arg, index=arg.keys())

        # dict/Series correspondence: align by looking up our values in
        # arg's index; unmatched positions come back as NA via take_1d.
        indexer = arg.index.get_indexer(values)
        new_values = com.take_1d(arg._values, indexer)
        return self._constructor(new_values,
                                 index=self.index).__finalize__(self)
    else:
        # Callable: apply element-wise (possibly NA-masked).
        mapped = map_f(values, arg)
        return self._constructor(mapped,
                                 index=self.index).__finalize__(self)
def apply(self, func, convert_dtype=True, args=(), **kwds):
    """
    Invoke function on values of Series. Can be ufunc (a NumPy function
    that applies to the entire Series) or a Python function that only works
    on single values

    Parameters
    ----------
    func : function
    convert_dtype : boolean, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object
    args : tuple
        Positional arguments to pass to function in addition to the value

    Additional keyword arguments will be passed as keywords to the function

    Returns
    -------
    y : Series or DataFrame if func returns a Series

    See also
    --------
    Series.map: For element-wise operations

    Examples
    --------

    Create a series with typical summer temperatures for each city.

    >>> import pandas as pd
    >>> import numpy as np
    >>> series = pd.Series([20, 21, 12], index=['London',
    ... 'New York','Helsinki'])
    London      20
    New York    21
    Helsinki    12
    dtype: int64

    Square the values by defining a function and passing it as an
    argument to ``apply()``.

    >>> def square(x):
    ...     return x**2
    >>> series.apply(square)
    London      400
    New York    441
    Helsinki    144
    dtype: int64

    Square the values by passing an anonymous function as an
    argument to ``apply()``.

    >>> series.apply(lambda x: x**2)
    London      400
    New York    441
    Helsinki    144
    dtype: int64

    Define a custom function that needs additional positional
    arguments and pass these additional arguments using the
    ``args`` keyword.

    >>> def subtract_custom_value(x, custom_value):
    ...     return x-custom_value
    >>> series.apply(subtract_custom_value, args=(5,))
    London      15
    New York    16
    Helsinki     7
    dtype: int64

    Define a custom function that takes keyword arguments
    and pass these arguments to ``apply``.

    >>> def add_custom_values(x, **kwargs):
    ...     for month in kwargs:
    ...         x+=kwargs[month]
    ...     return x
    >>> series.apply(add_custom_values, june=30, july=20, august=25)
    London      95
    New York    96
    Helsinki    87
    dtype: int64

    Use a function from the Numpy library.

    >>> series.apply(np.log)
    London      2.995732
    New York    3.044522
    Helsinki    2.484907
    dtype: float64
    """
    # Empty Series short-circuit: preserve dtype and (empty) index.
    if len(self) == 0:
        return self._constructor(dtype=self.dtype,
                                 index=self.index).__finalize__(self)

    # NOTE(review): parses as `kwds or (args and not isinstance(...))`,
    # so a ufunc called with kwds is also wrapped in the lambda (and hence
    # applied element-wise) — presumably intentional, since the whole-array
    # fast path below would drop the kwds. Confirm before "fixing".
    if kwds or args and not isinstance(func, np.ufunc):
        f = lambda x: func(x, *args, **kwds)
    else:
        f = func

    # Plain ufunc with no extra args: apply to the whole Series at once.
    if isinstance(f, np.ufunc):
        return f(self)

    values = _values_from_object(self)
    if needs_i8_conversion(values.dtype):
        # Box raw i8 datetime/timedelta values before calling f on them.
        boxer = i8_boxer(values)
        values = lib.map_infer(values, boxer)

    mapped = lib.map_infer(values, f, convert=convert_dtype)
    if len(mapped) and isinstance(mapped[0], Series):
        # f returned Series objects: assemble them into a DataFrame.
        from pandas.core.frame import DataFrame
        return DataFrame(mapped.tolist(), index=self.index)
    else:
        return self._constructor(mapped,
                                 index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
            filter_type=None, **kwds):
    """
    perform a reduction operation

    if we have an ndarray as a value, then simply perform the operation,
    otherwise delegate to the object
    """
    delegate = self._values
    if isinstance(delegate, np.ndarray):
        # Validate that 'axis' is consistent with Series's single axis.
        self._get_axis_number(axis)
        if numeric_only:
            raise NotImplementedError('Series.{0} does not implement '
                                      'numeric_only.'.format(name))
        return op(delegate, skipna=skipna, **kwds)

    # Non-ndarray backing (e.g. Categorical): let it reduce itself.
    return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
                            numeric_only=numeric_only,
                            filter_type=filter_type, **kwds)
def _maybe_box(self, func, dropna=False):
    """
    evaluate a function with possible input/output conversion if we are i8

    Parameters
    ----------
    func : callable
        Function applied to the (possibly i8-viewed) values.
    dropna : bool, default False
        whether to drop values if necessary
    """
    if dropna:
        values = self.dropna()._values
    else:
        values = self._values

    if needs_i8_conversion(self):
        boxer = i8_boxer(self)

        # Empty datetimelike input: return the boxed NaT sentinel.
        if len(values) == 0:
            return boxer(tslib.iNaT)

        # Compute on the raw i8 view, then box the result(s) back.
        values = values.view('i8')
        result = func(values)
        if com.is_list_like(result):
            result = result.map(boxer)
        else:
            result = boxer(result)

    else:

        # let the function return nan if appropriate
        if dropna:
            if len(values) == 0:
                return np.nan
        result = func(values)

    return result
def _reindex_indexer(self, new_index, indexer, copy):
    """Take values along ``indexer`` onto ``new_index``.

    ``indexer is None`` means no reordering is needed; return self (or a
    copy if requested).
    """
    if indexer is None:
        if copy:
            return self.copy()
        return self

    # be subclass-friendly
    new_values = com.take_1d(self.get_values(), indexer)
    return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
    """ check if we do need a multi reindex; this is for compat with
    higher dims

    A Series has only one axis, so a multi-axis reindex never applies.
    """
    return False
@Appender(generic._shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
          fill_value=None, method=None, limit=None, fill_axis=0,
          broadcast_axis=None):
    # Pure pass-through to the shared NDFrame implementation; the
    # docstring comes from the @Appender template above.
    return super(Series, self).align(other, join=join, axis=axis,
                                     level=level, copy=copy,
                                     fill_value=fill_value, method=method,
                                     limit=limit, fill_axis=fill_axis,
                                     broadcast_axis=broadcast_axis)
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, **kwargs):
    # A scalar or a non-mapping list-like renames the Series itself
    # (sets .name); a mapping/callable relabels the index via the base
    # NDFrame.rename.
    is_scalar_or_list = (
        (not com.is_sequence(index) and not callable(index)) or
        (com.is_list_like(index) and not isinstance(index, MutableMapping))
    )

    if is_scalar_or_list:
        return self._set_name(index, inplace=kwargs.get('inplace'))
    return super(Series, self).rename(index=index, **kwargs)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, **kwargs):
    # Pass-through; docstring supplied by the @Appender template.
    return super(Series, self).reindex(index=index, **kwargs)
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
           limit=None, downcast=None, **kwargs):
    # Pass-through; docstring supplied by the @Appender template.
    return super(Series, self).fillna(value=value, method=method,
                                      axis=axis, inplace=inplace,
                                      limit=limit, downcast=downcast,
                                      **kwargs)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
    # Pass-through; docstring supplied by the @Appender template.
    return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
def reindex_axis(self, labels, axis=0, **kwargs):
    """ for compatibility with higher dims """
    # A Series has a single axis (0); delegate to reindex for it and
    # reject anything else.
    if axis == 0:
        return self.reindex(index=labels, **kwargs)
    raise ValueError("cannot reindex series on non-zero axis!")
def memory_usage(self, index=True, deep=False):
    """Memory usage of the Series

    Parameters
    ----------
    index : bool
        Specifies whether to include memory usage of Series index
    deep : bool
        Introspect the data deeply, interrogate
        `object` dtypes for system-level memory consumption

    Returns
    -------
    scalar bytes of memory consumed

    Notes
    -----
    Memory usage does not include memory consumed by elements that
    are not components of the array if deep=False

    See Also
    --------
    numpy.ndarray.nbytes
    """
    # Values' usage from the base class, plus the index's if requested.
    v = super(Series, self).memory_usage(deep=deep)
    if index:
        v += self.index.memory_usage(deep=deep)
    return v
def take(self, indices, axis=0, convert=True, is_copy=False):
    """
    return Series corresponding to requested indices

    Parameters
    ----------
    indices : list / array of ints
    convert : translate negative to positive indices (default)

    Returns
    -------
    taken : Series

    See also
    --------
    numpy.ndarray.take
    """
    # check/convert indicies here
    if convert:
        indices = maybe_convert_indices(indices, len(self._get_axis(axis)))

    indices = com._ensure_platform_int(indices)
    new_index = self.index.take(indices)
    new_values = self._values.take(indices)
    return self._constructor(new_values,
                             index=new_index).__finalize__(self)
def isin(self, values):
    """
    Return a boolean :class:`~pandas.Series` showing whether each element
    in the :class:`~pandas.Series` is exactly contained in the passed
    sequence of ``values``.

    Parameters
    ----------
    values : list-like
        The sequence of values to test. Passing in a single string will
        raise a ``TypeError``. Instead, turn a single string into a
        ``list`` of one element.

    Returns
    -------
    isin : Series (bool dtype)

    Raises
    ------
    TypeError
      * If ``values`` is a string

    See Also
    --------
    pandas.DataFrame.isin

    Examples
    --------

    >>> s = pd.Series(list('abc'))
    >>> s.isin(['a', 'c', 'e'])
    0     True
    1    False
    2     True
    dtype: bool

    Passing a single string as ``s.isin('a')`` will raise an error. Use
    a list of one element instead:

    >>> s.isin(['a'])
    0     True
    1    False
    2    False
    dtype: bool
    """
    # Vectorized membership test done by the shared algorithms helper.
    result = algorithms.isin(_values_from_object(self), values)
    return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
    """
    Return boolean Series equivalent to left <= series <= right. NA values
    will be treated as False

    Parameters
    ----------
    left : scalar
        Left boundary
    right : scalar
        Right boundary

    Returns
    -------
    is_between : Series
    """
    # Combine both one-sided comparisons directly; NA compares False.
    if inclusive:
        return (self >= left) & (self <= right)
    return (self > left) & (self < right)
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
             index_col=0, encoding=None, infer_datetime_format=False):
    """
    Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
    instead).

    It is preferable to use the more powerful :func:`pandas.read_csv`
    for most general purposes, but ``from_csv`` makes for an easy
    roundtrip to and from a file (the exact counterpart of
    ``to_csv``), especially with a time Series.

    This method only differs from :func:`pandas.read_csv` in some defaults:

    - `index_col` is ``0`` instead of ``None`` (take first column as index
      by default)
    - `header` is ``None`` instead of ``0`` (the first row is not used as
      the column names)
    - `parse_dates` is ``True`` instead of ``False`` (try parsing the index
      as datetime by default)

    With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used
    to return a Series like ``from_csv``.

    Parameters
    ----------
    path : string file path or file handle / StringIO
    sep : string, default ','
        Field delimiter
    parse_dates : boolean, default True
        Parse dates. Different default from read_table
    header : int, default None
        Row to use as header (skip prior rows)
    index_col : int or sequence, default 0
        Column to use for index. If a sequence is given, a MultiIndex
        is used. Different default from read_table
    encoding : string, optional
        a string representing the encoding to use if the contents are
        non-ascii, for python versions prior to 3
    infer_datetime_format: boolean, default False
        If True and `parse_dates` is True for a column, try to infer the
        datetime format based on the first datetime string. If the format
        can be inferred, there often will be a large parsing speed-up.

    See also
    --------
    pandas.read_csv

    Returns
    -------
    y : Series
    """
    # Parse via DataFrame.from_csv and squeeze the first column out.
    from pandas.core.frame import DataFrame
    df = DataFrame.from_csv(path, header=header, index_col=index_col,
                            sep=sep, parse_dates=parse_dates,
                            encoding=encoding,
                            infer_datetime_format=infer_datetime_format)
    result = df.iloc[:, 0]
    if header is None:
        # No header row: the column/index names are parsing artifacts.
        result.index.name = result.name = None

    return result
def to_csv(self, path, index=True, sep=",", na_rep='', float_format=None,
           header=False, index_label=None, mode='w', nanRep=None,
           encoding=None, date_format=None, decimal='.'):
    """
    Write Series to a comma-separated values (csv) file

    Parameters
    ----------
    path : string file path or file handle / StringIO. If None is provided
        the result is returned as a string.
    na_rep : string, default ''
        Missing data representation
    float_format : string, default None
        Format string for floating point numbers
    header : boolean, default False
        Write out series name
    index : boolean, default True
        Write row names (index)
    index_label : string or sequence, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    mode : Python write mode, default 'w'
    sep : character, default ","
        Field delimiter for the output file.
    nanRep : deprecated
        Deprecated alias for `na_rep`; passed through for compatibility.
    encoding : string, optional
        a string representing the encoding to use if the contents are
        non-ascii, for python versions prior to 3
    date_format: string, default None
        Format string for datetime objects.
    decimal: string, default '.'
        Character recognized as decimal separator. E.g. use ',' for
        European data
    """
    # Delegate to the (single-column) DataFrame writer.
    from pandas.core.frame import DataFrame
    df = DataFrame(self)
    # result is only a string if no path provided, otherwise None
    result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
                       float_format=float_format, header=header,
                       index_label=index_label, mode=mode, nanRep=nanRep,
                       encoding=encoding, date_format=date_format,
                       decimal=decimal)
    if path is None:
        return result
def dropna(self, axis=0, inplace=False, **kwargs):
    """
    Return Series without null values

    Parameters
    ----------
    axis : {0}, default 0
        Only axis 0 is valid for a Series.
    inplace : boolean, default False
        Do operation in place.

    Returns
    -------
    valid : Series
    """
    # 'how' is accepted (and ignored) for DataFrame signature compat;
    # any other keyword is an error.
    kwargs.pop('how', None)
    if kwargs:
        raise TypeError('dropna() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs.keys())[0]))

    axis = self._get_axis_number(axis or 0)

    if self._can_hold_na:
        result = remove_na(self)
        if inplace:
            self._update_inplace(result)
        else:
            return result
    else:
        # dtype cannot hold NA (e.g. ints): nothing to drop.
        if inplace:
            # do nothing
            pass
        else:
            return self.copy()
# Deprecated alias for dropna(); kept for backwards compatibility.
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
                                                          **kwargs)
def first_valid_index(self):
    """
    Return label for first non-NA/null value

    Returns None if the Series is empty or contains only NA values.
    """
    if len(self) == 0:
        return None

    mask = isnull(self._values)
    i = mask.argmin()
    # argmin of an all-True mask is 0, so re-check that position.
    if mask[i]:
        return None
    else:
        return self.index[i]
def last_valid_index(self):
    """
    Return label for last non-NA/null value

    Returns None if the Series is empty or contains only NA values.
    """
    if len(self) == 0:
        return None

    # Scan from the end by reversing the null mask.
    mask = isnull(self._values[::-1])
    i = mask.argmin()
    if mask[i]:
        return None
    else:
        # i counts from the end; translate back to a forward position.
        return self.index[len(self) - i - 1]
# ----------------------------------------------------------------------
# Time series-oriented methods
def asof(self, where):
    """
    Return last good (non-NaN) value in Series if value is NaN for
    requested date.

    If there is no good value, NaN is returned.

    Parameters
    ----------
    where : date or array of dates

    Notes
    -----
    Dates are assumed to be sorted

    Returns
    -------
    value or NaN
    """
    if isinstance(where, compat.string_types):
        where = datetools.to_datetime(where)

    values = self._values

    if not hasattr(where, '__iter__'):
        # Scalar lookup path.
        start = self.index[0]
        if isinstance(self.index, PeriodIndex):
            # Compare on ordinals so Period vs. scalar ordering works.
            where = Period(where, freq=self.index.freq).ordinal
            start = start.ordinal

        if where < start:
            return np.nan
        # Position of the last index label <= where.
        loc = self.index.searchsorted(where, side='right')
        if loc > 0:
            loc -= 1
        # Walk backwards past NaNs to the last good value.
        while isnull(values[loc]) and loc > 0:
            loc -= 1
        return values[loc]

    # Array lookup path: vectorized via Index.asof_locs.
    if not isinstance(where, Index):
        where = Index(where)

    locs = self.index.asof_locs(where, notnull(values))
    new_values = com.take_1d(values, locs)
    return self._constructor(new_values, index=where).__finalize__(self)
def to_timestamp(self, freq=None, how='start', copy=True):
    """
    Cast to datetimeindex of timestamps, at *beginning* of period

    Parameters
    ----------
    freq : string, default frequency of PeriodIndex
        Desired frequency
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end
    copy : bool, default True
        Whether to copy the underlying values.

    Returns
    -------
    ts : Series with DatetimeIndex
    """
    # Values are unchanged; only the index is converted.
    new_values = self._values
    if copy:
        new_values = new_values.copy()

    new_index = self.index.to_timestamp(freq=freq, how=how)
    return self._constructor(new_values,
                             index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
    """
    Convert Series from DatetimeIndex to PeriodIndex with desired
    frequency (inferred from index if not passed)

    Parameters
    ----------
    freq : string, default
        Target period frequency; inferred from the index if None.
    copy : bool, default True
        Whether to copy the underlying values.

    Returns
    -------
    ts : Series with PeriodIndex
    """
    # Values are unchanged; only the index is converted.
    new_values = self._values
    if copy:
        new_values = new_values.copy()

    new_index = self.index.to_period(freq=freq)
    return self._constructor(new_values,
                             index=new_index).__finalize__(self)
# -------------------------------------------------------------------------
# Datetimelike delegation methods
def _make_dt_accessor(self):
    # Build the .dt delegate; only datetimelike Series support it.
    try:
        return maybe_to_datetimelike(self)
    except Exception:
        raise AttributeError("Can only use .dt accessor with datetimelike "
                             "values")

# Lazily-constructed .dt accessor property.
dt = base.AccessorProperty(CombinedDatetimelikeProperties,
                           _make_dt_accessor)
# -------------------------------------------------------------------------
# Categorical methods
def _make_cat_accessor(self):
    # Build the .cat delegate; only category-dtype Series support it.
    if not is_categorical_dtype(self.dtype):
        raise AttributeError("Can only use .cat accessor with a "
                             "'category' dtype")
    return CategoricalAccessor(self.values, self.index)

# Lazily-constructed .cat accessor property.
cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor)
def _dir_deletions(self):
    # Names removed from dir(); accessors are re-added selectively by
    # _dir_additions when they are actually usable.
    return self._accessors
def _dir_additions(self):
    # Advertise only the accessors (.dt, .cat, ...) that work for this
    # Series' dtype; inapplicable ones raise AttributeError and are skipped.
    rv = set()
    for accessor in self._accessors:
        try:
            getattr(self, accessor)
            rv.add(accessor)
        except AttributeError:
            pass
    return rv
# Wire up the single "index" axis and attach the shared numeric,
# series-only, and series-or-frame operations defined on the base classes.
Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0})
Series._add_numeric_operations()
Series._add_series_only_operations()
Series._add_series_or_dataframe_operations()

# Types accepted as index-like inputs elsewhere in this module.
_INDEX_TYPES = ndarray, Index, list, tuple
# -----------------------------------------------------------------------------
# Supplementary functions
def remove_na(series):
    """
    Return series containing only true/non-NaN values, possibly empty.
    """
    # Boolean-mask out NA positions using the module-level notnull helper.
    return series[notnull(_values_from_object(series))]
def _sanitize_index(data, index, copy=False):
    """ sanitize an index type to return an ndarray of the underlying, pass
    thru a non-Index

    Raises ValueError if data and index lengths disagree.
    """
    if index is None:
        return data

    if len(data) != len(index):
        raise ValueError('Length of values does not match length of ' 'index')

    if isinstance(data, PeriodIndex):
        # Periods are stored boxed (object dtype).
        data = data.asobject
    elif isinstance(data, DatetimeIndex):
        data = data._to_embed(keep_tz=True)
        if copy:
            data = data.copy()
    elif isinstance(data, np.ndarray):

        # coerce datetimelike types
        if data.dtype.kind in ['M', 'm']:
            data = _sanitize_array(data, index, copy=copy)

    return data
def _sanitize_array(data, index, dtype=None, copy=False,
                    raise_cast_failure=False):
    """ sanitize input data to an ndarray, copy if specified, coerce to the
    dtype if specified

    Handles ndarray/Index/Series, Categorical, masked arrays, lists,
    tuples and scalars; scalars are broadcast to len(index).
    """
    if dtype is not None:
        dtype = _coerce_to_dtype(dtype)

    if isinstance(data, ma.MaskedArray):
        # Replace masked positions with an appropriate fill value,
        # upcasting the dtype if needed to hold it.
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = _maybe_upcast(data, copy=True)
            data[mask] = fill_value
        else:
            data = data.copy()

    def _try_cast(arr, take_fast_path):
        # Attempt dtype coercion; on failure fall back to object dtype
        # (or re-raise when the caller demanded strict casting).

        # perf shortcut as this is the most common case
        if take_fast_path:
            if _possibly_castable(arr) and not copy and dtype is None:
                return arr

        try:
            subarr = _possibly_cast_to_datetime(arr, dtype)
            if not is_internal_type(subarr):
                subarr = np.array(subarr, dtype=dtype, copy=copy)
        except (ValueError, TypeError):
            if is_categorical_dtype(dtype):
                subarr = Categorical(arr)
            elif dtype is not None and raise_cast_failure:
                raise
            else:
                subarr = np.array(arr, dtype=object, copy=copy)
        return subarr

    # GH #846
    if isinstance(data, (np.ndarray, Index, Series)):

        if dtype is not None:
            subarr = np.array(data, copy=False)

            # possibility of nan -> garbage
            if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
                # float -> int is only safe when no NaNs are present.
                if not isnull(data).any():
                    subarr = _try_cast(data, True)
                elif copy:
                    subarr = data.copy()
            else:
                subarr = _try_cast(data, True)
        elif isinstance(data, Index):
            # don't coerce Index types
            # e.g. indexes can have different conversions (so don't fast path
            # them)
            # GH 6140
            subarr = _sanitize_index(data, index, copy=True)
        else:
            subarr = _try_cast(data, True)

            if copy:
                subarr = data.copy()

    elif isinstance(data, Categorical):
        subarr = data

        if copy:
            subarr = data.copy()
        return subarr

    elif isinstance(data, list) and len(data) > 0:
        if dtype is not None:
            try:
                subarr = _try_cast(data, False)
            except Exception:
                if raise_cast_failure:  # pragma: no cover
                    raise
                subarr = np.array(data, dtype=object, copy=copy)
                subarr = lib.maybe_convert_objects(subarr)
        else:
            subarr = _possibly_convert_platform(data)

        subarr = _possibly_cast_to_datetime(subarr, dtype)

    else:
        subarr = _try_cast(data, False)

    def create_from_value(value, index, dtype):
        # return a new empty value suitable for the dtype

        if is_datetimetz(dtype):
            subarr = DatetimeIndex([value] * len(index), dtype=dtype)
        elif is_categorical_dtype(dtype):
            subarr = Categorical([value] * len(index))
        else:
            if not isinstance(dtype, (np.dtype, type(np.dtype))):
                dtype = dtype.dtype
            subarr = np.empty(len(index), dtype=dtype)
            subarr.fill(value)

        return subarr

    # scalar like
    if subarr.ndim == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            value = data

            # figure out the dtype from the value (upcast if necessary)
            if dtype is None:
                dtype, value = _infer_dtype_from_scalar(value)
            else:
                # need to possibly convert the value here
                value = _possibly_cast_to_datetime(value, dtype)

            # Broadcast the scalar across the whole index.
            subarr = create_from_value(value, index, dtype)

        else:
            return subarr.item()

    # the result that we want
    elif subarr.ndim == 1:
        if index is not None:

            # a 1-element ndarray
            if len(subarr) != len(index) and len(subarr) == 1:
                subarr = create_from_value(subarr[0], index,
                                           subarr.dtype)

    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise Exception('Data must be 1-dimensional')
        else:
            # e.g. list of tuples: keep tuples as objects, don't make 2-D.
            subarr = _asarray_tuplesafe(data, dtype=dtype)

    # This is to prevent mixed-type Series getting all casted to
    # NumPy string type, e.g. NaN --> '-1#IND'.
    if issubclass(subarr.dtype.type, compat.string_types):
        subarr = np.array(data, dtype=object, copy=copy)

    return subarr
# backwards compatiblity
class TimeSeries(Series):
    """Deprecated alias for :class:`Series`; emits FutureWarning on use."""

    def __init__(self, *args, **kwargs):
        # deprecation TimeSeries, #10890
        warnings.warn("TimeSeries is deprecated. Please use Series",
                      FutureWarning, stacklevel=2)
        super(TimeSeries, self).__init__(*args, **kwargs)
# ----------------------------------------------------------------------
# Add plotting methods to Series

import pandas.tools.plotting as _gfx  # noqa

# Attach the .plot accessor and .hist convenience method.
Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods,
                                    _gfx.SeriesPlotMethods)
Series.hist = _gfx.hist_series

# Add arithmetic!
ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)
ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
| gpl-2.0 |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings

# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk

__all__ = ['backend', 'show', 'draw_if_interactive',
           'new_figure_manager', 'backend_version']

backend = matplotlib.get_backend()  # validates, to match all_backends
def pylab_setup():
    'return new_figure_manager, draw_if_interactive and show for pylab'

    # Import the requested backend into a generic module object
    if backend.startswith('module://'):
        # Third-party backend given as a full module path.
        backend_name = backend[9:]
    else:
        backend_name = 'backend_' + backend
        backend_name = backend_name.lower()  # until we banish mixed case
        backend_name = 'matplotlib.backends.%s' % backend_name.lower()
    backend_mod = __import__(backend_name,
                             globals(), locals(), [backend_name])

    # Things we pull in from all backends
    new_figure_manager = backend_mod.new_figure_manager

    # image backends like pdf, agg or svg do not need to do anything
    # for "show" or "draw_if_interactive", so if they are not defined
    # by the backend, just do nothing
    def do_nothing_show(*args, **kwargs):
        # Warn only for interactive sessions, where a no-op show()
        # would otherwise look like a silent failure.
        frame = inspect.currentframe()
        fname = frame.f_back.f_code.co_filename
        if fname in ('<stdin>', '<ipython console>'):
            warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
                          (backend, matplotlib.matplotlib_fname()))

    def do_nothing(*args, **kwargs):
        pass

    backend_version = getattr(backend_mod, 'backend_version', 'unknown')
    show = getattr(backend_mod, 'show', do_nothing_show)
    draw_if_interactive = getattr(backend_mod, 'draw_if_interactive',
                                  do_nothing)

    # Additional imports which only happen for certain backends.  This section
    # should probably disappear once all backends are uniform.
    if backend.lower() in ['wx', 'wxagg']:
        Toolbar = backend_mod.Toolbar
        __all__.append('Toolbar')

    matplotlib.verbose.report('backend %s version %s' %
                              (backend, backend_version))

    return new_figure_manager, draw_if_interactive, show
| agpl-3.0 |