repo_name | path | copies | size | content | license
---|---|---|---|---|---|
msultan/msmbuilder | msmbuilder/utils/io.py | 9 | 2305 | from __future__ import print_function, division, absolute_import
import contextlib
import pickle
import warnings
import numpy as np
from sklearn.externals.joblib import load as jl_load
__all__ = ['printoptions', 'verbosedump', 'verboseload', 'dump', 'load']
warnings.warn("This module might be deprecated in favor of msmbuilder.io",
PendingDeprecationWarning)
@contextlib.contextmanager
def printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
def dump(value, filename, compress=None, cache_size=None):
"""Save an arbitrary python object using pickle.
Parameters
----------
value : any Python object
The object to store to disk using pickle.
filename : string
The name of the file in which it is to be stored
compress : None
No longer used
cache_size : positive number, optional
No longer used
See Also
--------
load : corresponding loader
"""
if compress is not None or cache_size is not None:
warnings.warn("compress and cache_size are no longer valid options")
with open(filename, 'wb') as f:
pickle.dump(value, f)
def load(filename):
"""Load an object that has been saved with dump.
We try to open it using the pickle protocol. As a fallback, we
use joblib.load. Joblib was the default prior to msmbuilder v3.2.
Parameters
----------
filename : string
The name of the file to load.
"""
try:
with open(filename, 'rb') as f:
return pickle.load(f)
except Exception as e1:
try:
return jl_load(filename)
except Exception as e2:
raise IOError(
"Unable to load {} using the pickle or joblib protocol.\n"
"Pickle: {}\n"
"Joblib: {}".format(filename, e1, e2)
)
def verbosedump(value, fn, compress=None):
"""Verbose wrapper around dump"""
print('Saving "%s"... (%s)' % (fn, type(value)))
dump(value, fn, compress=compress)
def verboseload(fn):
"""Verbose wrapper around load.
Try to use pickle. If that fails, try to use joblib.
"""
print('loading "%s"...' % fn)
return load(fn)
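# Example usage (an illustrative sketch; the file name is hypothetical):
#
#     model = {'weights': [1.0, 2.0, 3.0]}
#     verbosedump(model, 'model.pkl')
#     restored = verboseload('model.pkl')
#     assert restored == model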
| lgpl-2.1 |
uber/pyro | tests/distributions/test_zero_inflated.py | 1 | 4929 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
from pyro.distributions import (
Delta,
NegativeBinomial,
Normal,
Poisson,
ZeroInflatedDistribution,
ZeroInflatedNegativeBinomial,
ZeroInflatedPoisson,
)
from pyro.distributions.util import broadcast_shape
from tests.common import assert_close
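# Background (for reference): a zero-inflated distribution mixes a point mass
# at zero with a base distribution. With mixing probability ``gate``,
# P(X = 0) = gate + (1 - gate) * P_base(X = 0), and for x > 0,
# P(X = x) = (1 - gate) * P_base(X = x).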
@pytest.mark.parametrize("gate_shape", [(), (2,), (3, 1), (3, 2)])
@pytest.mark.parametrize("base_shape", [(), (2,), (3, 1), (3, 2)])
def test_zid_shape(gate_shape, base_shape):
gate = torch.rand(gate_shape)
base_dist = Normal(torch.randn(base_shape), torch.randn(base_shape).exp())
d = ZeroInflatedDistribution(base_dist, gate=gate)
assert d.batch_shape == broadcast_shape(gate_shape, base_shape)
assert d.support == base_dist.support
d2 = d.expand([4, 3, 2])
assert d2.batch_shape == (4, 3, 2)
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_0_gate(rate):
# if gate == 0, ZIP reduces to a plain Poisson
zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.zeros(1))
zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(-99.9))
pois = Poisson(torch.tensor(rate))
s = pois.sample((20,))
zip1_prob = zip1.log_prob(s)
zip2_prob = zip2.log_prob(s)
pois_prob = pois.log_prob(s)
assert_close(zip1_prob, pois_prob)
assert_close(zip2_prob, pois_prob)
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_1_gate(rate):
# if gate == 1, ZIP reduces to Delta(0)
zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.ones(1))
zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(math.inf))
delta = Delta(torch.zeros(1))
s = torch.tensor([0.0, 1.0])
zip1_prob = zip1.log_prob(s)
zip2_prob = zip2.log_prob(s)
delta_prob = delta.log_prob(s)
assert_close(zip1_prob, delta_prob)
assert_close(zip2_prob, delta_prob)
@pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_mean_variance(gate, rate):
num_samples = 1000000
zip_ = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.tensor(gate))
s = zip_.sample((num_samples,))
expected_mean = zip_.mean
estimated_mean = s.mean()
expected_std = zip_.stddev
estimated_std = s.std()
assert_close(expected_mean, estimated_mean, atol=1e-02)
assert_close(expected_std, estimated_std, atol=1e-02)
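# For reference, the ZIP closed forms being checked against samples here are
# mean = (1 - gate) * rate and variance = (1 - gate) * rate * (1 + gate * rate).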
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("probs", [0.1, 0.5, 0.9])
def test_zinb_0_gate(total_count, probs):
# if gate == 0, ZINB reduces to a plain NegativeBinomial
zinb1 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.zeros(1),
probs=torch.tensor(probs),
)
zinb2 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate_logits=torch.tensor(-99.9),
probs=torch.tensor(probs),
)
neg_bin = NegativeBinomial(torch.tensor(total_count), probs=torch.tensor(probs))
s = neg_bin.sample((20,))
zinb1_prob = zinb1.log_prob(s)
zinb2_prob = zinb2.log_prob(s)
neg_bin_prob = neg_bin.log_prob(s)
assert_close(zinb1_prob, neg_bin_prob)
assert_close(zinb2_prob, neg_bin_prob)
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("probs", [0.1, 0.5, 0.9])
def test_zinb_1_gate(total_count, probs):
# if gate == 1, ZINB reduces to Delta(0)
zinb1 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.ones(1),
probs=torch.tensor(probs),
)
zinb2 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate_logits=torch.tensor(math.inf),
probs=torch.tensor(probs),
)
delta = Delta(torch.zeros(1))
s = torch.tensor([0.0, 1.0])
zinb1_prob = zinb1.log_prob(s)
zinb2_prob = zinb2.log_prob(s)
delta_prob = delta.log_prob(s)
assert_close(zinb1_prob, delta_prob)
assert_close(zinb2_prob, delta_prob)
@pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("logits", [-0.5, 0.5, -0.9, 1.9])
def test_zinb_mean_variance(gate, total_count, logits):
num_samples = 1000000
zinb_ = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.tensor(gate),
logits=torch.tensor(logits),
)
s = zinb_.sample((num_samples,))
expected_mean = zinb_.mean
estimated_mean = s.mean()
expected_std = zinb_.stddev
estimated_std = s.std()
assert_close(expected_mean, estimated_mean, atol=1e-01)
assert_close(expected_std, estimated_std, atol=1e-1)
| apache-2.0 |
vitaly-krugl/nupic | src/nupic/datafiles/extra/firstOrder/raw/makeDataset.py | 15 | 3462 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def createFirstOrderModel(numCategories=5, alpha=0.5):
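"""Return a random first-order Markov model: category names, a uniform
initial distribution, and a Dirichlet-sampled transition table."""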
categoryList = ['cat%02d' % i for i in range(numCategories)]
initProbability = numpy.ones(numCategories)/numCategories
transitionTable = numpy.random.dirichlet(alpha=[alpha]*numCategories,
size=numCategories)
return categoryList, initProbability, transitionTable
def generateFirstOrderData(model, numIterations=10000, seqLength=5,
resets=True, suffix='train'):
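"""Generate a CSV file of category sequences sampled from the
first-order model."""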
print "Creating %d iteration file with seqLength %d" % (numIterations, seqLength)
print "Filename",
categoryList, initProbability, transitionTable = model
initProbability = initProbability.cumsum()
transitionTable = transitionTable.cumsum(axis=1)
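# Note: taking cumulative sums turns the probability tables into CDFs, so a
# uniform draw can be mapped to a category with numpy.searchsorted
# (inverse-CDF categorical sampling), as done in the loop below.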
outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
print "Filename", outputFile
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
o = File(outputFile, fields)
seqIdx = 0
rand = numpy.random.rand()
catIdx = numpy.searchsorted(initProbability, rand)
for i in xrange(numIterations):
rand = numpy.random.rand()
if seqIdx == 0 and resets:
catIdx = numpy.searchsorted(initProbability, rand)
reset = 1
else:
catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
reset = 0
o.write([reset,categoryList[catIdx]])
seqIdx = (seqIdx+1)%seqLength
o.close()
if __name__=='__main__':
numpy.random.seed(1956)
model = createFirstOrderModel()
categoryList = model[0]
categoryFile = open("categories.txt", 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
#import pylab
#pylab.imshow(model[2], interpolation='nearest')
#pylab.show()
for resets in [True, False]:
for seqLength in [2, 10]:
for numIterations in [1000, 10000, 100000]:
generateFirstOrderData(model,
numIterations=numIterations,
seqLength=seqLength,
resets=resets,
suffix='train_%s' % ('resets' if resets else 'noresets',))
generateFirstOrderData(model, numIterations=10000, seqLength=seqLength,
resets=resets,
suffix='test_%s' % ('resets' if resets else 'noresets',))
| agpl-3.0 |
Cysu/dlearn | docs/conf.py | 1 | 8603 | # -*- coding: utf-8 -*-
#
# dlearn documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 3 15:41:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
# Mock some third-party modules
mock_modules = ['numpy', 'theano', 'theano.tensor',
'matplotlib', 'matplotlib.pyplot', 'mpl_toolkits.axes_grid1',
'skimage', 'skimage.transform', 'skimage.color',
'sklearn', 'sklearn.preprocessing']
for m in mock_modules:
sys.modules[m] = mock.Mock()
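# (The mocks above let sphinx.ext.autodoc import the package for
# introspection without requiring the heavy numerical dependencies to be
# installed at documentation build time.)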
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dlearn'
copyright = u'2014, Tong Xiao'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev'
# The full version, including alpha/beta/rc tags.
release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'dlearn.tex', u'dlearn Documentation',
u'Tong Xiao', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dlearn', u'dlearn Documentation',
[u'Tong Xiao'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dlearn', u'dlearn Documentation', u'Tong Xiao',
'dlearn', 'One line description of project.', 'Miscellaneous')
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
cc-archive/libvalidator | libvalidator/__init__.py | 1 | 13505 | # -*- coding: utf-8 -*-
"""The core of the library.
Copyright (C) 2008, 2009, 2010 Robert Gust‐Bardon and Creative Commons.
Originally contributed by Robert Gust‐Bardon.
This file is a part of the License Validation Library.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Robert Gust‐Bardon and Creative Commons"
__copyright__ = ("Copyright 2008, 2009, 2010 "
"Robert Gust‐Bardon and Creative Commons")
__credits__ = ["Robert Gust‐Bardon", "Asheesh Laroia"]
__license__ = ("GNU Lesser General Public License Version 3 "
"or any later version")
__version__ = "0.1.0"
__maintainer__ = "Robert Gust‐Bardon"
__status__ = "Beta"
import re, os, urlparse, urllib
from xml.dom import minidom
import html5lib
from html5lib import treebuilders
from pyRdfa import _process_DOM, Options
from pyRdfa.transform.DublinCore import DC_transform
import rdflib
import cc.license
class URLOpener(urllib.FancyURLopener):
"""Prevent the validation of Web pages that serve as an HTTP 404
message."""
def http_error_404(*args, **kwargs):
raise IOError
class libvalidator(object):
"""Extract license information."""
def __init__(self, *args, **kargs):
"""The constructor."""
# Namespaces that will be used in the SPARQL queries
self.namespaces = {
'old': rdflib.Namespace('http://web.resource.org/cc/'),
'cc': rdflib.Namespace('http://creativecommons.org/ns#'),
'dc': rdflib.Namespace('http://purl.org/dc/elements/1.1/'),
'xhv': rdflib.Namespace('http://www.w3.org/1999/xhtml/vocab#')
}
# The base IRI of the document that is being parsed
self.base = ''
# The headers that were sent along with the document that is
# being parsed
self.headers = None
# The location of the document that is being parsed
self.location = None
# The outcome of parsing
self.result = {
'licensedObjects': {}, # FIXME misleading name (subject)
'licenses': {},
'deprecated': False
}
def findBaseDocument(self):
"""Establish the IRI that will be used when the translation of
a relative IRI into an absolute IRI will occur."""
# FIXME There is no support for CURIEs.
# FIXME There is no support for ``xml:base``.
for e in self.dom.getElementsByTagName('base'):
if e.hasAttribute('href'):
self.base = e.getAttribute('href')
return
if self.headers is not None:
header = self.headers.get('Content-Location')
if header is not None:
self.base = unicode(header)
return
self.base = self.location
def formDocument(self, code):
"""Parse the source code written in RDF/XML, HTML, or XHTML in
order to obtain a DOM tree."""
# Translate CDATA blocks into PCDATA blocks.
reCDATA = re.compile(r'<!\[CDATA\[((?:[^\]]+|\](?!\]>))*)\]\]>')
for m in re.finditer(reCDATA, code):
code = code.replace(m.group(0), m.group(1).replace('&', '&amp;') \
.replace('<', '&lt;') \
.replace('>', '&gt;'))
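# For example, '<![CDATA[a < b & c]]>' becomes 'a &lt; b &amp; c'.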
# Try to construct a DOM tree using xml.dom.minidom and, if
# that fails, fall back to using html5lib.HTMLParser to
# accomplish the goal.
dom = None
try:
dom = minidom.parseString(code)
except:
parser = html5lib.HTMLParser(
tree=treebuilders.getTreeBuilder('dom')
)
dom = parser.parse(code)
return (dom, dom.toxml())
def extractLicensedObjects(self, code, base):
"""Extract licensed object from documents written in RDF/XML."""
# FIXME When it comes to RDF, the name of method is misleading,
# because subjects are licensed, not objects (cf. the RDF
# triple).
# FIXME The base IRI is not used at all in this method.
# Obtain a DOM tree and a well-formed document given a
# document written in RDF/XML.
(dom, code) = self.formDocument(code)
# Obtain a graph from the DOM tree.
graph = rdflib.ConjunctiveGraph()
graph.parse(rdflib.StringInputSource(code.encode('utf-8')))
# Perform a SPARQL query looking for relations which state that
# a subject is licensed.
# FIXME Should dc:license be supported?
for row in graph.query(
'SELECT ?a ?b WHERE {'
'{ ?a old:license ?b }'
' UNION '
'{ ?a cc:license ?b }'
' UNION '
'{ ?a xhv:license ?b }'
' UNION '
'{ ?a dc:rights ?b }'
' UNION '
'{ ?a dc:rights.license ?b }'
'}',
initNs = dict(
old = self.namespaces['old'],
cc = self.namespaces['cc'],
dc = self.namespaces['dc'],
xhv = self.namespaces['xhv'])
):
# Each row (the result) contains a pair consisting of a
# subject and an object.
# Create an empty list for the licensing information on a
# given subject, should no information already exist.
if not self.result['licensedObjects'].has_key(str(row[0])):
self.result['licensedObjects'][str(row[0])] = []
# Case 1: the object is a blank node.
if isinstance(row[1], rdflib.BNode):
# FIXME Fish for interesting data (e.g. cc:Agent).
"""
triples = []
for r in graph.query('SELECT ?a ?b WHERE { '+
row[1].n3()+' ?a ?b }'):
triples.append((str(r[0]), str(r[1])))
self.result['licensedObjects'][str(row[0])].append(triples)
"""
pass
# Case 2: the object is an RDF URI reference.
elif isinstance(row[1], rdflib.URIRef):
# Check if the information has not already appeared.
if str(row[1]) not in \
self.result['licensedObjects'][str(row[0])]:
self.result['licensedObjects'][str(row[0])].\
append(str(row[1]))
# Retrieve information about the license stated
# using the object, if this license has not
# appeared earlier.
if not self.result['licenses'].has_key(str(row[1])):
license = self.parseLicense(str(row[1]))
# Check if any information about the license
# stated by the object has been retrieved
if license != str(row[1]):
# Store the information about the retrieved
# license
self.result['licenses'][str(row[1])] = license
# Case 3: the object is a literal.
else:
self.result['licensedObjects'][str(row[0])].\
append(str(row[1]))
def parse(self, code, location = None, headers = None,
openLocalFiles = False):
"""Parse the source code in order to retrieve statements
referring to licensed subjects."""
# Use the default location, if none is given.
if location is not None:
self.location = location
# Use the default headers, if none are given.
if headers is not None:
self.headers = headers
# Parse the source code to obtain a DOM tree and well-formed
# source code without any CDATA blocks.
(self.dom, self.code) = self.formDocument(code)
# Establish the base IRI for the document.
self.findBaseDocument()
# The following list stores all the documents written in
# RDF/XML that are meant to be processed in order to retrieve
# license information.
sources = []
# Find parts of the document (be it elements or comments) that
# contain RDF/XML.
reRDF = re.compile(
# <rdf:RDF
'<([^\s<>]+)'
# xmlns:dc="http://purl.org/dc/elements/1.1/"
'\s+(?:[^>]+\s+)?'
# xmlns
'xmlns'
# :rdf
'(?::[^=]+)?'
# =
'\s*=\s*'
# "http://www\.w3\.org/1999/02/22-rdf-syntax-ns#"
'(?:'
'("http://www\.w3\.org/1999/02/22-rdf-syntax-ns#")'
'|'
'(\'http://www\.w3\.org/1999/02/22\-rdf\-syntax\-ns#\')'
')'
# ><Work rdf:about="http://foo.bar/">...</Work>
'.*?'
# </rdf:RDF>
'</\\1\s*>'
, re.DOTALL)
for m in re.finditer(reRDF, code):
sources.append([self.base, m.group(0)])
# FIXME The element should only be classified as
# deprecated in case it actually contains license
# information.
# Note: this bug was discovered in February 2009 thanks to
# Mr. Hans Loepfe.
self.result['deprecated'] = True
# Obtain the RDF triples (written in RDF/XML) from the original
# document.
dump = _process_DOM(self.dom, self.location, 'xml',
Options(warnings = False,
transformers = [DC_transform]))
# Obtain a graph from the document written using the RDF/XML
# notation.
graph = rdflib.ConjunctiveGraph()
graph.parse(rdflib.StringInputSource(dump), format='xml')
# Append the document written in RDF/XML to the list of
# documents to be processed.
sources.append([self.base, graph.serialize(format='xml')])
# Iterate over the objects that provide metadata about the
# document that is being analysed.
for row in graph.query('SELECT ?b WHERE { ?a xhv:meta ?b . }',
initNs = dict(xhv = self.namespaces['xhv'])):
# Try to retrieve the resource.
try:
reHyperlink = re.compile(
'^(?:data:|((ftp|gopher|https?)://))\S+$',
re.IGNORECASE
)
if openLocalFiles == True or \
reHyperlink.search(row[0]) is not None:
f = URLOpener().open(row[0])
# FIXME No Internet media type detection is
# performed (it is assumed that the document
# is written using the RDF/XML notation).
if not unicode(row[0]).startswith('data:'):
# FIXME The Content-Location HTTP header is
# ignored.
sources.append([str(row[0]), f.read()])
else:
sources.append([self.base, f.read()])
except IOError:
pass
# Extract the license information from all the collected
# documents written in RDF/XML.
for (base, source) in sources:
self.extractLicensedObjects(source, base)
return self.result
def parseLicense(self, iri):
"""Employ cc.license in order to obtain additional information
about a license that is stated using an IRI."""
try:
license = cc.license.selectors.choose('standard').by_uri(iri)
except cc.license.CCLicenseError, err:
# Return the original IRI, should no data on the license
# be available through cc.license.
return iri
# Note: ``requires``, ``permits``, and ``prohibits`` were
# originally not used due to a bug in cc.license.
return {'title' : license.title(),
'description' : license.description(),
'jurisdiction' : license.jurisdiction,
'current_version' : license.current_version,
'version' : license.version,
'superseded' : license.superseded,
'deprecated' : license.deprecated,
'requires' : license.requires,
'permits' : license.permits,
'prohibits' : license.prohibits,
'libre' : license.libre,
'license_class' : license.license_class,
'license_code' : license.license_code,
'uri' : license.uri}
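# Example usage (an illustrative sketch; the source document is hypothetical):
#
#     validator = libvalidator()
#     result = validator.parse(html_source, location='http://example.org/')
#     print result['licensedObjects']
#     print result['licenses']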
| lgpl-3.0 |
tensorflow/privacy | research/neuracrypt_attack_2021/attack.py | 1 | 21670 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This program solves the NeuraCrypt challenge to 100% accuracy.
# Given a set of encoded images and original versions of those,
# it shows how to match the original to the encoded.
import collections
import hashlib
import time
import multiprocessing as mp
import torch
import numpy as np
import torch.nn as nn
import scipy.stats
import matplotlib.pyplot as plt
from PIL import Image
import jax
import jax.numpy as jn
import objax
import scipy.optimize
# Objax neural network that's going to embed patches to a
# low dimensional space to guess if two patches correspond
# to the same original image.
class Model(objax.Module):
def __init__(self):
IN = 15
H = 64
self.encoder = objax.nn.Sequential([
objax.nn.Linear(IN, H),
objax.functional.leaky_relu,
objax.nn.Linear(H, H),
objax.functional.leaky_relu,
objax.nn.Linear(H, 8)])
self.decoder = objax.nn.Sequential([
objax.nn.Linear(IN, H),
objax.functional.leaky_relu,
objax.nn.Linear(H, H),
objax.functional.leaky_relu,
objax.nn.Linear(H, 8)])
self.scale = objax.nn.Linear(1, 1, use_bias=False)
def encode(self, x):
# Encode turns original images into feature space
a = self.encoder(x)
a = a/jn.sum(a**2,axis=-1,keepdims=True)**.5
return a
def decode(self, x):
# And decode turns encoded images into feature space
a = self.decoder(x)
a = a/jn.sum(a**2,axis=-1,keepdims=True)**.5
return a
# Proxy dataset for analysis
class ImageNet:
num_chan = 3
private_kernel_size = 16
hidden_dim = 2048
img_size = (256, 256)
private_depth = 7
def __init__(self, remove):
self.remove_pixel_shuffle = remove
# Original dataset as used in the NeuraCrypt paper
class Xray:
num_chan = 1
private_kernel_size = 16
hidden_dim = 2048
img_size = (256, 256)
private_depth = 4
def __init__(self, remove):
self.remove_pixel_shuffle = remove
## The following class is taken directly from the NeuraCrypt codebase.
## https://github.com/yala/NeuraCrypt
## which is originally licensed under the MIT License
class PrivateEncoder(nn.Module):
def __init__(self, args, width_factor=1):
super(PrivateEncoder, self).__init__()
self.args = args
input_dim = args.num_chan
patch_size = args.private_kernel_size
output_dim = args.hidden_dim
num_patches = (args.img_size[0] // patch_size) **2
self.noise_size = 1
args.input_dim = args.hidden_dim
layers = [
nn.Conv2d(input_dim, output_dim * width_factor, kernel_size=patch_size, dilation=1 ,stride=patch_size),
nn.ReLU()
]
for _ in range(self.args.private_depth):
layers.extend( [
nn.Conv2d(output_dim * width_factor, output_dim * width_factor , kernel_size=1, dilation=1, stride=1),
nn.BatchNorm2d(output_dim * width_factor, track_running_stats=False),
nn.ReLU()
])
self.image_encoder = nn.Sequential(*layers)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, output_dim * width_factor))
self.mixer = nn.Sequential( *[
nn.ReLU(),
nn.Linear(output_dim * width_factor, output_dim)
])
def forward(self, x):
encoded = self.image_encoder(x)
B, C, H,W = encoded.size()
encoded = encoded.view([B, -1, H*W]).transpose(1,2)
encoded += self.pos_embedding
encoded = self.mixer(encoded)
## Shuffle indicies
if not self.args.remove_pixel_shuffle:
shuffled = torch.zeros_like(encoded)
for i in range(B):
idx = torch.randperm(H*W, device=encoded.device)
for j, k in enumerate(idx):
shuffled[i,j] = encoded[i,k]
encoded = shuffled
return encoded
## End copied code
def setup(ds):
"""
Load the datasets to use. Nothing interesting to see.
"""
global x_train, y_train
if ds == 'imagenet':
import torchvision
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(256),
torchvision.transforms.ToTensor()])
imagenet_data = torchvision.datasets.ImageNet('/mnt/data/datasets/unpacked_imagenet_pytorch/',
split='val',
transform=transform)
data_loader = torch.utils.data.DataLoader(imagenet_data,
batch_size=100,
shuffle=True,
num_workers=8)
r = []
for x,_ in data_loader:
if len(r) > 1000: break
print(x.shape)
r.extend(x.numpy())
x_train = np.array(r)
print(x_train.shape)
elif ds == 'xray':
import torchvision
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(256),
torchvision.transforms.ToTensor()])
imagenet_data = torchvision.datasets.ImageFolder('CheXpert-v1.0/train',
transform=transform)
data_loader = torch.utils.data.DataLoader(imagenet_data,
batch_size=100,
shuffle=True,
num_workers=8)
r = []
for x,_ in data_loader:
if len(r) > 1000: break
print(x.shape)
r.extend(x.numpy())
x_train = np.array(r)
print(x_train.shape)
elif ds == 'challenge':
x_train = np.load("orig-7.npy")
print(np.min(x_train), np.max(x_train), x_train.shape)
else:
raise
def gen_train_data():
"""
Generate aligned training data to train a patch similarity function.
Given some original images, generate lots of encoded versions.
"""
global encoded_train, original_train
encoded_train = []
original_train = []
args = Xray(True)
C = 100
for i in range(30):
print(i)
torch.manual_seed(int(time.time()))
e = PrivateEncoder(args).cuda()
batch = np.random.randint(0, len(x_train), size=C)
xin = x_train[batch]
r = []
for i in range(0,C,32):
r.extend(e(torch.tensor(xin[i:i+32]).cuda()).detach().cpu().numpy())
r = np.array(r)
encoded_train.append(r)
original_train.append(xin)
def features_(x, moments=15, encoded=False):
"""
Compute higher-order moments for patches in an image to use as
features for the neural network.
"""
x = np.array(x, dtype=np.float32)
dim = 2
arr = np.array([np.mean(x, dim)] + [abs(scipy.stats.moment(x, moment=i, axis=dim))**(1/i) for i in range(1,moments)])
return arr.transpose((1,2,0))
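# For example, an input of shape (batch, patches, patch_dim) yields an array
# of shape (batch, patches, moments): one feature vector per patch.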
def features(x, encoded):
"""
Given the original images or the encoded images, generate the
features to use for the patch similarity function.
"""
print('start shape',x.shape)
if len(x.shape) == 3:
x = x - np.mean(x,axis=0,keepdims=True)
else:
# count x 100 x 256 x 768
print(x[0].shape)
x = x - np.mean(x,axis=1,keepdims=True)
# remove per-neural-network dimension
x = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
p = mp.Pool(96)
B = len(x) // 96
print(1)
bs = [x[i:i+B] for i in range(0,len(x),B)]
print(2)
r = p.map(features_, bs)
#r = features_(bs[0][:100])
print(3)
p.close()
#r = np.array(r)
#print('finish',r.shape)
return np.concatenate(r, axis=0)
def get_train_features():
"""
Create features for the entire datasets.
"""
global xs_train, ys_train
print(x_train.shape)
original_train_ = np.array(original_train)
encoded_train_ = np.array(encoded_train)
print("Computing features")
ys_train = features(encoded_train_, True)
patch_size = 16
ss = original_train_.shape[3] // patch_size
# Okay so this is an ugly transpose block.
# We are going from [outer_batch, batch_size, channels, width, height]
# to [outer_batch, batch_size, channels, width/patch_size, patch_size, height/patch_size, patch_size]
# Then we reshape this and flatten so that we end up with
# [outer_batch, batch_size, width/patch_size, height/patch_size, patch_size**2*channels]
# So that now we can run features on the last dimension
original_train_ = original_train_.reshape((original_train_.shape[0],
original_train_.shape[1],
original_train_.shape[2],
ss,patch_size,ss,patch_size)).transpose((0,1,3,5,2,4,6)).reshape((original_train_.shape[0], original_train_.shape[1], ss**2, patch_size**2))
xs_train = features(original_train_, False)
print(xs_train.shape, ys_train.shape)
def train_model():
"""
Train the patch similarity function
"""
global ema, model
model = Model()
def loss(x, y):
"""
K-way contrastive loss as in SimCLR et al.
The idea is that we should embed x and y so that they are similar
to each other, and dis-similar from others. To do this we have a
softmx loss over one dimension to make the values large on the diagonal
and small off-diagonal.
"""
a = model.encode(x)
b = model.decode(y)
mat = a@b.T
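# Because encode/decode both L2-normalize their outputs, mat[i, j] is the
# cosine similarity between embedded patch x_i and embedded patch y_j.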
return objax.functional.loss.cross_entropy_logits_sparse(
logits=jn.exp(jn.clip(model.scale.w.value, -2, 4)) * mat,
labels=np.arange(a.shape[0])).mean()
ema = objax.optimizer.ExponentialMovingAverage(model.vars(), momentum=0.999)
gv = objax.GradValues(loss, model.vars())
encode_ema = ema.replace_vars(lambda x: model.encode(x))
decode_ema = ema.replace_vars(lambda y: model.decode(y))
def train_op(x, y):
"""
No one was ever fired for using Adam with 1e-4.
"""
g, v = gv(x, y)
opt(1e-4, g)
ema()
return v
opt = objax.optimizer.Adam(model.vars())
train_op = objax.Jit(train_op, gv.vars() + opt.vars() + ema.vars())
ys_ = ys_train
print(ys_.shape)
xs_ = xs_train.reshape((-1, xs_train.shape[-1]))
ys_ = ys_.reshape((-1, ys_train.shape[-1]))
# The model scale trick here is taken from CLIP.
# Let the model decide how confident to make its own predictions.
model.scale.w.assign(jn.zeros((1,1)))
valid_size = 1000
print(xs_train.shape)
# SimCLR likes big batches
B = 4096
for it in range(80):
print()
ms = []
for i in range(1000):
# First batch is smaller, to make training more stable
bs = [B // 64, B][it>0]
batch = np.random.randint(0, len(xs_)-valid_size, size=bs)
r = train_op(xs_[batch], ys_[batch])
# This shouldn't happen, but if it does, better to abort early
if np.isnan(r):
print("Die on nan")
print(ms[-100:])
return
ms.append(r)
print('mean',np.mean(ms), 'scale', model.scale.w.value)
print('loss',loss(xs_[-100:], ys_[-100:]))
a = encode_ema(xs_[-valid_size:])
b = decode_ema(ys_[-valid_size:])
br = b[np.random.permutation(len(b))]
print('score',np.mean(np.sum(a*b,axis=(1)) - np.sum(a*br,axis=(1))),
np.mean(np.sum(a*b,axis=(1)) > np.sum(a*br,axis=(1))))
ckpt = objax.io.Checkpoint("saved", keep_ckpts=0)
ema.replace_vars(lambda: ckpt.save(model.vars(), 0))()
def load_challenge():
"""
Load the challenge datast for attacking
"""
global xs, ys, encoded, original, ooriginal
print("SETUP: Loading matrixes")
# The encoded images
encoded = np.load("challenge-7.npy")
# And the original images
ooriginal = original = np.load("orig-7.npy")
print("Sizes", encoded.shape, ooriginal.shape)
# Again do that ugly resize thing to make the features be on the last dimension
# Look up above to see what's going on.
patch_size = 16
ss = original.shape[2] // patch_size
original = ooriginal.reshape((original.shape[0],1,ss,patch_size,ss,patch_size))
original = original.transpose((0,2,4,1,3,5))
original = original.reshape((original.shape[0], ss**2, patch_size**2))
def match_sub(args):
"""
Find the best way to undo the permutation between two images.
"""
vec1, vec2 = args
value = np.sum((vec1[None,:,:] - vec2[:,None,:])**2,axis=2)
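# linear_sum_assignment solves the minimum-cost assignment problem (a
# Hungarian-style algorithm), giving the permutation that best aligns
# the rows of vec2 to the rows of vec1.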
row, col = scipy.optimize.linear_sum_assignment(value)
return col
def recover_local_permutation():
"""
Given a set of encoded images, return a new encoding without permutations
"""
global encoded, ys
p = mp.Pool(96)
print('recover local')
local_perm = p.map(match_sub, [(encoded[0], e) for e in encoded])
local_perm = np.array(local_perm)
encoded_perm = []
for i in range(len(encoded)):
encoded_perm.append(encoded[i][np.argsort(local_perm[i])])
encoded_perm = np.array(encoded_perm)
encoded = np.array(encoded_perm)
p.close()
def recover_better_local_permutation():
"""
Given a set of encoded images, return a new encoding, but better!
"""
global encoded, ys
# Now instead of pairing all images to image 0, we compute the mean l2 vector
# and then pair all images onto the mean vector. Slightly more noise resistant.
p = mp.Pool(96)
target = encoded.mean(0)
local_perm = p.map(match_sub, [(target, e) for e in encoded])
local_perm = np.array(local_perm)
# Probably we didn't change by much, generally <0.1%
print('improved changed by', np.mean(local_perm != np.arange(local_perm.shape[1])))
encoded_perm = []
for i in range(len(encoded)):
encoded_perm.append(encoded[i][np.argsort(local_perm[i])])
encoded = np.array(encoded_perm)
p.close()
def compute_patch_similarity():
"""
Compute the feature vectors for each patch using the trained neural network.
"""
global xs, ys, xs_image, ys_image
print("Computing features")
ys = features(encoded, encoded=True)
xs = features(original, encoded=False)
model = Model()
ckpt = objax.io.Checkpoint("saved", keep_ckpts=0)
ckpt.restore(model.vars())
xs_image = model.encode(xs)
ys_image = model.decode(ys)
assert xs.shape[0] == xs_image.shape[0]
print("Done")
def match(args, ret_col=False):
"""
Compute the similarity between image features and encoded features.
"""
vec1, vec2s = args
r = []
open("/tmp/start%d.%d"%(np.random.randint(10000),time.time()),"w").write("hi")
for vec2 in vec2s:
value = np.sum(vec1[None,:,:] * vec2[:,None,:],axis=2)
row, col = scipy.optimize.linear_sum_assignment(-value)
r.append(value[row,col].mean())
return r
def recover_global_matching_first():
"""
Recover the global matching of original to encoded images by doing
an all-pairs matching problem
"""
global global_matching, ys_image, encoded
matrix = []
p = mp.Pool(96)
xs_image_ = np.array(xs_image)
ys_image_ = np.array(ys_image)
matrix = p.map(match, [(x, ys_image_) for x in xs_image_])
matrix = np.array(matrix).reshape((xs_image.shape[0],
xs_image.shape[0]))
row, col = scipy.optimize.linear_sum_assignment(-np.array(matrix))
global_matching = np.argsort(col)
print('glob',list(global_matching))
p.close()
def recover_global_permutation():
"""
Find the way that the encoded images are permuted off of the original images
"""
global global_permutation
print("Glob match", global_matching)
overall = []
for i,j in enumerate(global_matching):
overall.append(np.sum(xs_image[j][None,:,:] * ys_image[i][:,None,:],axis=2))
overall = np.mean(overall, 0)
row, col = scipy.optimize.linear_sum_assignment(-overall)
try:
print("Changed frac:", np.mean(global_permutation!=np.argsort(col)))
except:
pass
global_permutation = np.argsort(col)
def recover_global_matching_second():
"""
Match each encoded image with its original encoded image,
but better by relying on the global permutation.
"""
global global_matching_second, global_matching
ys_fix = []
for i in range(ys_image.shape[0]):
ys_fix.append(ys_image[i][global_permutation])
ys_fix = np.array(ys_fix)
print(xs_image.shape)
sims = []
for i in range(0,len(xs_image),10):
tmp = np.mean(xs_image[None,:,:,:] * ys_fix[i:i+10][:,None,:,:],axis=(2,3))
sims.extend(tmp)
sims = np.array(sims)
print(sims.shape)
row, col = scipy.optimize.linear_sum_assignment(-sims)
print('arg',sims.argmax(1))
print("Same matching frac", np.mean(col == global_matching) )
print(col)
global_matching = col
def extract_by_training(resume):
"""
Final recovery process by extracting the neural network
"""
global inverse
device = torch.device('cuda:1')
if not resume:
inverse = PrivateEncoder(Xray(True)).cuda(device)
# More adam to train.
optimizer = torch.optim.Adam(inverse.parameters(), lr=0.0001)
this_xs = ooriginal[global_matching]
this_ys = encoded[:,global_permutation,:]
for i in range(2000):
idx = np.random.randint(0, len(this_xs), size=32)  # randint replaces the deprecated random_integers
xbatch = torch.tensor(this_xs[idx]).cuda(device)
ybatch = torch.tensor(this_ys[idx]).cuda(device)
optimizer.zero_grad()
guess_output = inverse(xbatch)
# L1 loss because we don't want to be sensitive to outliers
error = torch.mean(torch.abs(guess_output-ybatch))
error.backward()
optimizer.step()
print(error)
def test_extract():
"""
Now we can recover the matching much better by computing the estimated
encodings for each original image.
"""
global err, global_matching, guessed_encoded, smatrix
device = torch.device('cuda:1')
print(ooriginal.shape, encoded.shape)
out = []
for i in range(0,len(ooriginal),32):
print(i)
out.extend(inverse(torch.tensor(ooriginal[i:i+32]).cuda(device)).cpu().detach().numpy())
guessed_encoded = np.array(out)
# Now we have to compare each encoded image with every other original image.
# Do this fast with some matrix multiplies.
out = guessed_encoded.reshape((len(encoded), -1))
real = encoded[:,global_permutation,:].reshape((len(encoded), -1))
@jax.jit
def foo(x, y):
return jn.square(x[:,None] - y[None,:]).sum(2)
smatrix = np.zeros((len(out), len(out)))
B = 500
for i in range(0,len(out),B):
print(i)
for j in range(0,len(out),B):
smatrix[i:i+B, j:j+B] = foo(out[i:i+B], real[j:j+B])
# And the final time you'll have to look at a min-weight matching, I promise.
row, col = scipy.optimize.linear_sum_assignment(np.array(smatrix))
r = np.array(smatrix)
print(list(row)[::100])
print("Differences", np.mean(np.argsort(col) != global_matching))
global_matching = np.argsort(col)
def perf(steps=[]):
if len(steps) == 0:
steps.append(time.time())
else:
print("Last Time Elapsed:", time.time()-steps[-1], ' Total Time Elapsed:', time.time()-steps[0])
steps.append(time.time())
time.sleep(1)
if __name__ == "__main__":
if True:
perf()
setup('challenge')
perf()
gen_train_data()
perf()
get_train_features()
perf()
train_model()
perf()
if True:
load_challenge()
perf()
recover_local_permutation()
perf()
recover_better_local_permutation()
perf()
compute_patch_similarity()
perf()
recover_global_matching_first()
perf()
for _ in range(3):
recover_global_permutation()
perf()
recover_global_matching_second()
perf()
for i in range(3):
recover_global_permutation()
perf()
extract_by_training(i > 0)
perf()
test_extract()
perf()
print(perf())
| apache-2.0 |
abakan/napi | napi/transformers.py | 1 | 16938 | '''This module defines abstract syntax tree (AST) transformers that handle
array operations delicately.
.. ipython:: python
:suppress:
from numpy import *
import napi
napi.register_magic()
%napi
napi AST transformers have the following options:
.. glossary::
squeezing
when shapes of arrays in a logical operation do not match, squeezing
removes single-dimensional entries from the shape and tries to
compare the arrays again:
.. ipython::
In [1]: %napi squeeze
In [2]: ones(4, bool) or zeros((1,4,1), bool)
short-circuiting
*napi* AST transformers perform short-circuit evaluation in the same way
Python boolean operators do. This happens when the number of elements
in the arrays is greater than or equal to the user-set threshold:
.. ipython::
In [2]: z = zeros(10000000, bool)
In [3]: %time z and z and z
In [4]: %napi shortcircuit 1000
In [5]: %time z and z and z
In this extreme example, where all elements of the first array are
false, the operation is more than 10 times faster.
.. ipython::
In [1]: randbools = lambda *n: random.randn(*n) < 0
In [1]: a, b, c, d, e, f, g = (randbools(10000) for i in range(7))
In [1]: %time with_sc = a or b or c or d or e or f or g
In [1]: %napi shortcircuit
In [1]: %time wout_sc = a or b or c or d or e or f or g
In [1]: all(with_sc == wout_sc)
In this example too, short-circuiting performs considerably faster.
.. note::
For really small arrays (~100 elements), short-circuiting may
perform worse, but since the cost of the operation is negligible,
such a performance loss will usually be acceptable.
.. _truth: http://docs.python.org/library/stdtypes.html#truth-value-testing
"""
import ast
import operator
from ast import fix_missing_locations as fml
from ast import copy_location, parse
from _ast import Name, Expression, Num, Str, keyword
from _ast import And, Or, Not, Eq, NotEq, Lt, LtE, Gt, GtE
from _ast import BoolOp, Compare, Subscript, Load, Index, Call, List
from _ast import Dict
from numbers import Number
import numpy
from numpy import ndarray
_setdefault = {}.setdefault
ZERO = lambda dtype: _setdefault(dtype, numpy.zeros(1, dtype)[0])
__all__ = ['NapiTransformer', 'LazyTransformer',
'napi_compare', 'napi_and', 'napi_or']
def ast_name(id, ctx=Load()):
name = Name(id, ctx)
name._fields = ('id', 'ctx')
return name
def ast_smart(val):
"""Return a suitable subclass of :class:`ast.AST` for storing numbers
or strings. For other types of objects, return a node class that will
indicate that the variable is contained in one of the global or local
namespaces."""
if isinstance(val, Number):
return Num(n=val)
elif isinstance(val, basestring):
return Str(s=val)
else:
return ast_name(str(val))
COMPARE = {
Eq: operator.eq,
NotEq: operator.ne,
Lt: operator.lt,
LtE: operator.le,
Gt: operator.gt,
GtE: operator.ge,
'Eq': operator.eq,
'NotEq': operator.ne,
'Lt': operator.lt,
'LtE': operator.le,
'Gt': operator.gt,
'GtE': operator.ge,
}
ATTRMAP = {
Num: 'n',
Str: 's',
}
EVALSET = {List: '',
Dict: ''
}
RESERVED = {'True': True, 'False': False, 'None': None}
def napi_compare(left, ops, comparators, **kwargs):
"""Make pairwise comparisons of comparators."""
values = []
for op, right in zip(ops, comparators):
value = COMPARE[op](left, right)
values.append(value)
left = right
result = napi_and(values, **kwargs)
if isinstance(result, ndarray):
return result
else:
return bool(result)
def napi_and(values, **kwargs):
"""Perform element-wise logical *and* operation on arrays.
If *values* contains a non-array object with truth_ value **False**, the
outcome will be an array of **False**\s of suitable shape, without the
arrays being evaluated. Non-array objects with truth value **True** are omitted.
If array shapes do not match (after squeezing when enabled by user),
:exc:`ValueError` is raised.
This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`."""
arrays = []
result = None
shapes = set()
for value in values:
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif not value:
result = value
if len(shapes) > 1 and kwargs.get('sq', kwargs.get('squeeze', False)):
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
if len(shapes) > 1:
raise ValueError('array shape mismatch')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.zeros(shape, bool)
else:
return result
elif arrays:
sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
if sc and numpy.prod(shape) >= sc:
return short_circuit_and(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_and(*arrays)
else:
return numpy.all(arrays, 0)
else:
return value
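# A minimal sketch of napi_and semantics (illustrative):
#
#     >>> import numpy
#     >>> napi_and([numpy.array([True, False]), numpy.array([True, True])])
#     array([ True, False])
#     >>> napi_and([numpy.ones(3, bool), 0])  # a falsy scalar short-circuits
#     array([False, False, False])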
def short_circuit_and(arrays, shape):
a = arrays.pop(0)
nz = (a if a.dtype == bool else a.astype(bool)).nonzero()
if len(nz) > 1:
while arrays:
a = arrays.pop()[nz]
which = a if a.dtype == bool else a.astype(bool)
nz = tuple(i[which] for i in nz)
else:
nz = nz[0]
while arrays:
a = arrays.pop()[nz]
nz = nz[a if a.dtype == bool else a.astype(bool)]
result = numpy.zeros(shape, bool)
result[nz] = True
return result
def napi_or(values, **kwargs):
"""Perform element-wise logical *or* operation on arrays.
If *values* contains a non-array object with truth_ value **True**, the
outcome will be an array of **True**\s of suitable shape, without the
arrays being evaluated. Non-array objects with truth value **False** are omitted.
If array shapes do not match (after squeezing when enabled by user),
:exc:`ValueError` is raised.
This function uses :obj:`numpy.logical_or` or :obj:`numpy.any`."""
arrays = []
result = None
shapes = set()
for value in values:
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif value:
result = value
if len(shapes) > 1 and kwargs.get('squeeze', kwargs.get('sq', False)):
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
if len(shapes) > 1:
raise ValueError('array shape mismatch')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.ones(shape, bool)
else:
return result
elif arrays:
sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
if sc and numpy.prod(shape) >= sc:
return short_circuit_or(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_or(*arrays)
else:
return numpy.any(arrays, 0)
else:
return value
def short_circuit_or(arrays, shape):
a = arrays.pop(0)
z = ZERO(a.dtype)
nz = (a == z).nonzero()
if len(nz) > 1:
while arrays:
a = arrays.pop()
which = a[nz] == ZERO(a.dtype)
nz = tuple(i[which] for i in nz)
else:
nz = nz[0]
while arrays:
a = arrays.pop()[nz]
nz = nz[a == ZERO(a.dtype)]
result = numpy.ones(shape, bool)
result[nz] = False
return result
class LazyTransformer(ast.NodeTransformer):
"""An :mod:`ast` transformer that replaces chained comparison and logical
operation expressions with function calls."""
def __init__(self, **kwargs):
self._prefix = kwargs.pop('prefix', '')
self._kwargs = [keyword(arg=key, value=ast_smart(value))
for key, value in kwargs.items()]
def visit_Compare(self, node):
"""Replace chained comparisons with calls to :func:`.napi_compare`."""
if len(node.ops) > 1:
func = Name(id=self._prefix + 'napi_compare', ctx=Load())
args = [node.left,
List(elts=[Str(op.__class__.__name__)
for op in node.ops], ctx=Load()),
List(elts=node.comparators, ctx=Load())]
node = Call(func=func, args=args, keywords=self._kwargs)
fml(node)
self.generic_visit(node)
return node
def visit_BoolOp(self, node):
"""Replace logical operations with calls to :func:`.napi_and` or
:func:`.napi_or`."""
if isinstance(node.op, And):
func = Name(id=self._prefix + 'napi_and', ctx=Load())
else:
func = Name(id=self._prefix + 'napi_or', ctx=Load())
args = [List(elts=node.values, ctx=Load())]
node = Call(func=func, args=args, keywords=self._kwargs)
fml(node)
self.generic_visit(node)
return node
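# For example, LazyTransformer rewrites ``a < b < c`` into
# ``napi_compare(a, ['Lt', 'Lt'], [b, c])`` and ``a and b`` into
# ``napi_and([a, b])``, deferring evaluation to the runtime helpers above.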
class NapiTransformer(ast.NodeTransformer):
"""An :mod:`ast` transformer that evaluates chained comparison and logical
operation expressions of arrays while transforming the AST."""
def __init__(self, **kwargs):
self._g, self._l = kwargs.pop('globals', {}), kwargs.pop('locals', {})
self._ti = 0
self._kwargs = kwargs
self._indent = 0
if not kwargs.get('debug', False):
self._debug = lambda *args, **kwargs: None
self._sc = kwargs.get('sc', 10000)
#self._which = None
self._evaluate = kwargs.get('evaluate', False)
self._subscript = kwargs.get('subscript')
def __getitem__(self, node):
if isinstance(node, Name):
name = node.id
try:
return self._l[name]
except KeyError:
try:
return self._g[name]
except KeyError:
try:
return RESERVED[name]
except KeyError:
raise NameError('name {} is not defined'
.format(repr(name)))
if node.__class__ in EVALSET:
return eval(node)
try:
return getattr(node, ATTRMAP[node.__class__])
except KeyError:
self._debug('_get', node)
expr = Expression(fml(NapiTransformer(globals=self._g,
locals=self._l,
**self._kwargs).visit(node)))
return eval(compile(expr, '<string>', 'eval'), self._g, self._l)
def __setitem__(self, name, value):
self._debug('self[{}] = {}'.format(name, value))
if self._subscript:
self._l[self._subscript][name] = value
else:
self._l[name] = value
def _tn(self):
"""Return a temporary variable name."""
self._ti += 1
return '__temp__' + str(self._ti)
def _incr(self):
self._indent += 1
def _decr(self):
self._indent -= 1
def _debug(self, *args, **kwargs):
print((self._indent + kwargs.get('incr', 0)) * ' ' +
' '.join(['{}'] * len(args)).format(*args))
def visit_UnaryOp(self, node):
"""Interfere with ``not`` operation to :func:`numpy.logical_not`."""
if isinstance(node.op, Not):
self._debug('UnaryOp', node.op, incr=1)
operand = self[node.operand]
self._debug('|-', operand, incr=2)
tn = self._tn()
result = numpy.logical_not(operand)
self._debug('|_', result, incr=2)
self[tn] = result
return ast_name(tn)
else:
return self.generic_visit(node)
def visit_Compare(self, node):
self._debug('Compare', node.ops, incr=1)
if len(node.ops) > 1:
values = []
left = self[node.left]
for op, right in zip(node.ops, node.comparators):
right = self[right]
tn = self._tn()
value = COMPARE[op.__class__](left, right)
self[tn] = value
values.append(ast_name(tn, value))
left = right
val = BoolOp(And(), values)
return self.visit_BoolOp(val)
self.generic_visit(node)
return node
def visit_BoolOp(self, node):
"""Interfere with boolean operations and use :func:`numpy.all` and
:func:`numpy.any` functions for ``and`` and ``or`` operations.
*axis* argument to these functions is ``0``."""
self._incr()
self._debug('BoolOp', node.op)
if isinstance(node.op, And):
result = self._and(node)
else:
result = self._or(node)
self._debug('|_', result, incr=1)
self._decr()
return self._return(result, node)
def _and(self, node):
arrays = []
result = None
shapes = set()
for item in node.values:
value = self[item]
self._debug('|-', value, incr=1)
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif not value:
result = value
if len(shapes) > 1:
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.zeros(shape, bool)
else:
return result
elif arrays:
self._debug('|~ Arrays:', arrays, incr=1)
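            # large conjunctions (numpy.prod(shape) >= self._sc elements) are
            # delegated to the short-circuiting evaluator below instead of
            # being reduced eagerly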
if self._sc and numpy.prod(shape) >= self._sc:
return short_circuit_and(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_and(*arrays)
else:
return numpy.all(arrays, 0)
else:
return value
def _or(self, node):
arrays = []
result = None
shapes = set()
for item in node.values:
value = self[item]
self._debug('|-', value, incr=1)
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif value:
result = value
if len(shapes) > 1:
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.ones(shape, bool)
else:
return result
elif arrays:
self._debug('|~ Arrays:', arrays, incr=1)
if self._sc and numpy.prod(shape) >= self._sc:
return short_circuit_or(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_or(*arrays)
else:
return numpy.any(arrays, 0)
else:
return value
def _return(self, val, node):
tn = self._tn()
if self._subscript:
self._l['_napi_temp_ns'][tn] = val
return Subscript(
value=Name(id=self._subscript, ctx=Load()),
slice=Index(value=Str(s=tn)),
ctx=getattr(node, 'ctx', Load()))
else:
self[tn] = val
return ast_name(tn)
def _default(self, node):
self._debug('default', node)
expr = Expression(fml(Transformer(self._g, self._l, **self._kwargs)
.visit(node)))
result = eval(compile(expr, '<string>', 'eval'), self._g, self._l)
tn = self._tn()
self[tn] = result
return ast_name(tn) | mit |
mtausig/RIOT | tests/pkg_tensorflow-lite/mnist/generate_digit.py | 22 | 1164 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as uint8, images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = mnist.load_data()
data = mnist_test[args.index]
data = data.astype('uint8')
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data, output_path)
    if not args.no_plot:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
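# Example invocation (hypothetical): write the 42nd test digit to a file
# named 'digit' without displaying it:
#
#   python3 generate_digit.py --index 42 --output digit --no-plot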
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
| lgpl-2.1 |
sstober/deepthought | deepthought/datasets/eeg/MultiChannelEEGDataset.py | 1 | 18423 | '''
Created on Apr 2, 2014
@author: sstober
'''
import os
import logging
log = logging.getLogger(__name__)
import collections
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils.timing import log_timing
from pylearn2.utils import serial
from pylearn2.format.target_format import OneHotFormatter
import librosa
from deepthought.util.fs_util import load
from deepthought.util.timeseries_util import frame
from deepthought.datasets.eeg.channel_filter import NoChannelFilter
def load_datafiles_metadata(path):
def tree():
return collections.defaultdict(tree)
def multi_dimensions(n, dtype):
""" Creates an n-dimension dictionary where the n-th dimension is of type 'type'
"""
if n == 0:
return dtype()
return collections.defaultdict(lambda:multi_dimensions(n-1, dtype))
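    # e.g. (a sketch) multi_dimensions(2, list)['a']['b'] yields a fresh
    # empty list, so values can be appended without initializing keys first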
datafiles = multi_dimensions(4, list)
# datafiles = collections.defaultdict(lambda:collections.defaultdict(lambda:collections.defaultdict(list)))
metadb = load(os.path.join(path, 'metadata_db.pklz'))
for datafile, metadata in metadb.items():
subject = metadata['subject']
trial_type = metadata['trial_type']
trial_number = metadata['trial_no']
condition = metadata['condition']
# datafiles[subject][trial_type][trial_number][condition].append(os.path.join(path, datafile));
datafiles[subject][trial_type][trial_number][condition].append(datafile)
log.debug('{} {} {} {} : {}'.format(subject,trial_type,trial_number,condition,datafile))
return datafiles, metadb
class MultiChannelEEGDataset(DenseDesignMatrix):
"""
this class is deprecated
use EEGEpochsDataset for more flexibility
"""
class Like(object):
"""
        Helper class for lazy people to load a MultiChannelEEGDataset with similar parameters
Note: This is quite a hack as __new__ should return instances of Like.
Instead, it returns the loaded MultiChannelEEGDataset
"""
def __new__(Like,
base, # reference to copy initialize values from
**override
):
params = base.params.copy()
log.debug("base params: {}".format(params))
log.debug("override params: {}".format(override))
for key, value in override.iteritems():
params[key] = value
log.debug("merged params: {}".format(params))
return MultiChannelEEGDataset(**params)
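    # A minimal usage sketch (names are illustrative): given an existing
    # dataset ``base``, MultiChannelEEGDataset.Like(base, name='valid',
    # subjects=[2]) loads a new dataset reusing base.params with the
    # given overrides.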
def __init__(self,
path,
name = '', # optional name
# selectors
subjects='all', # optional selector (list) or 'all'
trial_types='all', # optional selector (list) or 'all'
trial_numbers='all', # optional selector (list) or 'all'
conditions='all', # optional selector (list) or 'all'
partitioner = None,
channel_filter = NoChannelFilter(), # optional channel filter, default: keep all
channel_names = None, # optional channel names (for metadata)
label_map = None, # optional conversion of labels
remove_dc_offset = False, # optional subtraction of channel mean, usually done already earlier
resample = None, # optional down-sampling
# optional sub-sequences selection
start_sample = 0,
stop_sample = None, # optional for selection of sub-sequences
# optional signal filter to by applied before spitting the signal
signal_filter = None,
# windowing parameters
frame_size = -1,
hop_size = -1, # values > 0 will lead to windowing
hop_fraction = None, # alternative to specifying absolute hop_size
# optional spectrum parameters, n_fft = 0 keeps raw data
n_fft = 0,
n_freq_bins = None,
spectrum_log_amplitude = False,
spectrum_normalization_mode = None,
include_phase = False,
flatten_channels=False,
layout='tf', # (0,1)-axes layout tf=time x features or ft=features x time
save_matrix_path = None,
keep_metadata = False,
):
'''
Constructor
'''
# save params
self.params = locals().copy()
del self.params['self']
# print self.params
# TODO: get the whole filtering into an extra class
datafiles_metadata, metadb = load_datafiles_metadata(path)
# print datafiles_metadata
def apply_filters(filters, node):
if isinstance(node, dict):
filtered = []
keepkeys = filters[0]
for key, value in node.items():
if keepkeys == 'all' or key in keepkeys:
filtered.extend(apply_filters(filters[1:], value))
return filtered
else:
return node # [node]
# keep only files that match the metadata filters
self.datafiles = apply_filters([subjects,trial_types,trial_numbers,conditions], datafiles_metadata)
# copy metadata for retained files
self.metadb = {}
for datafile in self.datafiles:
self.metadb[datafile] = metadb[datafile]
# print self.datafiles
# print self.metadb
self.name = name
if partitioner is not None:
self.datafiles = partitioner.get_partition(self.name, self.metadb)
self.include_phase = include_phase
self.spectrum_normalization_mode = spectrum_normalization_mode
self.spectrum_log_amplitude = spectrum_log_amplitude
self.sequence_partitions = [] # used to keep track of original sequences
# metadata: [subject, trial_no, stimulus, channel, start, ]
self.metadata = []
sequences = []
labels = []
n_sequences = 0
if frame_size > 0 and hop_size == -1 and hop_fraction is not None:
hop_size = np.ceil(frame_size / hop_fraction)
for i in xrange(len(self.datafiles)):
with log_timing(log, 'loading data from {}'.format(self.datafiles[i])):
# save start of next sequence
self.sequence_partitions.append(n_sequences)
data, metadata = load(os.path.join(path, self.datafiles[i]))
label = metadata['label']
if label_map is not None:
label = label_map[label]
multi_channel_frames = []
# process 1 channel at a time
for channel in xrange(data.shape[1]):
# filter channels
if not channel_filter.keep_channel(channel):
continue
samples = data[:, channel]
# subtract channel mean
if remove_dc_offset:
samples -= samples.mean()
# down-sample if requested
if resample is not None and resample[0] != resample[1]:
samples = librosa.resample(samples, resample[0], resample[1])
# apply optional signal filter after down-sampling -> requires lower order
if signal_filter is not None:
samples = signal_filter.process(samples)
# get sub-sequence in resampled space
# log.info('using samples {}..{} of {}'.format(start_sample,stop_sample, samples.shape))
samples = samples[start_sample:stop_sample]
if n_fft is not None and n_fft > 0: # Optionally:
### frequency spectrum branch ###
# transform to spectogram
                        hop_length = n_fft / 4
'''
from http://theremin.ucsd.edu/~bmcfee/librosadoc/librosa.html
>>> # Get a power spectrogram from a waveform y
>>> S = np.abs(librosa.stft(y)) ** 2
>>> log_S = librosa.logamplitude(S)
'''
S = librosa.core.stft(samples, n_fft=n_fft, hop_length=hop_length)
# mag = np.abs(S) # magnitude spectrum
mag = np.abs(S)**2 # power spectrum
# include phase information if requested
if self.include_phase:
# phase = np.unwrap(np.angle(S))
phase = np.angle(S)
# Optionally: cut off high bands
if n_freq_bins is not None:
mag = mag[0:n_freq_bins, :]
if self.include_phase:
phase = phase[0:n_freq_bins, :]
if self.spectrum_log_amplitude:
mag = librosa.logamplitude(mag)
s = mag # for normalization
                        '''
                        NOTE on normalization:
                        It depends on the structure of the neural network and (even more)
                        on the properties of the data. There is no single best normalization
                        algorithm; if there were one, it would be used everywhere by default...
                        In theory, there is no requirement for the data to be normalized at all.
                        This is a purely practical concern: in practice, convergence can
                        take forever if the input is spread out too much. The simplest approach
                        is to normalize by scaling the data to (-1,1) (or (0,1), depending
                        on the activation function), and in most cases this works. If the
                        algorithm converges well, that is your answer. If not, there are
                        too many possible problems and methods to outline here without knowing
                        the actual data.
                        '''
## normalize to mean 0, std 1
if self.spectrum_normalization_mode == 'mean0_std1':
# s = preprocessing.scale(s, axis=0);
mean = np.mean(s)
std = np.std(s)
s = (s - mean) / std
## normalize by linear transform to [0,1]
elif self.spectrum_normalization_mode == 'linear_0_1':
s = s / np.max(s)
## normalize by linear transform to [-1,1]
elif self.spectrum_normalization_mode == 'linear_-1_1':
s = -1 + 2 * (s - np.min(s)) / (np.max(s) - np.min(s))
elif self.spectrum_normalization_mode is not None:
raise ValueError(
'unsupported spectrum normalization mode {}'.format(
self.spectrum_normalization_mode)
)
#print s.mean(axis=0)
#print s.std(axis=0)
# include phase information if requested
if self.include_phase:
# normalize phase to [-1.1]
phase = phase / np.pi
s = np.vstack([s, phase])
# transpose to fit pylearn2 layout
s = np.transpose(s)
# print s.shape
### end of frequency spectrum branch ###
else:
### raw waveform branch ###
# normalize to max amplitude 1
s = librosa.util.normalize(samples)
# add 2nd data dimension
s = s.reshape(s.shape[0], 1)
# print s.shape
### end of raw waveform branch ###
s = np.asfarray(s, dtype='float32')
if frame_size > 0 and hop_size > 0:
s = s.copy() # FIXME: THIS IS NECESSARY IN MultiChannelEEGSequencesDataset - OTHERWISE, THE FOLLOWING OP DOES NOT WORK!!!!
frames = frame(s, frame_length=frame_size, hop_length=hop_size)
else:
frames = s
del s
# print frames.shape
if flatten_channels:
# add artificial channel dimension
frames = frames.reshape((frames.shape[0], frames.shape[1], frames.shape[2], 1))
# print frames.shape
sequences.append(frames)
# increment counter by new number of frames
n_sequences += frames.shape[0]
if keep_metadata:
# determine channel name
channel_name = None
if channel_names is not None:
channel_name = channel_names[channel]
elif 'channels' in metadata:
channel_name = metadata['channels'][channel]
self.metadata.append({
'subject' : metadata['subject'], # subject
'trial_type': metadata['trial_type'], # trial_type
'trial_no' : metadata['trial_no'], # trial_no
'condition' : metadata['condition'], # condition
'channel' : channel, # channel
'channel_name' : channel_name,
'start' : self.sequence_partitions[-1], # start
'stop' : n_sequences # stop
})
for _ in xrange(frames.shape[0]):
labels.append(label)
else:
multi_channel_frames.append(frames)
### end of channel iteration ###
if not flatten_channels:
# turn list into array
multi_channel_frames = np.asfarray(multi_channel_frames, dtype='float32')
# [channels x frames x time x freq] -> cb01
# [channels x frames x time x 1] -> cb0.
# move channel dimension to end
multi_channel_frames = np.rollaxis(multi_channel_frames, 0, 4)
# print multi_channel_frames.shape
# log.debug(multi_channel_frames.shape)
sequences.append(multi_channel_frames)
# increment counter by new number of frames
n_sequences += multi_channel_frames.shape[0]
if keep_metadata:
self.metadata.append({
'subject' : metadata['subject'], # subject
'trial_type': metadata['trial_type'], # trial_type
'trial_no' : metadata['trial_no'], # trial_no
'condition' : metadata['condition'], # condition
'channel' : 'all', # channel
'start' : self.sequence_partitions[-1], # start
'stop' : n_sequences # stop
})
for _ in xrange(multi_channel_frames.shape[0]):
labels.append(label)
### end of datafile iteration ###
# turn into numpy arrays
sequences = np.vstack(sequences)
# print sequences.shape;
labels = np.hstack(labels)
# one_hot_y = one_hot(labels)
one_hot_formatter = OneHotFormatter(labels.max() + 1) # FIXME!
one_hot_y = one_hot_formatter.format(labels)
self.labels = labels
if layout == 'ft': # swap axes to (batch, feature, time, channels)
sequences = sequences.swapaxes(1, 2)
log.debug('final dataset shape: {} (b,0,1,c)'.format(sequences.shape))
super(MultiChannelEEGDataset, self).__init__(topo_view=sequences, y=one_hot_y, axes=['b', 0, 1, 'c'])
log.info('generated dataset "{}" with shape X={}={} y={} labels={} '.
format(self.name, self.X.shape, sequences.shape, self.y.shape, self.labels.shape))
if save_matrix_path is not None:
matrix = DenseDesignMatrix(topo_view=sequences, y=one_hot_y, axes=['b', 0, 1, 'c'])
with log_timing(log, 'saving DenseDesignMatrix to {}'.format(save_matrix_path)):
serial.save(save_matrix_path, matrix) | bsd-3-clause |
hershaw/data-science-101 | course/codefiles/datagen.py | 1 | 1955 | import numpy as np
import pandas as pd
from sklearn import datasets
def random_xy(num_points=100):
return pd.DataFrame({
'x': np.random.normal(size=num_points),
'y': np.random.normal(size=num_points)
})
def x_plus_noise(num_points=100, slope=1, randomness=0.1):
if not (0 <= randomness <= 1):
raise ValueError('randomness must be between 0 and 1 (inclusive).')
x = np.random.normal(size=num_points)
return pd.DataFrame({
'x': x,
'y': slope * x + np.random.normal(scale=randomness, size=num_points)
})
def data_3d(num_points=100, randomness=0.1, theta_x=30, theta_z=60):
data = pd.DataFrame({
'x': np.random.normal(scale=10, size=num_points),
'y': np.random.normal(scale=2, size=num_points),
'z': np.random.normal(scale=randomness, size=num_points)
})
return pd.DataFrame(
(np.dot(_rot_mat_x(theta=np.radians(theta_x)),
np.dot(_rot_mat_z(theta=np.radians(theta_z)),
data.values.transpose()))).transpose(),
columns=['x', 'y', 'z']
)
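# data_3d thus produces a noisy plane (the z spread is controlled by
# `randomness`) rotated out of the x-y plane, which makes it a convenient
# input for dimensionality-reduction demos such as PCA.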
def label_data(df, p=0.1):
""" Labels data as +1 or -1, with probability p of incorrect labeling.
"""
return ((df['x'] > 0).astype(int) * 2 - 1) * np.power(
-1,
np.random.binomial(1, p, df.shape[0])
)
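# A minimal usage sketch combining the generators above (parameter values
# are illustrative):
#
#   df = x_plus_noise(num_points=200, slope=0.5, randomness=0.2)
#   df['label'] = label_data(df, p=0.05)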
def get_iris():
iris = datasets.load_iris()
return pd.DataFrame({
'sepal_length': iris.data[:, 0],
'sepal_width': iris.data[:, 1],
'petal_length': iris.data[:, 2],
'petal_width': iris.data[:, 3],
'target': iris.target,
})
def _rot_mat_x(theta=np.radians(30)):
return np.array([
[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]
])
def _rot_mat_z(theta=np.radians(60)):
return np.array([
[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]
])
| mit |
jhseu/tensorflow | tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py | 1 | 15301 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for the ResNet50 model, executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.benchmarks.resnet50 import resnet50
from tensorflow.python.eager.benchmarks.resnet50 import resnet50_test_util
def compute_gradients(model, images, labels, num_replicas=1):
with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.compat.v1.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.compat.v2.summary.write('loss', loss)
if num_replicas != 1:
loss /= num_replicas
# TODO(b/110991947): We can mistakenly trace the gradient call in
# multi-threaded environment. Explicitly disable recording until
# this is fixed.
with tape.stop_recording():
grads = grad_tape.gradient(loss, model.variables)
return grads
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
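# A minimal sketch of how the two helpers above combine into a single
# training step (model, optimizer, images and labels are assumed to exist):
#
#   grads = compute_gradients(model, images, labels)
#   apply_gradients(model, optimizer, grads)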
def _events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
"""
records = list(tf.python_io.tf_record_iterator(filepath))
result = []
for r in records:
event = tf.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.compat.v1.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.io.gfile.exists(logdir)
files = tf.io.gfile.listdir(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir: %s' % files
return _events_from_file(os.path.join(logdir, files[0]))
class ResNet50Test(tf.test.TestCase):
def _apply(self, defun=False, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
with tf.device(device), context.execution_mode(execution_mode):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
context.async_wait()
self.assertEqual((2, 1000), output.shape)
def test_apply(self):
self._apply(defun=False)
def test_apply_async(self):
self._apply(defun=False, execution_mode=context.ASYNC)
def test_apply_with_defun(self):
self._apply(defun=True)
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=context.ASYNC)
def test_apply_no_top(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
def test_apply_with_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
self.assertEqual((2, 2048), output.shape)
def test_apply_no_average_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, average_pooling=False, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 7, 7) if data_format == 'channels_first' else
(2, 7, 7, 2048))
self.assertEqual(output_shape, output.shape)
def test_apply_block3_strides(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
def test_apply_retrieve_intermediates(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
intermediates_dict = {}
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False,
intermediates_dict=intermediates_dict)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
if data_format == 'channels_first':
block_shapes = {
'block0': (2, 64, 112, 112),
'block0mp': (2, 64, 55, 55),
'block1': (2, 256, 55, 55),
'block2': (2, 512, 28, 28),
'block3': (2, 1024, 7, 7),
'block4': (2, 2048, 1, 1),
}
else:
block_shapes = {
'block0': (2, 112, 112, 64),
'block0mp': (2, 55, 55, 64),
'block1': (2, 55, 55, 256),
'block2': (2, 28, 28, 512),
'block3': (2, 7, 7, 1024),
'block4': (2, 1, 1, 2048),
}
for (block_name, block) in intermediates_dict.items():
self.assertEqual(block_shapes[block_name], block.shape)
def _test_train(self, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.compat.v2.summary.experimental.set_step(
tf.train.get_or_create_global_step())
logdir = tempfile.mkdtemp()
with tf.compat.v2.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.compat.v2.summary.record_if(True):
with tf.device(device), context.execution_mode(execution_mode):
optimizer = tf.train.GradientDescentOptimizer(0.1)
images, labels = resnet50_test_util.random_batch(2, data_format)
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
self.assertEqual(320, len(model.variables))
context.async_wait()
events = events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
def test_train(self):
self._test_train()
def test_train_async(self):
self._test_train(execution_mode=context.ASYNC)
def test_no_garbage(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
with tf.device(device):
images, labels = resnet50_test_util.random_batch(2, data_format)
gc.disable()
# Warm up. Note that this first run does create significant amounts of
# garbage to be collected. The hope is that this is a build-only effect,
# and a subsequent training loop will create nothing which needs to be
# collected.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
for _ in range(2):
# Run twice to ensure that garbage that is created on the first
# iteration is no longer accessible.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
# There should be no garbage requiring collection.
self.assertEqual(0, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class ResNet50Benchmarks(tf.test.Benchmark):
def _report(self, label, start, num_iters, device, batch_size, data_format,
num_replicas=1):
    resnet50_test_util.report(self, label, start, num_iters, device,
                              batch_size, data_format,
                              num_replicas=num_replicas)
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
# TODO(b/141475121): We need some way to check which batch sizes would
# work using a public API.
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
# Avoid OOM errors with larger batch sizes, which seem to cause errors
# later on even if caught.
#
# TODO(allenl): Base this on device memory; memory limit information
# during the test seems to exclude the amount TensorFlow has allocated,
# which isn't useful.
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = resnet50_test_util.random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply(
'eager_apply', resnet50_test_util.device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async', resnet50_test_util.device_and_data_format(),
defun=False, execution_mode=context.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply(
'eager_apply_with_defun',
resnet50_test_util.device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = resnet50_test_util.random_batch(
batch_size, data_format)
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
apply_grads = apply_gradients
if defun:
model.call = tf.function(model.call)
apply_grads = tf.function(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train(
'eager_train', MockIterator,
resnet50_test_util.device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
resnet50_test_util.device_and_data_format(),
defun=False,
execution_mode=context.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
resnet50_test_util.device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset', make_iterator,
resnet50_test_util.device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
resnet50_test_util.device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/linear_model/least_angle.py | 56 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
        regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
    # will hold the Cholesky factorization. Only the lower part is
    # referenced.
    # We initialize this to "zeros" and not "empty", because
    # it is passed to scipy linalg functions, and if it contains NaNs,
    # even in the upper part that is not used, errors get raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty".
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater
            # than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
            # with a huge number of features, this takes 50% of the time; I
            # think it could be avoided if we just updated it using an
            # orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx does not have length 1
            for ii in idx:
                arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
m, n = idx, n_active
            # handle the case when idx does not have length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
                # recompute covariance; probably could be done better.
                # This is wrong, as Xy is not swapped with the rest of the
                # variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
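# A minimal usage sketch for lars_path (X and y are assumed to be NumPy
# arrays of shape (n_samples, n_features) and (n_samples,)):
#
#   alphas, active, coefs = lars_path(X, y, method='lasso', alpha_min=0.01)
#   # coefs[:, -1] holds the coefficients at the smallest alpha on the path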
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
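
    Examples
    --------
    Illustrative only; the selected ``alpha_`` depends on the data and
    on the cross-validation folds (hence the skipped doctests):

    >>> from sklearn import linear_model
    >>> clf = linear_model.LarsCV(cv=2)
    >>> clf.fit([[-1., 1.], [0., 0.], [1., 1.], [2., 0.]],
    ...         [-2.2, 0., 1.1, 0.5])                  # doctest: +SKIP
    >>> clf.alpha_                                     # doctest: +SKIP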
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
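        # Each fold produced residues on its own (decreasing) alpha grid;
        # align the folds on the shared grid ``all_alphas`` by reversing to
        # increasing order, padding both ends of each fold's grid, and
        # linearly interpolating the residues before squaring them.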
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
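
    Examples
    --------
    A minimal sketch; the estimated ``alpha_`` is data dependent:

    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsCV(cv=2)
    >>> clf.fit([[0., 0.], [1., 1.], [2., 0.], [3., 1.]],
    ...         [0., 1.1, 2.3, 2.9])                   # doctest: +SKIP
    >>> clf.alpha_                                     # doctest: +SKIP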
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
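        # The information criterion evaluated below for each step k of the
        # path is  n_samples * log(MSE_k) + K * df_k,  where K = 2 for AIC,
        # K = log(n_samples) for BIC, and df_k counts the non-zero
        # coefficients (see the Zou et al. reference in the class docstring).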
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
    df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
elvandy/nltools | nltools/data/adjacency.py | 1 | 34227 | from __future__ import division
'''
This data class is for working with similarity/dissimilarity matrices
'''
__author__ = ["Luke Chang"]
__license__ = "MIT"
import os
import pandas as pd
import numpy as np
import six
from copy import deepcopy
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import MDS
from sklearn.utils import check_random_state
from scipy.spatial.distance import squareform
from scipy.stats import ttest_1samp
import seaborn as sns
import matplotlib.pyplot as plt
from nltools.stats import (correlation_permutation,
one_sample_permutation,
two_sample_permutation,
summarize_bootstrap,
matrix_permutation)
from nltools.stats import regress as regression
from nltools.plotting import (plot_stacked_adjacency,
plot_silhouette)
from nltools.utils import (all_same,
attempt_to_import,
concatenate,
_bootstrap_apply_func)
from .design_matrix import Design_Matrix
from joblib import Parallel, delayed
# Optional dependencies
nx = attempt_to_import('networkx', 'nx')
MAX_INT = np.iinfo(np.int32).max
class Adjacency(object):
'''
Adjacency is a class to represent Adjacency matrices as a vector rather
than a 2-dimensional matrix. This makes it easier to perform data
manipulation and analyses.
Args:
data: pandas data instance or list of files
matrix_type: (str) type of matrix. Possible values include:
['distance','similarity','directed','distance_flat',
'similarity_flat','directed_flat']
Y: Pandas DataFrame of training labels
**kwargs: Additional keyword arguments
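
    Example Usage:
        A minimal sketch; assumes ``dist`` is a square numpy array holding
        a distance matrix (zeros on the diagonal):

            dat = Adjacency(dist, matrix_type='distance')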
'''
def __init__(self, data=None, Y=None, matrix_type=None, labels=None,
**kwargs):
if matrix_type is not None:
if matrix_type.lower() not in ['distance','similarity','directed',
'distance_flat','similarity_flat',
'directed_flat']:
raise ValueError("matrix_type must be [None,'distance', "
"'similarity','directed','distance_flat', "
"'similarity_flat','directed_flat']")
if data is None:
self.data = np.array([])
self.matrix_type = 'empty'
self.is_single_matrix = np.nan
self.issymmetric = np.nan
elif isinstance(data, list):
if isinstance(data[0], Adjacency):
tmp = concatenate(data)
for item in ['data', 'matrix_type', 'Y','issymmetric']:
setattr(self, item, getattr(tmp,item))
else:
d_all = []; symmetric_all = []; matrix_type_all = []
for d in data:
data_tmp, issymmetric_tmp, matrix_type_tmp, _ = self._import_single_data(d, matrix_type=matrix_type)
d_all.append(data_tmp)
symmetric_all.append(issymmetric_tmp)
matrix_type_all.append(matrix_type_tmp)
if not all_same(symmetric_all):
raise ValueError('Not all matrices are of the same '
'symmetric type.')
if not all_same(matrix_type_all):
raise ValueError('Not all matrices are of the same matrix '
'type.')
self.data = np.array(d_all)
self.issymmetric = symmetric_all[0]
self.matrix_type = matrix_type_all[0]
self.is_single_matrix = False
else:
self.data, self.issymmetric, self.matrix_type, self.is_single_matrix = self._import_single_data(data, matrix_type=matrix_type)
if Y is not None:
if isinstance(Y, six.string_types):
if os.path.isfile(Y):
Y = pd.read_csv(Y, header=None, index_col=None)
if isinstance(Y, pd.DataFrame):
if self.data.shape[0] != len(Y):
raise ValueError("Y does not match the correct size of "
"data")
self.Y = Y
else:
raise ValueError("Make sure Y is a pandas data frame.")
else:
self.Y = pd.DataFrame()
if labels is not None:
if not isinstance(labels, (list, np.ndarray)):
                raise ValueError("Make sure labels is a list or numpy array.")
if self.is_single_matrix:
if len(labels) != self.square_shape()[0]:
raise ValueError('Make sure the length of labels matches the shape of data.')
self.labels = deepcopy(labels)
else:
if len(labels) != len(self):
if len(labels) != self.square_shape()[0]:
raise ValueError('Make sure length of labels either '
'matches the number of Adjacency '
'matrices or the size of a single '
'matrix.')
else:
self.labels = list(labels) * len(self)
else:
if np.all(np.array([len(x) for x in labels]) !=self.square_shape()[0]):
raise ValueError("All lists of labels must be same length as shape of data.")
self.labels = deepcopy(labels)
else:
self.labels = None
def __repr__(self):
return ("%s.%s(shape=%s, square_shape=%s, Y=%s, is_symmetric=%s,"
"matrix_type=%s)") % (
self.__class__.__module__,
self.__class__.__name__,
self.shape(),
self.square_shape(),
len(self.Y),
self.issymmetric,
self.matrix_type)
def __getitem__(self,index):
new = self.copy()
if isinstance(index, int):
new.data = np.array(self.data[index, :]).flatten()
new.is_single_matrix = True
else:
new.data = np.array(self.data[index, :])
if not self.Y.empty:
new.Y = self.Y.iloc[index]
return new
def __len__(self):
if self.is_single_matrix:
return 1
else:
return self.data.shape[0]
def __iter__(self):
for x in range(len(self)):
yield self[x]
def __add__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data + y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = new.data + y.data
return new
def __sub__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data - y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = new.data - y.data
return new
def __mul__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data * y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = np.multiply(new.data, y.data)
return new
def _import_single_data(self, data, matrix_type=None):
''' Helper function to import single data matrix.'''
if isinstance(data, six.string_types):
if os.path.isfile(data):
data = pd.read_csv(data)
else:
raise ValueError('Make sure you have specified a valid file '
'path.')
def test_is_single_matrix(data):
if len(data.shape) == 1:
return True
else:
return False
if matrix_type is not None:
if matrix_type.lower() == 'distance_flat':
matrix_type = 'distance'
data = np.array(data)
issymmetric = True
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() == 'similarity_flat':
matrix_type = 'similarity'
data = np.array(data)
issymmetric = True
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() == 'directed_flat':
matrix_type = 'directed'
data = np.array(data).flatten()
issymmetric = False
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() in ['distance', 'similarity', 'directed']:
if data.shape[0] != data.shape[1]:
raise ValueError('Data matrix must be square')
data = np.array(data)
matrix_type = matrix_type.lower()
if matrix_type in ['distance', 'similarity']:
issymmetric = True
data = data[np.triu_indices(data.shape[0], k=1)]
else:
issymmetric = False
if isinstance(data, pd.DataFrame):
data = data.values.flatten()
elif isinstance(data, np.ndarray):
data = data.flatten()
is_single_matrix = True
else:
if len(data.shape) == 1: # Single Vector
try:
data = squareform(data)
except ValueError:
print('Data is not flattened upper triangle from '
'similarity/distance matrix or flattened directed '
'matrix.')
is_single_matrix = True
elif data.shape[0] == data.shape[1]: # Square Matrix
is_single_matrix = True
else: # Rectangular Matrix
data_all = deepcopy(data)
try:
data = squareform(data_all[0, :])
except ValueError:
print('Data is not flattened upper triangle from multiple '
'similarity/distance matrices or flattened directed '
'matrices.')
is_single_matrix = False
# Test if matrix is symmetrical
if np.all(data[np.triu_indices(data.shape[0], k=1)] == data.T[np.triu_indices(data.shape[0], k=1)]):
issymmetric = True
else:
issymmetric = False
# Determine matrix type
if issymmetric:
if np.sum(np.diag(data)) == 0:
matrix_type = 'distance'
elif np.sum(np.diag(data)) == data.shape[0]:
matrix_type = 'similarity'
data = data[np.triu_indices(data.shape[0], k=1)]
else:
matrix_type = 'directed'
data = data.flatten()
if not is_single_matrix:
data = data_all
return (data, issymmetric, matrix_type, is_single_matrix)
def isempty(self):
'''Check if Adjacency object is empty'''
        return self.matrix_type == 'empty'
def squareform(self):
'''Convert adjacency back to squareform'''
if self.issymmetric:
if self.is_single_matrix:
return squareform(self.data)
else:
return [squareform(x.data) for x in self]
else:
if self.is_single_matrix:
return self.data.reshape(int(np.sqrt(self.data.shape[0])),
int(np.sqrt(self.data.shape[0])))
else:
return [x.data.reshape(int(np.sqrt(x.data.shape[0])),
int(np.sqrt(x.data.shape[0]))) for x in self]
def plot(self, limit=3, *args, **kwargs):
''' Create Heatmap of Adjacency Matrix'''
if self.is_single_matrix:
f, a = plt.subplots(nrows=1, figsize=(7, 5))
if self.labels is None:
sns.heatmap(self.squareform(), square=True, ax=a,
*args, **kwargs)
else:
sns.heatmap(self.squareform(), square=True, ax=a,
xticklabels=self.labels,
yticklabels=self.labels,
*args, **kwargs)
else:
n_subs = np.minimum(len(self), limit)
f, a = plt.subplots(nrows=n_subs, figsize=(7, len(self)*5))
if self.labels is None:
for i in range(n_subs):
sns.heatmap(self[i].squareform(), square=True, ax=a[i],
*args, **kwargs)
else:
for i in range(n_subs):
sns.heatmap(self[i].squareform(), square=True,
xticklabels=self.labels[i],
yticklabels=self.labels[i],
ax=a[i], *args, **kwargs)
return f
def mean(self, axis=0):
''' Calculate mean of Adjacency
Args:
axis: calculate mean over features (0) or data (1).
For data it will be on upper triangle.
Returns:
mean: float if single, adjacency if axis=0, np.array if axis=1
and multiple
'''
if self.is_single_matrix:
return np.mean(self.data)
else:
if axis == 0:
return Adjacency(data=np.mean(self.data, axis=axis),
matrix_type=self.matrix_type + '_flat')
elif axis == 1:
return np.mean(self.data, axis=axis)
def std(self, axis=0):
''' Calculate standard deviation of Adjacency
Args:
axis: calculate std over features (0) or data (1).
For data it will be on upper triangle.
Returns:
std: float if single, adjacency if axis=0, np.array if axis=1 and
multiple
'''
if self.is_single_matrix:
return np.std(self.data)
else:
if axis == 0:
return Adjacency(data=np.std(self.data, axis=axis),
matrix_type=self.matrix_type + '_flat')
elif axis == 1:
return np.std(self.data, axis=axis)
def shape(self):
''' Calculate shape of data. '''
return self.data.shape
def square_shape(self):
''' Calculate shape of squareform data. '''
        if self.matrix_type == 'empty':
return np.array([])
else:
if self.is_single_matrix:
return self.squareform().shape
else:
return self[0].squareform().shape
def copy(self):
''' Create a copy of Adjacency object.'''
return deepcopy(self)
def append(self, data):
''' Append data to Adjacency instance
Args:
data: Adjacency instance to append
Returns:
out: new appended Adjacency instance
'''
if not isinstance(data, Adjacency):
raise ValueError('Make sure data is a Adjacency instance.')
if self.isempty():
out = data.copy()
else:
out = self.copy()
if self.square_shape() != data.square_shape():
raise ValueError('Data is not the same shape as Adjacency '
'instance.')
out.data = np.vstack([self.data, data.data])
out.is_single_matrix = False
if out.Y.size:
out.Y = self.Y.append(data.Y)
return out
def write(self, file_name, method='long'):
''' Write out Adjacency object to csv file.
Args:
file_name (str): name of file name to write
method (str): method to write out data ['long','square']
'''
if method not in ['long', 'square']:
raise ValueError('Make sure method is ["long","square"].')
if self.is_single_matrix:
            if method == 'long':
                out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
                out = pd.DataFrame(self.squareform()).to_csv(file_name,
                                                             index=None)
        else:
            if method == 'long':
                out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
raise NotImplementedError('Need to decide how we should write '
'out multiple matrices. As separate '
'files?')
def similarity(self, data, plot=False, perm_type='2d', n_permute=5000, metric='spearman', **kwargs):
''' Calculate similarity between two Adjacency matrices.
Default is to use spearman correlation and permutation test.
Args:
data: Adjacency data, or 1-d array same size as self.data
perm_type: '1d','2d', or None
metric: 'spearman','pearson','kendall'
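
        Example Usage:
            A hedged sketch; assumes ``a`` and ``b`` are Adjacency
            instances of matching shape:

                stats = a.similarity(b, metric='spearman', n_permute=1000)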
'''
if not isinstance(data, Adjacency):
data2 = Adjacency(data)
else:
data2 = data.copy()
if perm_type is None:
            n_permute = 0
similarity_func = correlation_permutation
elif perm_type == '1d':
similarity_func = correlation_permutation
elif perm_type == '2d':
similarity_func = matrix_permutation
if self.is_single_matrix:
if plot:
plot_stacked_adjacency(self, data)
return similarity_func(self.data, data2.data, metric=metric, n_permute=n_permute, **kwargs)
else:
if plot:
_, a = plt.subplots(len(self))
for i in a:
plot_stacked_adjacency(self, data, ax=i)
return [similarity_func(x.data, data2.data, metric=metric, n_permute=n_permute, **kwargs) for x in self]
def distance(self, method='correlation', **kwargs):
''' Calculate distance between images within an Adjacency() instance.
Args:
method: type of distance metric (can use any scikit learn or
sciypy metric)
Returns:
dist: Outputs a 2D distance matrix.
'''
return Adjacency(pairwise_distances(self.data, metric=method, **kwargs),
matrix_type='distance')
def threshold(self, upper=None, lower=None, binarize=False):
'''Threshold Adjacency instance. Provide upper and lower values or
percentages to perform two-sided thresholding. Binarize will return
a mask image respecting thresholds if provided, otherwise respecting
every non-zero value.
Args:
upper: (float or str) Upper cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
lower: (float or str) Lower cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
binarize (bool): return binarized image respecting thresholds if
provided, otherwise binarize on every non-zero value;
default False
Returns:
Adjacency: thresholded Adjacency instance
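
        Example Usage:
            Keep only values above the 90th percentile and binarize
            (illustrative):

                mask = dat.threshold(upper='90%', binarize=True)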
'''
b = self.copy()
if isinstance(upper, six.string_types):
            if upper[-1] == '%':
upper = np.percentile(b.data, float(upper[:-1]))
if isinstance(lower, six.string_types):
            if lower[-1] == '%':
lower = np.percentile(b.data, float(lower[:-1]))
if upper and lower:
b.data[(b.data < upper) & (b.data > lower)] = 0
elif upper and not lower:
b.data[b.data < upper] = 0
elif lower and not upper:
b.data[b.data > lower] = 0
if binarize:
b.data[b.data != 0] = 1
return b
def to_graph(self):
''' Convert Adjacency into networkx graph. only works on
single_matrix for now.'''
if self.is_single_matrix:
if self.matrix_type == 'directed':
G = nx.DiGraph(self.squareform())
else:
G = nx.Graph(self.squareform())
if self.labels is not None:
labels = {x:y for x,y in zip(G.nodes,self.labels)}
nx.relabel_nodes(G, labels, copy=False)
return G
else:
raise NotImplementedError('This function currently only works on '
'single matrices.')
def ttest(self, permutation=False, **kwargs):
''' Calculate ttest across samples.
Args:
permutation: (bool) Run ttest as permutation. Note this can be very slow.
Returns:
out: (dict) contains Adjacency instances of t values (or mean if
running permutation) and Adjacency instance of p values.
'''
if self.is_single_matrix:
raise ValueError('t-test cannot be run on single matrices.')
if permutation:
t = []; p = []
for i in range(self.data.shape[1]):
stats = one_sample_permutation(self.data[:, i], **kwargs)
t.append(stats['mean'])
p.append(stats['p'])
t = Adjacency(np.array(t))
p = Adjacency(np.array(p))
else:
t = self.mean().copy()
p = deepcopy(t)
t.data, p.data = ttest_1samp(self.data, 0, 0)
return {'t': t, 'p':p}
def plot_label_distance(self, labels=None, ax=None):
''' Create a violin plot indicating within and between label distance
Args:
labels (np.array): numpy array of labels to plot
Returns:
violin plot handles
'''
if not self.is_single_matrix:
raise ValueError('This function only works on single adjacency '
'matrices.')
distance = pd.DataFrame(self.squareform())
if labels is None:
labels = np.array(deepcopy(self.labels))
else:
if len(labels) != distance.shape[0]:
raise ValueError('Labels must be same length as distance matrix')
out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
for i in np.unique(labels):
tmp_w = pd.DataFrame(columns=out.columns, index=None)
tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
tmp_w['Type'] = 'Within'
tmp_w['Group'] = i
tmp_b = pd.DataFrame(columns=out.columns, index=None)
            tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()
tmp_b['Type'] = 'Between'
tmp_b['Group'] = i
out = out.append(tmp_w).append(tmp_b)
f = sns.violinplot(x="Group", y="Distance", hue="Type", data=out, split=True, inner='quartile',
palette={"Within": "lightskyblue", "Between": "red"}, ax=ax)
f.set_ylabel('Average Distance')
f.set_title('Average Group Distance')
return f
def stats_label_distance(self, labels=None, n_permute=5000, n_jobs=-1):
''' Calculate permutation tests on within and between label distance.
Args:
labels (np.array): numpy array of labels to plot
n_permute (int): number of permutations to run (default=5000)
Returns:
dict: dictionary of within and between group differences
and p-values
'''
if not self.is_single_matrix:
raise ValueError('This function only works on single adjacency '
'matrices.')
distance = pd.DataFrame(self.squareform())
        if labels is None:
            labels = np.array(deepcopy(self.labels))
        else:
            if len(labels) != distance.shape[0]:
                raise ValueError('Labels must be same length as distance matrix')
out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
for i in np.unique(labels):
tmp_w = pd.DataFrame(columns=out.columns, index=None)
tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
tmp_w['Type'] = 'Within'
tmp_w['Group'] = i
tmp_b = pd.DataFrame(columns=out.columns, index=None)
tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()
tmp_b['Type'] = 'Between'
tmp_b['Group'] = i
out = out.append(tmp_w).append(tmp_b)
stats = dict()
for i in np.unique(labels):
# Within group test
tmp1 = out.loc[(out['Group'] == i) & (out['Type'] == 'Within'), 'Distance']
tmp2 = out.loc[(out['Group'] == i) & (out['Type'] == 'Between'), 'Distance']
stats[str(i)] = two_sample_permutation(tmp1, tmp2,
n_permute=n_permute, n_jobs=n_jobs)
return stats
def plot_silhouette(self, labels=None, ax=None, permutation_test=True,
n_permute=5000, **kwargs):
'''Create a silhouette plot'''
distance = pd.DataFrame(self.squareform())
if labels is None:
labels = np.array(deepcopy(self.labels))
else:
if len(labels) != distance.shape[0]:
raise ValueError('Labels must be same length as distance matrix')
        (f, outAll) = plot_silhouette(distance, labels, ax=ax,
                                      permutation_test=permutation_test,
                                      n_permute=n_permute, **kwargs)
return (f,outAll)
def bootstrap(self, function, n_samples=5000, save_weights=False,
n_jobs=-1, random_state=None, *args, **kwargs):
'''Bootstrap an Adjacency method.
        Example Usage:
b = dat.bootstrap('mean', n_samples=5000)
b = dat.bootstrap('predict', n_samples=5000, algorithm='ridge')
b = dat.bootstrap('predict', n_samples=5000, save_weights=True)
Args:
function: (str) method to apply to data for each bootstrap
n_samples: (int) number of samples to bootstrap with replacement
save_weights: (bool) Save each bootstrap iteration
(useful for aggregating many bootstraps on a cluster)
            n_jobs: (int) The number of CPUs to use to do the computation.
                        -1 means all CPUs.

        Returns:
            output: summarized studentized bootstrap output
'''
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_samples)
bootstrapped = Parallel(n_jobs=n_jobs)(
delayed(_bootstrap_apply_func)(self,
function, random_state=seeds[i], *args, **kwargs)
for i in range(n_samples))
bootstrapped = Adjacency(bootstrapped)
return summarize_bootstrap(bootstrapped, save_weights=save_weights)
    def plot_mds(self, n_components=2, metric=True, labels_color=None,
                 cmap=plt.cm.hot_r, n_jobs=-1, view=(30, 20),
                 figsize=[12, 8], ax=None, *args, **kwargs):
''' Plot Multidimensional Scaling
Args:
n_components: (int) Number of dimensions to project (can be 2 or 3)
            metric: (bool) Perform metric or non-metric dimensional scaling; default True
labels_color: (str) list of colors for labels, if len(1) then make all same color
n_jobs: (int) Number of parallel jobs
view: (tuple) view for 3-Dimensional plot; default (30,20)
Returns:
fig: returns matplotlib figure
'''
if self.matrix_type != 'distance':
raise ValueError("MDS only works on distance matrices.")
if not self.is_single_matrix:
raise ValueError("MDS only works on single matrices.")
if n_components not in [2,3]:
raise ValueError('Cannot plot {0}-d image'.format(n_components))
if labels_color is not None:
if self.labels is None:
raise ValueError("Make sure that Adjacency object has labels specified.")
if len(self.labels) != len(labels_color):
raise ValueError("Length of labels_color must match self.labels.")
# Run MDS
mds = MDS(n_components=n_components, metric=metric, n_jobs=n_jobs,
dissimilarity="precomputed", *args, **kwargs)
proj = mds.fit_transform(self.squareform())
        # Create Plot
        returnFig = False
        if ax is None:  # Create axis
            returnFig = True
fig = plt.figure(figsize=figsize)
if n_components == 3:
ax = fig.add_subplot(111, projection='3d')
ax.view_init(*view)
elif n_components == 2:
ax = fig.add_subplot(111)
# Plot dots
if n_components == 3:
ax.scatter(proj[:, 0], proj[:, 1], proj[:, 2], s=1, c='k')
elif n_components == 2:
ax.scatter(proj[:, 0], proj[:, 1], s=1, c='k')
# Plot labels
if labels_color is None:
labels_color = ['black'] * len(self.labels)
if n_components == 3:
for ((x, y, z), label, color) in zip(proj, self.labels, labels_color):
ax.text(x, y, z, label, color='white', #color,
bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
else:
for ((x, y), label, color) in zip(proj, self.labels, labels_color):
ax.text(x, y, label, color='white', #color,
bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if returnFig:
return fig
def distance_to_similarity(self, beta=1):
'''Convert distance matrix to similarity matrix
Args:
beta: parameter to scale exponential function (default: 1)
Returns:
Adjacency object
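
        Example Usage:
            The conversion applies sim = exp(-beta * d / d.std()); a
            smaller beta gives a gentler falloff:

                sim = dat.distance_to_similarity(beta=0.5)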
'''
if self.matrix_type == 'distance':
return Adjacency(np.exp(-beta*self.squareform()/self.squareform().std()),
labels=self.labels, matrix_type='similarity')
else:
raise ValueError('Matrix is not a distance matrix.')
def similarity_to_distance(self):
'''Convert similarity matrix to distance matrix'''
if self.matrix_type == 'similarity':
return Adjacency(1-self.squareform(),
labels=self.labels, matrix_type='distance')
else:
raise ValueError('Matrix is not a similarity matrix.')
    def within_cluster_mean(self, clusters=None):
''' This function calculates mean within cluster labels
Args:
clusters: list of cluster labels
Returns:
dict: within cluster means
'''
        distance = pd.DataFrame(self.squareform())
clusters = np.array(clusters)
if len(clusters) != distance.shape[0]:
raise ValueError('Cluster labels must be same length as distance matrix')
        out = {}
for i in list(set(clusters)):
out[i] = np.mean(distance.loc[clusters==i,clusters==i].values[np.triu_indices(sum(clusters==i),k=1)])
return out
def regress(self, X, mode='ols', **kwargs):
''' Run a regression on an adjacency instance.
You can decompose an adjacency instance with another adjacency instance.
You can also decompose each pixel by passing a design_matrix instance.
Args:
X: Design matrix can be an Adjacency or Design_Matrix instance
            mode: type of regression (default: 'ols')

        Returns:
            stats: (dict) dictionary of results with keys 'beta', 't',
                   'p', and 'residual'
        '''
stats = {}
if isinstance(X, Adjacency):
if X.square_shape()[0] != self.square_shape()[0]:
raise ValueError('Adjacency instances must be the same size.')
b,t,p,_,res = regression(X.data.T, self.data, mode=mode, **kwargs)
stats['beta'],stats['t'],stats['p'],stats['residual'] = (b,t,p,res)
elif isinstance(X, Design_Matrix):
if X.shape[0] != len(self):
raise ValueError('Design matrix must have same number of observations as Adjacency')
b,t,p,df,res = regression(X, self.data, mode=mode, **kwargs)
stats['beta'], stats['t'], stats['p'] = [x for x in self[:3]]
stats['beta'].data, stats['t'].data, stats['p'].data = b.squeeze(), t.squeeze(), p.squeeze()
stats['residual'] = self.copy()
stats['residual'].data = res
else:
raise ValueError('X must be a Design_Matrix or Adjacency Instance.')
return stats
| mit |
vitaly-krugl/nupic | examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py | 10 | 15037 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'sum')],
'days': 0,
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 1,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : { u'days': 0, u'hours': 0},
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
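# For example, a predictAheadTime of 5 hours with the 1-hour aggregation
# period above divides out to predictionSteps = 5, i.e. the classifier is
# trained to predict 5 aggregated records ahead.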
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'hotGym.csv',
u'last_record': 100,
u'source': u'file://extra/hotgym/hotgym.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1, 5]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}),
MetricSpec(field=u'consumption', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1, 5], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
vitaly-krugl/nupic | examples/opf/experiments/missing_record/make_datasets.py | 15 | 4638 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets for the multi-step prediction experiments
"""
import os
import random
import datetime
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateSimple(filename="simple.csv", numSequences=1, elementsPerSeq=3,
numRepeats=10):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
At the end of the dataset, we introduce missing records so that test
  code can ensure that the model didn't get confused by them.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('timestamp', 'datetime', 'T'),
('field1', 'string', ''),
('field2', 'float', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
# Put 1 hour between each record
timestamp = datetime.datetime(year=2012, month=1, day=1, hour=0, minute=0,
second=0)
timeDelta = datetime.timedelta(hours=1)
# Write out the sequences without missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Now, write some out with missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Write out some more of the sequences *without* missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
outFile.close()
if __name__ == '__main__':
helpString = \
"""%prog [options]
Generate artificial datasets for testing multi-step prediction """
# ============================================================================
# Process command line arguments
parser = OptionParser(helpString)
parser.add_option("--verbosity", default=0, type="int",
help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.error("No arguments accepted")
# Set random seed
random.seed(42)
# Create the dataset directory if necessary
datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
if not os.path.exists(datasetsDir):
os.mkdir(datasetsDir)
# Generate the sample datasets
_generateSimple('simple_0.csv', numSequences=1, elementsPerSeq=3,
numRepeats=10)
| agpl-3.0 |
hfut721/RPN | tools/rpn_generate.py | 12 | 2994 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast/er/ R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Generate RPN proposals."""
import _init_paths
import numpy as np
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import cPickle
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Generate RPN proposals')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# RPN test settings
cfg.TEST.RPN_PRE_NMS_TOP_N = -1
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
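# PRE_NMS_TOP_N = -1 keeps every proposal before non-maximum suppression;
# POST_NMS_TOP_N then caps the output at the 2000 highest-scoring boxes
# that survive NMS.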
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb_boxes = imdb_proposals(net, imdb)
output_dir = get_output_dir(imdb, net)
rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_file)
| mit |
franosincic/edx-platform | lms/djangoapps/course_api/blocks/transformers/__init__.py | 37 | 1989 | """
Course API Block Transformers
"""
from .student_view import StudentViewTransformer
from .block_counts import BlockCountsTransformer
from .navigation import BlockNavigationTransformer
class SupportedFieldType(object):
"""
Metadata about fields supported by different transformers
"""
def __init__(
self,
block_field_name,
transformer=None,
requested_field_name=None,
serializer_field_name=None,
default_value=None
):
self.transformer = transformer
self.block_field_name = block_field_name
self.requested_field_name = requested_field_name or block_field_name
self.serializer_field_name = serializer_field_name or self.requested_field_name
self.default_value = default_value
# A list of metadata for additional requested fields to be used by the
# `BlockSerializer` class. Each entry provides information on how that field can
# be requested (`requested_field_name`), can be found (`transformer` and
# `block_field_name`), and should be serialized (`serializer_field_name` and
# `default_value`).
SUPPORTED_FIELDS = [
SupportedFieldType('category', requested_field_name='type'),
SupportedFieldType('display_name', default_value=''),
SupportedFieldType('graded'),
SupportedFieldType('format'),
# 'student_view_data'
SupportedFieldType(StudentViewTransformer.STUDENT_VIEW_DATA, StudentViewTransformer),
# 'student_view_multi_device'
SupportedFieldType(StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE, StudentViewTransformer),
# set the block_field_name to None so the entire data for the transformer is serialized
SupportedFieldType(None, BlockCountsTransformer, BlockCountsTransformer.BLOCK_COUNTS),
SupportedFieldType(
BlockNavigationTransformer.BLOCK_NAVIGATION,
BlockNavigationTransformer,
requested_field_name='nav_depth',
serializer_field_name='descendants',
)
]
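# Illustrative reading of the first entry above: a client requests the field
# "type", the value is read from the block field "category", and the
# serializer emits it as "type" again (serializer_field_name defaults to the
# requested name when not given).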
| agpl-3.0 |
ShridharSattur/WhereHows | wherehows-etl/src/main/resources/jython/DatasetTreeBuilder.py | 6 | 3348 | #
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import calendar
import json
import shutil
import sys
from com.ziclix.python.sql import zxJDBC
from wherehows.common import Constant
from datetime import datetime
from jython.ElasticSearchIndex import ElasticSearchIndex
class DatasetTreeBuilder:
def __init__(self, args):
username = args[Constant.WH_DB_USERNAME_KEY]
password = args[Constant.WH_DB_PASSWORD_KEY]
jdbc_driver = args[Constant.WH_DB_DRIVER_KEY]
jdbc_url = args[Constant.WH_DB_URL_KEY]
conn_mysql = zxJDBC.connect(jdbc_url, username, password, jdbc_driver)
cur = conn_mysql.cursor()
try:
query = "select distinct id, concat(SUBSTRING_INDEX(urn, ':///', 1), '/', SUBSTRING_INDEX(urn, ':///', -1)) p from dict_dataset order by 2"
cur.execute(query)
datasets = cur.fetchall()
self.dataset_dict = dict()
for dataset in datasets:
current = self.dataset_dict
path_arr = dataset[1].split('/')
for name in path_arr:
current = current.setdefault(name, {})
current["__ID_OF_DATASET__"] = dataset[0]
self.file_name = args[Constant.DATASET_TREE_FILE_NAME_KEY]
self.value = []
finally:
cur.close()
conn_mysql.close()
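# Illustrative shape of self.dataset_dict (hypothetical URN): the query above
# turns urn 'hdfs:///data/tracking/page_view' into 'hdfs/data/tracking/page_view',
# and a row (7, that path) is folded into nested dicts:
# {'hdfs': {'data': {'tracking': {'page_view': {'__ID_OF_DATASET__': 7}}}}}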
def build_trie_helper(self, depth, path, current, current_dict):
nodes = []
child_nodes = []
for child_key in sorted(current_dict.keys()):
if child_key == "__ID_OF_DATASET__":
# Find a leaf node
nodes.append({'title': current, 'level': depth, 'path': path, 'id': current_dict["__ID_OF_DATASET__"], 'children': [], 'folder': 0})
else:
delimiter = '/'
if depth == 1:
delimiter = ':///'
child_nodes.extend(self.build_trie_helper(depth + 1, path + delimiter + child_key, child_key, current_dict[child_key]))
if len(child_nodes) != 0:
nodes.append({'title': current, 'level': depth, 'path': path, 'children': child_nodes, 'folder': 1})
return nodes
def build_trie(self):
for top_key in sorted(self.dataset_dict.keys()):
self.value.extend(self.build_trie_helper(1, top_key, top_key, self.dataset_dict[top_key]))
def write_to_file(self):
tmp_file_name = self.file_name + ".tmp"
f = open(tmp_file_name, "w")
f.write(json.dumps({'children': self.value}))
f.close()
shutil.move(tmp_file_name, self.file_name)
def run(self):
self.build_trie()
self.write_to_file()
def saveTreeInElasticSearchIfApplicable(args):
es_url = args.get(Constant.ELASTICSEARCH_URL_KEY, None)
es_port = args.get(Constant.ELASTICSEARCH_PORT_KEY, None)
if es_url and es_port:
esi = ElasticSearchIndex(args)
d = datetime.utcnow()
unixtime = calendar.timegm(d.utctimetuple())
esi.update_dataset(unixtime)
if __name__ == "__main__":
d = DatasetTreeBuilder(sys.argv[1])
d.run()
saveTreeInElasticSearchIfApplicable(sys.argv[1])
| apache-2.0 |
classrank/ClassRank | classrank/filters/collabfilter.py | 1 | 2539 | import numpy as np
from sklearn.decomposition import TruncatedSVD
from scipy import sparse
from classrank.filters.datawrapper import DataWrapper
class CollaborativeFilter:
# data is a nested dict of ratings; DataWrapper exposes it to the filter as a matrix.
def __init__(self, data=dict(), numRecommendations=1, db=None, metric="rating", school="gatech"):
self.dataset = DataWrapper(instances=data, db=db, school=school, metric=metric)
self.updated = False
self.sparsedata = None
self.sparseifyData()
try:
self.svd = TruncatedSVD(n_components=numRecommendations)
self.model = self.svd.inverse_transform(self.svd.fit_transform(self.sparsedata))
except ValueError:
self.svd = None
self.model = None
raise ValueError("Not enough ratings for predictions")
def getRecommendation(self, instances):
if self.updated:
self.sparseifyData()
self.model = self.svd.inverse_transform(self.svd.fit_transform(self.sparsedata))
self.updated = False
ret = {}
for instance in instances:
values = {}
for feature in instances[instance]:
try:
row = self.dataset.getRow(instance)
column = self.dataset.getColumn(feature)
values[feature] = self.model[row][column]
except KeyError:
values[feature] = None
ret[instance] = values
return ret
def updateValues(self, instances):
self.dataset.addData(instances)
self.updated = True
def forceModelUpdate(self):
self.updated = False
self.sparseifyData()
self.model = self.svd.inverse_transform(self.svd.fit_transform(self.sparsedata))
def sparseifyData(self):
data = self.dataset.getData()
sparsematrix = sparse.dok_matrix((len(data), len(data[0])))
for i in range(len(data)):
for j in range(len(data[i])):
if data[i][j] is not None:
sparsematrix[i, j] = data[i][j]
self.sparsedata = sparsematrix
def getSparseData(self):
return self.sparsedata
def getModel(self):
return self.model
def getData(self, *args):
if len(args) == 2:
return self.dataset.getData(args[0], args[1])
else:
return self.dataset.getData()
def getUpdated(self):
return self.updated
def getDataDict(self):
return self.dataset.getDataDict()
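# Illustrative usage sketch (hypothetical data; DataWrapper internals are
# assumed to accept an in-memory {user: {item: rating}} dict):
#
# cf = CollaborativeFilter(data={'alice': {'cs101': 4, 'cs202': 5},
#                                'bob': {'cs101': 3, 'cs202': 2}},
#                          numRecommendations=1)
# cf.getRecommendation({'alice': ['cs202']})  # -> {'alice': {'cs202': <prediction>}}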
| gpl-2.0 |
msultan/msmbuilder | msmbuilder/cluster/__init__.py | 8 | 3364 | # Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors: Matthew Harrigan <matthew.harrigan@outlook.com>, Brooke Husic <brookehusic@gmail.com>
# Copyright (c) 2016, Stanford University
# All rights reserved.
from __future__ import absolute_import, print_function, division
import warnings
from sklearn import cluster
try:
# sklearn >= 0.18
from sklearn.mixture import GaussianMixture as sklearn_GMM
except ImportError:
from sklearn.mixture import GMM as sklearn_GMM
from ..base import BaseEstimator
from .base import MultiSequenceClusterMixin
from .kcenters import KCenters
from .ndgrid import NDGrid
from .agglomerative import LandmarkAgglomerative
from .regularspatial import RegularSpatial
from .kmedoids import KMedoids
from .minibatchkmedoids import MiniBatchKMedoids
from .apm import APM
warnings.filterwarnings("once", '', DeprecationWarning, r'^sklearn\.')
__all__ = ['KMeans', 'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'GMM', 'SpectralClustering', 'KCenters', 'NDGrid',
'LandmarkAgglomerative', 'RegularSpatial', 'KMedoids',
'MiniBatchKMedoids', 'MultiSequenceClusterMixin', 'APM',
'AgglomerativeClustering']
def _replace_labels(doc):
"""Really hacky find-and-replace method that modifies one of the sklearn
docstrings to change the semantics of labels_ for the subclasses"""
lines = doc.splitlines()
labelstart, labelend = None, None
foundattributes = False
for i, line in enumerate(lines):
stripped = line.strip()
if stripped == 'Attributes':
foundattributes = True
if foundattributes and not labelstart and stripped.startswith('labels_'):
labelstart = len('\n'.join(lines[:i])) + 1
if labelstart and not labelend and stripped == '':
labelend = len('\n'.join(lines[:i + 1]))
if labelstart is None or labelend is None:
return doc
replace = '\n'.join([
' labels_ : list of arrays, each of shape [sequence_length, ]',
' The label of each point is an integer in [0, n_clusters).',
'',
])
return doc[:labelstart] + replace + doc[labelend:]
class KMeans(MultiSequenceClusterMixin, cluster.KMeans, BaseEstimator):
__doc__ = _replace_labels(cluster.KMeans.__doc__)
class MiniBatchKMeans(MultiSequenceClusterMixin, cluster.MiniBatchKMeans,
BaseEstimator):
__doc__ = _replace_labels(cluster.MiniBatchKMeans.__doc__)
class AffinityPropagation(MultiSequenceClusterMixin,
cluster.AffinityPropagation, BaseEstimator):
__doc__ = _replace_labels(cluster.AffinityPropagation.__doc__)
class MeanShift(MultiSequenceClusterMixin, cluster.MeanShift, BaseEstimator):
__doc__ = _replace_labels(cluster.MeanShift.__doc__)
class SpectralClustering(MultiSequenceClusterMixin, cluster.SpectralClustering,
BaseEstimator):
__doc__ = _replace_labels(cluster.SpectralClustering.__doc__)
class AgglomerativeClustering(MultiSequenceClusterMixin,
cluster.AgglomerativeClustering,
BaseEstimator):
__doc__ = _replace_labels(cluster.AgglomerativeClustering.__doc__)
class GMM(MultiSequenceClusterMixin, sklearn_GMM, BaseEstimator):
__doc__ = _replace_labels(sklearn_GMM.__doc__)
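# Illustrative usage sketch (synthetic data; not part of this module): the
# MultiSequenceClusterMixin wrappers fit a list of 2D arrays, one per
# sequence, and labels_ becomes a list of per-sequence label arrays, as
# described by _replace_labels above.
#
# import numpy as np
# seqs = [np.random.randn(100, 2), np.random.randn(50, 2)]
# labels = KMeans(n_clusters=3).fit(seqs).labels_  # two arrays: 100 and 50 labels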
| lgpl-2.1 |
uber/pyro | examples/inclined_plane.py | 1 | 5455 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import numpy as np
import torch
import pyro
from pyro.distributions import Normal, Uniform
from pyro.infer import EmpiricalMarginal, Importance
"""
Samantha really likes physics---but she likes Pyro even more. Instead of using
calculus to do her physics lab homework (which she could easily do), she's going
to use Bayesian inference. The problem setup is as follows. In lab she observed
a little box slide down an inclined plane (length of 2 meters and with an incline of
30 degrees) 20 times. Each time she measured and recorded the descent time. The timing
device she used has a known measurement error of 20 milliseconds. Using the observed
data, she wants to infer the coefficient of friction mu between the box and the inclined
plane. She already has (deterministic) python code that can simulate the amount of time
that it takes the little box to slide down the inclined plane as a function of mu. Using
Pyro, she can reverse the simulator and infer mu from the observed descent times.
"""
little_g = 9.8 # m/s/s
mu0 = 0.12 # actual coefficient of friction in the experiment
time_measurement_sigma = 0.02 # observation noise in seconds (known quantity)
# the forward simulator, which does numerical integration of the equations of motion
# in steps of size dt, and optionally includes measurement noise
def simulate(mu, length=2.0, phi=np.pi / 6.0, dt=0.005, noise_sigma=None):
T = torch.zeros(())
velocity = torch.zeros(())
displacement = torch.zeros(())
acceleration = (
torch.tensor(little_g * np.sin(phi)) - torch.tensor(little_g * np.cos(phi)) * mu
)
if (
acceleration.numpy() <= 0.0
): # the box doesn't slide if the friction is too large
return torch.tensor(1.0e5) # return a very large time instead of infinity
while (
displacement.numpy() < length
): # otherwise slide to the end of the inclined plane
displacement += velocity * dt
velocity += acceleration * dt
T += dt
if noise_sigma is None:
return T
else:
return T + noise_sigma * torch.randn(())
# analytic formula that the simulator above is computing via
# numerical integration (no measurement noise)
def analytic_T(mu, length=2.0, phi=np.pi / 6.0):
numerator = 2.0 * length
denominator = little_g * (np.sin(phi) - mu * np.cos(phi))
return np.sqrt(numerator / denominator)
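# Quick sanity check (illustrative, not part of the original example): with
# length=2.0 and phi=pi/6, analytic_T(0.0) = sqrt(4.0 / (9.8 * 0.5)) ~= 0.903 s,
# and simulate(torch.tensor(0.0)) should agree to within the dt=0.005 step size.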
# generate N_obs observations using simulator and the true coefficient of friction mu0
print("generating simulated data using the true coefficient of friction %.3f" % mu0)
N_obs = 20
torch.manual_seed(2)
observed_data = torch.tensor(
[
simulate(torch.tensor(mu0), noise_sigma=time_measurement_sigma)
for _ in range(N_obs)
]
)
observed_mean = np.mean([T.item() for T in observed_data])
# define model with uniform prior on mu and gaussian noise on the descent time
def model(observed_data):
mu_prior = Uniform(0.0, 1.0)
mu = pyro.sample("mu", mu_prior)
def observe_T(T_obs, obs_name):
T_simulated = simulate(mu)
T_obs_dist = Normal(T_simulated, torch.tensor(time_measurement_sigma))
pyro.sample(obs_name, T_obs_dist, obs=T_obs)
for i, T_obs in enumerate(observed_data):
observe_T(T_obs, "obs_%d" % i)
return mu
def main(args):
# create an importance sampler (the prior is used as the proposal distribution)
importance = Importance(model, guide=None, num_samples=args.num_samples)
# get posterior samples of mu (which is the return value of model)
# from the raw execution traces provided by the importance sampler.
print("doing importance sampling...")
emp_marginal = EmpiricalMarginal(importance.run(observed_data))
# calculate statistics over posterior samples
posterior_mean = emp_marginal.mean
posterior_std_dev = emp_marginal.variance.sqrt()
# report results
inferred_mu = posterior_mean.item()
inferred_mu_uncertainty = posterior_std_dev.item()
print(
"the coefficient of friction inferred by pyro is %.3f +- %.3f"
% (inferred_mu, inferred_mu_uncertainty)
)
# note that, given the finite step size in the simulator, the simulated descent times will
# not precisely match the numbers from the analytic result.
# in particular the first two numbers reported below should match each other pretty closely
# but will be systematically off from the third number
print(
"the mean observed descent time in the dataset is: %.4f seconds" % observed_mean
)
print(
"the (forward) simulated descent time for the inferred (mean) mu is: %.4f seconds"
% simulate(posterior_mean).item()
)
print(
(
"disregarding measurement noise, elementary calculus gives the descent time\n"
+ "for the inferred (mean) mu as: %.4f seconds"
)
% analytic_T(posterior_mean.item())
)
"""
################## EXERCISE ###################
# vectorize the computations in this example! #
###############################################
"""
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument("-n", "--num-samples", default=500, type=int)
args = parser.parse_args()
main(args)
| apache-2.0 |
matthew-tucker/mne-python | examples/connectivity/plot_mne_inverse_psi_visual.py | 18 | 4555 | """
=====================================================================
Compute Phase Slope Index (PSI) in source space for a visual stimulus
=====================================================================
This example demonstrates how the Phase Slope Index (PSI) [1] can be computed
in source space based on single trial dSPM source estimates. In addition,
the example shows advanced usage of the connectivity estimation routines
by first extracting a label time course for each epoch and then combining
the label time course with the single trial source estimates to compute the
connectivity.
The result clearly shows how the activity in the visual label precedes more
widespread activity (a positive PSI means the label time course is leading).
References
----------
[1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
pp. 1-4, Jun. 2008.
"""
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
from mne.connectivity import seed_target_indices, phase_slope_index
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_label = data_path + '/MEG/sample/labels/Vis-lh.label'
event_id, tmin, tmax = 4, -0.2, 0.3
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = Raw(fname_raw)
events = mne.read_events(fname_event)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch. Note that since we are passing
# the output to both extract_label_time_course and the phase_slope_index
# functions, we have to use "return_generator=False", since it is only possible
# to iterate over generators once.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=False)
# Now, we generate seed time series by averaging the activity in the left
# visual cortex
label = mne.read_label(fname_label)
src = inverse_operator['src'] # the source space used
seed_ts = mne.extract_label_time_course(stcs, label, src, mode='mean_flip')
# Combine the seed time course with the source estimates. There will be a total
# of 7500 signals:
# index 0: time course extracted from label
# index 1..7499: dSPM source space time courses
comb_ts = zip(seed_ts, stcs)
# Construct indices to estimate connectivity between the label time course
# and all source space time courses
vertices = [src[i]['vertno'] for i in range(2)]
n_signals_tot = 1 + len(vertices[0]) + len(vertices[1])
indices = seed_target_indices([0], np.arange(1, n_signals_tot))
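# indices is a pair of aligned index arrays pairing the seed (signal 0) with
# every source space signal (1..n_signals_tot - 1), i.e. 7499 seed-target pairs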
# Compute the PSI in the frequency range 8Hz..30Hz. We exclude the baseline
# period from the connectivity estimation
fmin = 8.
fmax = 30.
tmin_con = 0.
sfreq = raw.info['sfreq'] # the sampling frequency
psi, freqs, times, n_epochs, _ = phase_slope_index(
comb_ts, mode='multitaper', indices=indices, sfreq=sfreq,
fmin=fmin, fmax=fmax, tmin=tmin_con)
# Generate a SourceEstimate with the PSI. This is simple since we used a single
# seed (inspect the indices variable to see how the PSI scores are arranged in
# the output)
psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1,
subject='sample')
# Now we can visualize the PSI using the plot method. We use a custom colormap
# to show signed values
v_max = np.max(np.abs(psi))
brain = psi_stc.plot(surface='inflated', hemi='lh',
time_label='Phase Slope Index (PSI)',
subjects_dir=subjects_dir,
clim=dict(kind='percent', pos_lims=(95, 97.5, 100)))
brain.show_view('medial')
brain.add_label(fname_label, color='green', alpha=0.7)
| bsd-3-clause |
uber/pyro | pyro/infer/trace_mean_field_elbo.py | 1 | 8173 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import warnings
import weakref
import torch
from torch.distributions import kl_divergence
import pyro.ops.jit
from pyro.distributions.util import scale_and_mask
from pyro.infer.trace_elbo import Trace_ELBO
from pyro.infer.util import (
check_fully_reparametrized,
is_validation_enabled,
torch_item,
)
from pyro.util import warn_if_nan
def _check_mean_field_requirement(model_trace, guide_trace):
"""
Checks that the guide and model sample sites are ordered identically.
This is sufficient but not necessary for correctness.
"""
model_sites = [
name
for name, site in model_trace.nodes.items()
if site["type"] == "sample" and name in guide_trace.nodes
]
guide_sites = [
name
for name, site in guide_trace.nodes.items()
if site["type"] == "sample" and name in model_trace.nodes
]
assert set(model_sites) == set(guide_sites)
if model_sites != guide_sites:
warnings.warn(
"Failed to verify mean field restriction on the guide. "
"To eliminate this warning, ensure model and guide sites "
"occur in the same order.\n"
+ "Model sites:\n "
+ "\n ".join(model_sites)
+ "Guide sites:\n "
+ "\n ".join(guide_sites)
)
class TraceMeanField_ELBO(Trace_ELBO):
"""
A trace implementation of ELBO-based SVI. This is currently the only
ELBO estimator in Pyro that uses analytic KL divergences when those
are available.
In contrast to, e.g.,
:class:`~pyro.infer.tracegraph_elbo.TraceGraph_ELBO` and
:class:`~pyro.infer.tracegraph_elbo.Trace_ELBO` this estimator places
restrictions on the dependency structure of the model and guide.
In particular it assumes that the guide has a mean-field structure,
i.e. that it factorizes across the different latent variables present
in the guide. It also assumes that all of the latent variables in the
guide are reparameterized. This latter condition is satisfied for, e.g.,
the Normal distribution but is not satisfied for, e.g., the Categorical
distribution.
.. warning:: This estimator may give incorrect results if the mean-field
condition is not satisfied.
Note for advanced users:
The mean field condition is a sufficient but not necessary condition for
this estimator to be correct. The precise condition is that for every
latent variable `z` in the guide, its parents in the model must not include
any latent variables that are descendants of `z` in the guide. Here
'parents in the model' and 'descendants in the guide' is with respect
to the corresponding (statistical) dependency structure. For example, this
condition is always satisfied if the model and guide have identical
dependency structures.
"""
def _get_trace(self, model, guide, args, kwargs):
model_trace, guide_trace = super()._get_trace(model, guide, args, kwargs)
if is_validation_enabled():
_check_mean_field_requirement(model_trace, guide_trace)
return model_trace, guide_trace
def loss(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Evaluates the ELBO with an estimator that uses num_particles many samples/particles.
"""
loss = 0.0
for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs):
loss_particle, _ = self._differentiable_loss_particle(
model_trace, guide_trace
)
loss = loss + loss_particle / self.num_particles
warn_if_nan(loss, "loss")
return loss
def _differentiable_loss_particle(self, model_trace, guide_trace):
elbo_particle = 0
for name, model_site in model_trace.nodes.items():
if model_site["type"] == "sample":
if model_site["is_observed"]:
elbo_particle = elbo_particle + model_site["log_prob_sum"]
else:
guide_site = guide_trace.nodes[name]
if is_validation_enabled():
check_fully_reparametrized(guide_site)
# use kl divergence if available, else fall back on sampling
try:
kl_qp = kl_divergence(guide_site["fn"], model_site["fn"])
kl_qp = scale_and_mask(
kl_qp, scale=guide_site["scale"], mask=guide_site["mask"]
)
if torch.is_tensor(kl_qp):
assert kl_qp.shape == guide_site["fn"].batch_shape
kl_qp_sum = kl_qp.sum()
else:
kl_qp_sum = (
kl_qp * torch.Size(guide_site["fn"].batch_shape).numel()
)
elbo_particle = elbo_particle - kl_qp_sum
except NotImplementedError:
entropy_term = guide_site["score_parts"].entropy_term
elbo_particle = (
elbo_particle
+ model_site["log_prob_sum"]
- entropy_term.sum()
)
# handle auxiliary sites in the guide
for name, guide_site in guide_trace.nodes.items():
if guide_site["type"] == "sample" and name not in model_trace.nodes:
assert guide_site["infer"].get("is_auxiliary")
if is_validation_enabled():
check_fully_reparametrized(guide_site)
entropy_term = guide_site["score_parts"].entropy_term
elbo_particle = elbo_particle - entropy_term.sum()
loss = -(
elbo_particle.detach()
if torch._C._get_tracing_state()
else torch_item(elbo_particle)
)
surrogate_loss = -elbo_particle
return loss, surrogate_loss
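# Illustrative usage sketch (assumed model, guide and data; not part of this
# module):
#
# svi = pyro.infer.SVI(model, guide, pyro.optim.Adam({"lr": 1e-3}),
#                      loss=TraceMeanField_ELBO())
# for step in range(1000):
#     svi.step(data)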
class JitTraceMeanField_ELBO(TraceMeanField_ELBO):
"""
Like :class:`TraceMeanField_ELBO` but uses :func:`pyro.ops.jit.trace` to
compile :meth:`loss_and_grads`.
This works only for a limited set of models:
- Models must have static structure.
- Models must not depend on any global data (except the param store).
- All model inputs that are tensors must be passed in via ``*args``.
- All model inputs that are *not* tensors must be passed in via
``**kwargs``, and compilation will be triggered once per unique
``**kwargs``.
"""
def differentiable_loss(self, model, guide, *args, **kwargs):
kwargs["_pyro_model_id"] = id(model)
kwargs["_pyro_guide_id"] = id(guide)
if getattr(self, "_loss_and_surrogate_loss", None) is None:
# build a closure for loss_and_surrogate_loss
weakself = weakref.ref(self)
@pyro.ops.jit.trace(
ignore_warnings=self.ignore_jit_warnings, jit_options=self.jit_options
)
def differentiable_loss(*args, **kwargs):
kwargs.pop("_pyro_model_id")
kwargs.pop("_pyro_guide_id")
self = weakself()
loss = 0.0
for model_trace, guide_trace in self._get_traces(
model, guide, args, kwargs
):
_, loss_particle = self._differentiable_loss_particle(
model_trace, guide_trace
)
loss = loss + loss_particle / self.num_particles
return loss
self._differentiable_loss = differentiable_loss
return self._differentiable_loss(*args, **kwargs)
def loss_and_grads(self, model, guide, *args, **kwargs):
loss = self.differentiable_loss(model, guide, *args, **kwargs)
loss.backward()
loss = torch_item(loss)
warn_if_nan(loss, "loss")
return loss
| apache-2.0 |
sinkpoint/dipy | doc/examples/reconst_dti.py | 5 | 9303 | """
============================================================
Reconstruction of the diffusion signal with the Tensor model
============================================================
The diffusion tensor model is a model that describes the diffusion within a
voxel. First proposed by Basser and colleagues [Basser1994]_, it has been very
influential in demonstrating the utility of diffusion MRI in characterizing the
micro-structure of white matter tissue and of the biophysical properties of
tissue, inferred from local diffusion properties and it is still very commonly
used.
The diffusion tensor models the diffusion signal as:
.. math::
\frac{S(\mathbf{g}, b)}{S_0} = e^{-b\mathbf{g}^T \mathbf{D} \mathbf{g}}
Where $\mathbf{g}$ is a unit vector in 3-space indicating the direction of
measurement and $b$ encodes the measurement parameters, such as the strength
and duration of the diffusion-weighting gradient. $S(\mathbf{g}, b)$ is the
measured diffusion-weighted signal and $S_0$ is the signal acquired with no
diffusion weighting. $\mathbf{D}$ is a positive-definite quadratic
form, which contains six free parameters to be fit. These six parameters are:
.. math::
\mathbf{D} = \begin{pmatrix} D_{xx} & D_{xy} & D_{xz} \\
D_{yx} & D_{yy} & D_{yz} \\
D_{zx} & D_{zy} & D_{zz} \\ \end{pmatrix}
This matrix is a variance/covariance matrix of the diffusivity along the three
spatial dimensions. Note that we can assume that diffusivity has antipodal
symmetry, so elements across the diagonal are equal. For example:
$D_{xy} = D_{yx}$. This is why there are only 6 free parameters to estimate
here.
In the following example we show how to reconstruct your diffusion datasets
using a single tensor model.
First import the necessary modules:
``numpy`` is for numerical computation
"""
import numpy as np
"""
``nibabel`` is for loading imaging datasets
"""
import nibabel as nib
"""
``dipy.reconst`` is for the reconstruction algorithms which we use to create
voxel models from the raw data.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
from dipy.data import fetch_stanford_hardi
"""
Fetch will download the raw dMRI dataset of a single subject. The size of the
dataset is 87 MBytes. You only need to fetch once.
"""
fetch_stanford_hardi()
"""
Next, we read the saved dataset
"""
from dipy.data import read_stanford_hardi
img, gtab = read_stanford_hardi()
"""
img contains a nibabel Nifti1Image object (with the data) and gtab contains a
GradientTable object (information about the gradients e.g. b-values and
b-vectors).
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(81, 106, 76, 160)``
First of all, we mask and crop the data. This is a quick way to avoid
calculating Tensors on the background of the image. This is done using dipy's
mask module.
"""
from dipy.segment.mask import median_otsu
maskdata, mask = median_otsu(data, 3, 1, True,
vol_idx=range(10, 50), dilate=2)
print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)
"""
maskdata.shape ``(72, 87, 59, 160)``
Now that we have prepared the datasets we can go forward with the voxel
reconstruction. First, we instantiate the Tensor model in the following way.
"""
tenmodel = dti.TensorModel(gtab)
"""
Fitting the data is very simple. We just need to call the fit method of the
TensorModel in the following way:
"""
tenfit = tenmodel.fit(maskdata)
"""
The fit method creates a TensorFit object which contains the fitting parameters
and other attributes of the model. For example we can generate fractional
anisotropy (FA) from the eigen-values of the tensor. FA is used to characterize
the degree to which the distribution of diffusion in a voxel is
directional. That is, whether there is relatively unrestricted diffusion in one
particular direction.
Mathematically, FA is defined as the normalized variance of the eigen-values of
the tensor:
.. math::
FA = \sqrt{\frac{1}{2}\frac{(\lambda_1-\lambda_2)^2+(\lambda_1-
\lambda_3)^2+(\lambda_2-\lambda_3)^2}{\lambda_1^2+
\lambda_2^2+\lambda_3^2}}
Note that FA should be interpreted carefully. It may be an indication of the
density of packing of fibers in a voxel, and the amount of myelin wrapping these
axons, but it is not always a measure of "tissue integrity". For example, FA
may decrease in locations in which there is fanning of white matter fibers, or
where more than one population of white matter fibers crosses.
"""
print('Computing anisotropy measures (FA, MD, RGB)')
from dipy.reconst.dti import fractional_anisotropy, color_fa, lower_triangular
FA = fractional_anisotropy(tenfit.evals)
"""
In the background of the image the fitting will not be accurate because there
is no signal, and we may find FA values that are NaN (not a number). We can
easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
Saving the FA images is very easy using nibabel. We need the FA volume and the
affine matrix which transform the image's coordinates to the world coordinates.
Here, we choose to save the FA in float32.
"""
fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
nib.save(fa_img, 'tensor_fa.nii.gz')
"""
You can now see the result with any nifti viewer or check it slice by slice
using matplotlib_'s imshow. In the same way you can save the eigen values, the
eigen vectors or any other properties of the Tensor.
"""
evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine())
nib.save(evecs_img, 'tensor_evecs.nii.gz')
"""
Other tensor statistics can be calculated from the `tenfit` object. For example,
a commonly calculated statistic is the mean diffusivity (MD). This is simply the
mean of the eigenvalues of the tensor. Since FA is a normalized
measure of variance and MD is the mean, they are often used as complementary
measures. In `dipy`, there are two equivalent ways to calculate the mean
diffusivity. One is by calling the `mean_diffusivity` module function on the
eigen-values of the TensorFit class instance:
"""
MD1 = dti.mean_diffusivity(tenfit.evals)
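# mean_diffusivity is simply the per-voxel average of the three eigenvalues,
# (lambda_1 + lambda_2 + lambda_3) / 3.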
nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), 'tensors_md.nii.gz')
"""
The other is to call the TensorFit class method:
"""
MD2 = tenfit.md
"""
Obviously, the quantities are identical.
We can also compute the colored FA or RGB-map [Pajevic1999]_. First, we make sure
that the FA is scaled between 0 and 1, we compute the RGB map and save it.
"""
FA = np.clip(FA, 0, 1)
RGB = color_fa(FA, tenfit.evecs)
nib.save(nib.Nifti1Image(np.array(255 * RGB, 'uint8'), img.get_affine()), 'tensor_rgb.nii.gz')
"""
Let's try to visualize the tensor ellipsoids of a small rectangular
area in an axial slice of the splenium of the corpus callosum (CC).
"""
print('Computing tensor ellipsoids in a part of the splenium of the CC')
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
from dipy.viz import fvtk
ren = fvtk.ren()
evals = tenfit.evals[13:43, 44:74, 28:29]
evecs = tenfit.evecs[13:43, 44:74, 28:29]
"""
We can color the ellipsoids using the ``color_fa`` values that we calculated
above. In this example we additionally normalize the values to increase the contrast.
"""
cfa = RGB[13:43, 44:74, 28:29]
cfa /= cfa.max()
fvtk.add(ren, fvtk.tensor(evals, evecs, cfa, sphere))
print('Saving illustration as tensor_ellipsoids.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids.png', size=(600, 600))
"""
.. figure:: tensor_ellipsoids.png
:align: center
**Tensor Ellipsoids**.
"""
fvtk.clear(ren)
"""
Finally, we can visualize the tensor orientation distribution functions
for the same area as we did with the ellipsoids.
"""
tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere)
fvtk.add(ren, fvtk.sphere_funcs(tensor_odfs, sphere, colormap=None))
# fvtk.show(ren)
print('Saving illustration as tensor_odfs.png')
fvtk.record(ren, n_frames=1, out_path='tensor_odfs.png', size=(600, 600))
"""
.. figure:: tensor_odfs.png
:align: center
**Tensor ODFs**.
Note that while the tensor model is an accurate and reliable model of the
diffusion signal in the white matter, it has the drawback that it only has one
principal diffusion direction. Therefore, in locations in the brain that
contain multiple fiber populations crossing each other, the tensor model may
indicate that the principal diffusion direction is intermediate to these
directions. Therefore, using the principal diffusion direction for tracking in
these locations may be misleading and may lead to errors in defining the
tracks. Fortunately, other reconstruction methods can be used to represent the
diffusion and fiber orientations in those locations. These are presented in
other examples.
.. [Basser1994] Basser PJ, Mattielo J, LeBihan (1994). MR diffusion tensor
spectroscopy and imaging.
.. [Pajevic1999] Pajevic S, Pierpaoli (1999). Color schemes to represent
the orientation of anisotropic tissues from diffusion tensor
data: application to white matter fiber tract mapping in
the human brain.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
matthew-tucker/mne-python | examples/inverse/plot_make_inverse_operator.py | 21 | 3232 | """
===============================================================
Assemble inverse operator and compute MNE-dSPM inverse solution
===============================================================
Assemble M/EEG, MEG, and EEG inverse operators and compute dSPM
inverse solution on MNE evoked dataset and stores the solution
in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
print(__doc__)
data_path = sample.data_path()
fname_fwd_meeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_fwd_eeg = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
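# lambda2 is the regularization strength of the minimum-norm inverse; the
# conventional choice is 1 / SNR**2, i.e. 1/9 for the assumed SNR of 3.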
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True)
noise_cov = mne.read_cov(fname_cov)
# Restrict forward solution as necessary for MEG
forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
# Alternatively, you can just load a forward solution that is restricted
forward_eeg = mne.read_forward_solution(fname_fwd_eeg, surf_ori=True)
# make an M/EEG, MEG-only, and EEG-only inverse operators
info = evoked.info
inverse_operator_meeg = make_inverse_operator(info, forward_meeg, noise_cov,
loose=0.2, depth=0.8)
inverse_operator_meg = make_inverse_operator(info, forward_meg, noise_cov,
loose=0.2, depth=0.8)
inverse_operator_eeg = make_inverse_operator(info, forward_eeg, noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meeg-oct-6-inv.fif',
inverse_operator_meeg)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
inverse_operator_meg)
write_inverse_operator('sample_audvis-eeg-oct-6-inv.fif',
inverse_operator_eeg)
# Compute inverse solution
stcs = dict()
stcs['meeg'] = apply_inverse(evoked, inverse_operator_meeg, lambda2, "dSPM",
pick_ori=None)
stcs['meg'] = apply_inverse(evoked, inverse_operator_meg, lambda2, "dSPM",
pick_ori=None)
stcs['eeg'] = apply_inverse(evoked, inverse_operator_eeg, lambda2, "dSPM",
pick_ori=None)
# Save result in stc files
names = ['meeg', 'meg', 'eeg']
for name in names:
stcs[name].save('mne_dSPM_inverse-%s' % name)
###############################################################################
# View activation time-series
plt.close('all')
plt.figure(figsize=(8, 6))
for ii in range(len(stcs)):
name = names[ii]
stc = stcs[name]
plt.subplot(len(stcs), 1, ii + 1)
plt.plot(1e3 * stc.times, stc.data[::150, :].T)
plt.ylabel('%s\ndSPM value' % str.upper(name))
plt.xlabel('time (ms)')
plt.show()
| bsd-3-clause |
uber/pyro | pyro/ops/streaming.py | 1 | 8903 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import copy
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Dict, Hashable, Union
import torch
from pyro.ops.welford import WelfordCovariance
class StreamingStats(ABC):
"""
Abstract base class for streamable statistics of trees of tensors.
Derived classes must implelement :meth:`update`, :meth:`merge`, and
:meth:`get`.
"""
@abstractmethod
def update(self, sample) -> None:
"""
Update state from a single sample.
This mutates ``self`` and returns nothing. Updates should be
independent of order, i.e. samples should be exchangeable.
:param sample: A sample value which is a nested dictionary of
:class:`torch.Tensor` leaves. This can have arbitrary nesting and
shape shape, but assumes shape is constant across calls to
``.update()``.
"""
raise NotImplementedError
@abstractmethod
def merge(self, other) -> "StreamingStats":
"""
Combine two aggregate statistics, e.g. from different MCMC chains.
This is a pure function: it returns a new :class:`StreamingStats`
object and does not modify either ``self`` or ``other``.
:param other: Another streaming stats instance of the same type.
"""
assert isinstance(other, type(self))
raise NotImplementedError
@abstractmethod
def get(self) -> Any:
"""
Return the aggregate statistic.
"""
raise NotImplementedError
class CountStats(StreamingStats):
"""
Statistic tracking only the number of samples.
For example::
>>> stats = CountStats()
>>> stats.update(torch.randn(3, 3))
>>> stats.get()
{'count': 1}
"""
def __init__(self):
self.count = 0
super().__init__()
def update(self, sample) -> None:
self.count += 1
def merge(self, other: "CountStats") -> "CountStats":
assert isinstance(other, type(self))
result = CountStats()
result.count = self.count + other.count
return result
def get(self) -> Dict[str, int]:
"""
:returns: A dictionary with keys ``count: int``.
:rtype: dict
"""
return {"count": self.count}
class StatsOfDict(StreamingStats):
"""
Statistics of samples that are dictionaries with constant set of keys.
For example the following are equivalent::
# Version 1. Hand encode statistics.
>>> a_stats = CountStats()
>>> b_stats = CountMeanStats()
>>> a_stats.update(torch.tensor(0.))
>>> b_stats.update(torch.tensor([1., 2.]))
>>> summary = {"a": a_stats.get(), "b": b_stats.get()}
# Version 2. Collect samples into dictionaries.
>>> stats = StatsOfDict({"a": CountStats, "b": CountMeanStats})
>>> stats.update({"a": torch.tensor(0.), "b": torch.tensor([1., 2.])})
>>> summary = stats.get()
>>> summary
{'a': {'count': 1}, 'b': {'count': 1, 'mean': tensor([1., 2.])}}
:param default: Default type of statistics of values of the dictionary.
Defaults to the inexpensive :class:`CountStats`.
:param dict types: Dictionary mapping key to type of statistic that should
be recorded for values corresponding to that key.
"""
def __init__(
self,
types: Dict[Hashable, Callable[[], StreamingStats]] = {},
default: Callable[[], StreamingStats] = CountStats,
):
self.stats: Dict[Hashable, StreamingStats] = defaultdict(default)
self.stats.update({k: v() for k, v in types.items()})
super().__init__()
def update(self, sample: Dict[Hashable, Any]) -> None:
for k, v in sample.items():
self.stats[k].update(v)
def merge(self, other: "StatsOfDict") -> "StatsOfDict":
assert isinstance(other, type(self))
result = copy.deepcopy(self)
for k in set(self.stats).union(other.stats):
if k not in self.stats:
result.stats[k] = copy.deepcopy(other.stats[k])
elif k in other.stats:
result.stats[k] = self.stats[k].merge(other.stats[k])
return result
def get(self) -> Dict[Hashable, Any]:
"""
:returns: A dictionary of statistics. The keys of this dictionary are
the same as the keys of the samples from which this object is
updated.
:rtype: dict
"""
return {k: v.get() for k, v in self.stats.items()}
class StackStats(StreamingStats):
"""
Statistic collecting a stream of tensors into a single stacked tensor.
"""
def __init__(self):
self.samples = []
def update(self, sample: torch.Tensor) -> None:
assert isinstance(sample, torch.Tensor)
self.samples.append(sample)
def merge(self, other: "StackStats") -> "StackStats":
assert isinstance(other, type(self))
result = StackStats()
result.samples = self.samples + other.samples
return result
def get(self) -> Dict[str, Union[int, torch.Tensor]]:
"""
:returns: A dictionary with keys ``count: int`` and (if any samples
have been collected) ``samples: torch.Tensor``.
:rtype: dict
"""
if not self.samples:
return {"count": 0}
return {"count": len(self.samples), "samples": torch.stack(self.samples)}
class CountMeanStats(StreamingStats):
"""
Statistic tracking the count and mean of a single :class:`torch.Tensor`.
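For example (a small illustrative sequence of updates)::
>>> stats = CountMeanStats()
>>> stats.update(torch.tensor([1., 3.]))
>>> stats.update(torch.tensor([3., 5.]))
>>> stats.get()
{'count': 2, 'mean': tensor([2., 4.])}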
"""
def __init__(self):
self.count = 0
self.mean = 0
super().__init__()
def update(self, sample: torch.Tensor) -> None:
assert isinstance(sample, torch.Tensor)
self.count += 1
self.mean += (sample.detach() - self.mean) / self.count
def merge(self, other: "CountMeanStats") -> "CountMeanStats":
assert isinstance(other, type(self))
result = CountMeanStats()
result.count = self.count + other.count
p = self.count / max(result.count, 1)
q = other.count / max(result.count, 1)
result.mean = p * self.mean + q * other.mean
return result
def get(self) -> Dict[str, Union[int, torch.Tensor]]:
"""
:returns: A dictionary with keys ``count: int`` and (if any samples
have been collected) ``mean: torch.Tensor``.
:rtype: dict
"""
if self.count == 0:
return {"count": 0}
return {"count": self.count, "mean": self.mean}
class CountMeanVarianceStats(StreamingStats):
"""
Statistic tracking the count, mean, and (diagonal) variance of a single
:class:`torch.Tensor`.
"""
def __init__(self):
self.shape = None
self.welford = WelfordCovariance(diagonal=True)
super().__init__()
def update(self, sample: torch.Tensor) -> None:
assert isinstance(sample, torch.Tensor)
if self.shape is None:
self.shape = sample.shape
assert sample.shape == self.shape
self.welford.update(sample.detach().reshape(-1))
def merge(self, other: "CountMeanVarianceStats") -> "CountMeanVarianceStats":
assert isinstance(other, type(self))
if self.shape is None:
return copy.deepcopy(other)
if other.shape is None:
return copy.deepcopy(self)
result = copy.deepcopy(self)
res = result.welford
lhs = self.welford
rhs = other.welford
res.n_samples = lhs.n_samples + rhs.n_samples
lhs_weight = lhs.n_samples / res.n_samples
rhs_weight = rhs.n_samples / res.n_samples
res._mean = lhs_weight * lhs._mean + rhs_weight * rhs._mean
res._m2 = (
lhs._m2
+ rhs._m2
+ (lhs.n_samples * rhs.n_samples / res.n_samples)
* (lhs._mean - rhs._mean) ** 2
)
return result
def get(self) -> Dict[str, Union[int, torch.Tensor]]:
"""
:returns: A dictionary with keys ``count: int`` and (if any samples
have been collected) ``mean: torch.Tensor`` and ``variance:
torch.Tensor``.
:rtype: dict
"""
if self.shape is None:
return {"count": 0}
count = self.welford.n_samples
mean = self.welford._mean.reshape(self.shape)
variance = self.welford.get_covariance(regularize=False).reshape(self.shape)
return {"count": count, "mean": mean, "variance": variance}
# Note this is ordered logically for sphinx rather than alphabetically.
__all__ = [
"StreamingStats",
"StatsOfDict",
"StackStats",
"CountStats",
"CountMeanStats",
"CountMeanVarianceStats",
]
| apache-2.0 |
matthew-tucker/mne-python | mne/datasets/megsim/megsim.py | 15 | 6566 | # Author: Eric Larson <larson.eric.d@gmail.com>
# License: BSD Style.
import os
from os import path as op
import zipfile
from sys import stdout
from ...utils import _fetch_file, _url_to_local_path, verbose
from ..utils import _get_path, _do_path_update
from .urls import (url_match, valid_data_types, valid_data_formats,
valid_conditions)
@verbose
def data_path(url, path=None, force_update=False, update_path=None,
verbose=None):
"""Get path to local copy of MEGSIM dataset URL
This is a low-level function useful for getting a local copy of a
remote MEGSIM dataset.
Parameters
----------
url : str
The dataset to use.
path : None | str
Location of where to look for the MEGSIM data storing location.
If None, the environment variable or config parameter
MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
"mne-python/examples" directory is used. If the MEGSIM dataset
is not found under the given path (e.g., as
"mne-python/examples/MEGSIM"), the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
config to the given path. If None, the user is prompted.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
path : list of str
Local paths to the given data files. If URL was a .fif file, this
will be a list of length 1. If it was a .zip file, it may potentially
be many files.
Notes
-----
For example, one could do:
>>> from mne.datasets import megsim
>>> url = 'http://cobre.mrn.org/megsim/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif'
>>> megsim.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
And this would download the given MEGSIM data file to the 'datasets'
folder, and prompt the user to save the 'datasets' path to the mne-python
config, if it isn't there already.
The MEGSIM dataset is documented in the following publication:
Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
(2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
Realistic Simulated and Empirical Data. Neuroinform 10:141-158
""" # noqa
key = 'MNE_DATASETS_MEGSIM_PATH'
name = 'MEGSIM'
path = _get_path(path, key, name)
destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
destinations = [destination]
split = op.splitext(destination)
is_zip = True if split[1].lower() == '.zip' else False
# Fetch the file
do_unzip = False
if not op.isfile(destination) or force_update:
if op.isfile(destination):
os.remove(destination)
if not op.isdir(op.dirname(destination)):
os.makedirs(op.dirname(destination))
_fetch_file(url, destination, print_destination=False)
do_unzip = True
if is_zip:
z = zipfile.ZipFile(destination)
decomp_dir, name = op.split(destination)
files = z.namelist()
# decompress if necessary (if download was re-done)
if do_unzip:
stdout.write('Decompressing %g files from\n'
'"%s" ...' % (len(files), name))
z.extractall(decomp_dir)
stdout.write(' [done]\n')
z.close()
destinations = [op.join(decomp_dir, f) for f in files]
path = _do_path_update(path, update_path, key, name)
return destinations
@verbose
def load_data(condition='visual', data_format='raw', data_type='experimental',
path=None, force_update=False, update_path=None, verbose=None):
"""Get path to local copy of MEGSIM dataset type
Parameters
----------
condition : str
The condition to use. Either 'visual', 'auditory', or 'somatosensory'.
data_format : str
The data format. Either 'raw', 'evoked', or 'single-trial'.
data_type : str
The type of data. Either 'experimental' or 'simulation'.
path : None | str
Location of where to look for the MEGSIM data storing location.
If None, the environment variable or config parameter
MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
"mne-python/examples" directory is used. If the MEGSIM dataset
is not found under the given path (e.g., as
"mne-python/examples/MEGSIM"), the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
config to the given path. If None, the user is prompted.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
paths : list
List of local data paths of the given type.
Notes
-----
For example, one could do:
>>> from mne.datasets import megsim
>>> megsim.load_data('visual', 'raw', 'experimental', os.getenv('HOME') + '/datasets') # doctest:+SKIP
And this would download the raw visual experimental MEGSIM dataset to the
'datasets' folder, and prompt the user to save the 'datasets' path to the
mne-python config, if it isn't there already.
The MEGSIM dataset is documented in the following publication:
Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
(2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
Realistic Simulated and Empirical Data. Neuroinform 10:141-158
""" # noqa
if not condition.lower() in valid_conditions:
raise ValueError('Unknown condition "%s"' % condition)
if data_format not in valid_data_formats:
raise ValueError('Unknown data_format "%s"' % data_format)
if data_type not in valid_data_types:
raise ValueError('Unknown data_type "%s"' % data_type)
urls = url_match(condition, data_format, data_type)
data_paths = list()
for url in urls:
data_paths.extend(data_path(url, path, force_update, update_path))
return data_paths
| bsd-3-clause |
jhseu/tensorflow | tensorflow/python/autograph/operators/control_flow.py | 1 | 36743 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc.
Note: most of these operators accept pairs of get_state/set_state functions, to
capture mutations that the corresponding code blocks might make. These
mutations only need to be captured when staging the control flow, and they just
work when reverting to Python behavior.
__Examples__
```
while cond:
self.x += i
```
When the functionalized version is executed as a Python loop, it just works:
```
def loop_body():
self.x += i # works as expected for Python loops
```
But it won't work for TF loops:
```
def loop_body():
self.x += i # self.x has the wrong value!
```
get_state/set_state allow piping the mutations through the loop variables as
well, in effect changing the loop body:
```
def loop_body(self_x):
self.x = self_x # self.x now has the proper value
self.x += i # the original block
self_x = self.x # write self.x back into the loop vars
return self_x
self_x = tf.while_loop(...)
self.x = self_x # the result is now properly captured
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
import numpy as np
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.autograph.utils import compat_util
from tensorflow.python.autograph.utils import misc
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest
# TODO(b/145618471): Remove this dependency.
# Lazy import to work around circular dependencies
input_lib = lazy_loader.LazyLoader(
'input_lib', globals(),
'tensorflow.python.distribute.input_lib')
PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops.
WARN_INEFFICIENT_UNROLL = True
INEFFICIENT_UNROLL_MIN_ITERATIONS = 3000
INEFFICIENT_UNROLL_MIN_OPS = 1
# TODO(mdan): Use the custom operator pattern instead of type dispatch.
# An example of this pattern is found in the implementation of distributed
# datasets. Before it can be used though, we need to standardize the interface.
# TODO(mdan): Use existing symbol names rather than carrying them separately.
def _disallow_undefs_into_loop(*values):
"""Ensures that all values in the state are defined when entering a loop."""
undefined = tuple(filter(special_values.is_undefined, values))
if undefined:
raise ValueError(
'{} must be defined before the loop.'.format(
','.join(s.symbol_name for s in undefined)))
for value in values:
if special_values.is_undefined_return(value):
# Assumption: the loop will only capture the variable which tracks the
# return value if the loop contained a return statement.
# TODO(mdan): This should be checked at the place where return occurs.
raise ValueError(
'return statements are not supported within a TensorFlow loop.')
def _is_subshape(left, right):
"""Returns True if left shape is at least as specific as right shape."""
# TODO(mdan): This code should be in TensorShape.
# Note: this is not the same as TensorShape.is_compatible_with, which is
# symmetric.
# This code also duplicates _ShapeLessThanOrEqual from control_flow_ops.py.
if right.dims is None:
return True
if left.ndims != right.ndims:
return False
for ldim, rdim in zip(left.dims, right.dims):
if rdim.value is not None and ldim.value != rdim.value:
return False
return True
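# For intuition (illustrative): a fully-defined shape is a subshape of a
# compatible partially-defined one, but not the other way around:
#   _is_subshape(TensorShape([3, 4]), TensorShape([3, None]))  # True
#   _is_subshape(TensorShape([3, None]), TensorShape([3, 4]))  # False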
# TODO(mdan): Remove these verifications once TF ops can properly report names.
def _verify_single_loop_var(
name, check_shape, init, entry, exit_, shape_invariant):
"""Verifies whether the initial, entry and exit values are consistent."""
if isinstance(init, (bool, int, float, str, np.ndarray)):
init = ops.convert_to_tensor_v2(init)
if isinstance(entry, (bool, int, float, str, np.ndarray)):
entry = ops.convert_to_tensor_v2(entry)
if isinstance(exit_, (bool, int, float, str)):
exit_ = ops.convert_to_tensor_v2(exit_)
if (not tensor_util.is_tensor(entry) or
not tensor_util.is_tensor(exit_)):
return
# TODO(mdan): Properly account for CompositeTensors.
if (not hasattr(entry, 'dtype') or
not hasattr(exit_, 'dtype')):
return
if (not hasattr(entry, 'shape') or
not hasattr(exit_, 'shape')):
return
if entry.dtype != exit_.dtype:
raise TypeError(
'"{}" has dtype {} before the loop, but dtype {} after one'
' iteration. TensorFlow control flow requires it stays the'
' same.'.format(
name,
entry.dtype.name,
exit_.dtype.name,
))
if check_shape:
exit_shape = exit_.shape
if shape_invariant is None:
entry_shape = entry.shape
if not _is_subshape(exit_shape, entry_shape):
raise ValueError(
'"{}" has shape {} before the loop, but shape {} after one'
' iteration. Use tf.autograph.experimental.set_loop_options to set'
' shape invariants.'.format(name, entry_shape, exit_shape))
else:
init_shape = init.shape
if not _is_subshape(init_shape, shape_invariant):
raise ValueError(
'"{}" has shape {} before the loop, which does not conform with'
' the shape invariant {}.'.format(name, init_shape,
shape_invariant))
if not _is_subshape(exit_shape, shape_invariant):
raise ValueError(
'"{}" has shape {} after one iteration, which does not conform with'
' the shape invariant {}.'.format(
name, exit_shape, shape_invariant))
def _verify_tf_loop_vars(init_vars,
iter_entry_vars,
iter_exit_vars,
symbol_names,
opts,
check_shapes=True):
"""Verifies loop variables for consistency."""
if check_shapes and 'shape_invariants' in opts:
shape_invariants = opts['shape_invariants']
else:
shape_invariants = nest.map_structure(lambda _: None, iter_entry_vars)
assert len(symbol_names) == len(shape_invariants)
assert len(symbol_names) == len(init_vars)
assert len(symbol_names) == len(iter_entry_vars)
assert len(symbol_names) == len(iter_exit_vars)
for i in range(len(symbol_names)):
name = symbol_names[i]
init = init_vars[i]
entry = iter_entry_vars[i]
exit_ = iter_exit_vars[i]
invariant = shape_invariants[i]
try:
nest.assert_same_structure(init, entry, expand_composites=True)
nest.assert_same_structure(entry, exit_, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError('"{}" does not have the same nested structure after one'
' iteration.\n\n{}'.format(name, e))
if invariant is not None:
try:
nest.assert_same_structure(init, invariant, expand_composites=False)
except (ValueError, TypeError) as e:
raise TypeError('"{}" does not have the same nested structure as its'
' corresponding shape invariant.\n\n{}'.format(name, e))
nest.map_structure(
functools.partial(_verify_single_loop_var, name, check_shapes), init,
entry, exit_, invariant)
def _verify_single_cond_var(name, body_var, orelse_var):
"""Verifies whether body_var and orelse_var are consistent."""
if isinstance(body_var, (bool, int, float, str)):
body_var = ops.convert_to_tensor_v2(body_var)
if isinstance(orelse_var, (bool, int, float, str)):
orelse_var = ops.convert_to_tensor_v2(orelse_var)
if (not tensor_util.is_tensor(body_var) or
not tensor_util.is_tensor(orelse_var)):
return
# TODO(mdan): Properly account for CompositeTensors.
if (not hasattr(body_var, 'dtype') or
not hasattr(orelse_var, 'dtype')):
return
if body_var.dtype != orelse_var.dtype:
raise TypeError(
'"{}" has dtype {} in the TRUE branch, but dtype={} in the FALSE'
' branch. TensorFlow control flow requires that they are the'
' same.'.format(name, body_var.dtype.name,
orelse_var.dtype.name))
def _verify_tf_cond_vars(body_vars, orelse_vars, symbol_names):
"""Verifies variables manipulated by a conditional for consistency."""
basic_body_vars, composite_body_vars = body_vars
basic_orelse_vars, composite_orelse_vars = orelse_vars
assert isinstance(composite_body_vars, tuple)
assert isinstance(composite_orelse_vars, tuple)
# TODO(kkimlabs): Make this more consistent.
# The basic outputs should always be a tuple.
if not isinstance(basic_body_vars, tuple):
basic_body_vars = (basic_body_vars,)
if not isinstance(basic_orelse_vars, tuple):
basic_orelse_vars = (basic_orelse_vars,)
body_vars = basic_body_vars + composite_body_vars
orelse_vars = basic_orelse_vars + composite_orelse_vars
named_vars = zip(symbol_names, body_vars, orelse_vars)
for name, body_var, orelse_var in named_vars:
try:
nest.assert_same_structure(
body_var, orelse_var, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError(
'"{}" does not have the same nested structure in the TRUE and FALSE'
' branches.\n\n{}'.format(name, str(e)))
nest.map_structure(
functools.partial(_verify_single_cond_var, name), body_var, orelse_var)
def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the variables local to the loop.
For example, given the loop below that calculates the geometric and
  arithmetic means of some numbers:
```
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
```
The state is represented by the variables geo_mean and arith_mean. The
`extra_test`, `body`, `get_state` and `set_state` functions must bind to the
original `geo_mean` and `arith_mean` symbols, using `nonlocal`.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and state as
return type. The actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
    set_state: Additional callable which saves values captured by get_state
      back into the Python environment. This is only useful when staging the
      loop.
symbol_names: Tuple containing names of the loop variables returned by
get_state.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
if tensors.is_range_tensor(iter_):
_tf_range_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
else:
_known_len_tf_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, dataset_ops.DatasetV2):
_tf_dataset_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, iterator_ops.OwnedIterator):
_tf_iterator_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, ragged_tensor.RaggedTensor):
_tf_ragged_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, input_lib.DistributedIterator):
raise NotImplementedError(
'distributed iterators not supported yet, use the distributed dataset'
' directly')
# TODO(mdan): Resolve the private access issue.
elif isinstance(iter_, input_lib._IterableInput): # pylint:disable=protected-access
_tf_distributed_iterable_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
else:
_py_for_stmt(iter_, extra_test, body, None, None)
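# Hedged usage sketch (illustrative, not part of the original module): the
# geometric/arithmetic-mean loop from the docstring above, expressed through
# the functional form. With a plain Python iterable this dispatches to
# _py_for_stmt; the function is never called at import time.
def _example_for_stmt_usage(numbers):  # pragma: no cover
  geo_mean = 1
  arith_mean = 0
  def body(a):
    nonlocal geo_mean, arith_mean
    geo_mean *= a
    arith_mean += a
  def get_state():
    return (geo_mean, arith_mean)
  def set_state(state):
    nonlocal geo_mean, arith_mean
    geo_mean, arith_mean = state
  for_stmt(numbers, None, body, get_state, set_state,
           ('geo_mean', 'arith_mean'), {})
  return geo_mean, arith_mean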
def _py_for_stmt(iter_, extra_test, body, get_state, set_state):
"""Overload of for_stmt that executes a Python for loop."""
del get_state, set_state
if __debug__:
checker = _PythonLoopChecker()
before_iteration = checker.before_iteration
after_iteration = checker.after_iteration
before_iteration()
original_body = body
def protected_body(protected_iter):
original_body(protected_iter)
after_iteration()
before_iteration()
body = protected_body
  if extra_test is not None:
    if extra_test():
      for target in iter_:
        body(target)
        if not extra_test():
          break
else:
for target in iter_:
body(target)
def _known_len_tf_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF entities that admit a length."""
n = py_builtins.len_(iter_)
# TODO(b/117628877): Revisit performance once XLA has the necessary support.
# Note: using a TensorArray creates an extra copy, but can calculate
# gradients more efficiently than StridedSlice.
ta = tensor_array_ops.TensorArray(iter_.dtype, size=n)
iter_ = ta.unstack(iter_)
iterate_index = compat_util.BasicRef(0)
def aug_get_state():
return (iterate_index.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate_index.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iter_.read(iterate_index.value))
iterate_index.value += 1
def aug_test():
main_test = iterate_index.value < n
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = n
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts,
)
def _tf_ragged_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF ragged tensors."""
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
# TODO(mdan): Move this into len()? Requires eager support.
if iter_.shape and iter_.shape[0] is not None:
n = iter_.shape[0]
else:
n = iter_.row_lengths()[0]
iterate_index = compat_util.BasicRef(0)
def aug_get_state():
return (iterate_index.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate_index.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iter_[iterate_index.value])
iterate_index.value += 1
def aug_test():
main_test = iterate_index.value < n
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = n
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts)
def _tf_range_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over a TF range (and elides it)."""
start, limit, delta = iter_.op.inputs
iterate = compat_util.BasicRef(start)
def aug_get_state():
return (iterate.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iterate.value)
iterate.value += delta
def aug_test():
main_test = math_ops.logical_or(
math_ops.logical_and(delta >= 0, iterate.value < limit),
math_ops.logical_and(delta < 0, iterate.value > limit))
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = math_ops.cast(
misc.get_range_len(start, limit, delta), dtypes.int32)
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts)
def _tf_iterator_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF Iterators. See for_loop."""
symbol_names = ('<internal has_next>',) + symbol_names
has_next = compat_util.BasicRef(True)
def aug_get_state():
return (has_next.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
has_next.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
set_state(loop_vars)
init_vars = aug_get_state()
_disallow_undefs_into_loop(*init_vars)
def aug_body():
"""Main body passed to _tf_while_stmt."""
opt_iterate = iterator_ops.get_next_as_optional(iter_)
has_next.value = opt_iterate.has_value()
    loop_vars = aug_get_state()  # updated by set_state() in _tf_while_stmt.
def main_path():
body(opt_iterate.get_value())
new_loop_vars = aug_get_state()
# Note: this verification duplicates the one performed in tf_while_stmt,
# but needs to be done earlier to prevent the tf.cond from blowing up
# first.
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
def noop_path():
return loop_vars
# TODO(mdan): If tf.while_loop supported Optional, this could be avoided.
    # Calling set_state so that the get_state() call in _tf_while_stmt sees
    # the conditional tensors.
aug_set_state(
control_flow_ops.cond(has_next.value, main_path, noop_path))
def aug_test():
# This value takes a complicated path to get here:
# prev_iteration_body -> get_state -> tf.while_loop (as loop var)
# -> current_iteration_body -> set_state -> has_next.value
main_test = has_next.value
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
symbol_names,
opts)
def _general_purpose_scan(ds, init_state, body):
"""Variant of Dataset.scan with semantics of general-purpose computation."""
# Datasets are typically intended for data preprocessing. However, in
# autograph loops they usually appear as general-purpose computations (for
# example, a custom training loop). These two use cases require significantly
# different optimization policies, the most important of which is the device
# placement. The flag override for use_default_device below instructs the
# runtime to treat the computation as general-purpose, rather than data
# preprocessing.
# TODO(mdan): s/use_default_device/specialize_for_input_pipeline.
# TODO(mdan): Don't use private symbols.
return scan_ops._ScanDataset(ds, init_state, body, use_default_device=False) # pylint:disable=protected-access
def _tf_dataset_for_stmt(
ds, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of _dataset_for_stmt with early stopping. See for_stmt."""
# Note: This is easier to follow with the insight that the computations in
# a dataset pipeline are transposed (aka fused).
# For example, given a pipeline input -> scan -> take_while -> reduce,
# and a dataset with input [1, 2, 3], the computations occur in the following
# order:
# reduce(take_while(scan(1)))
# reduce(take_while(scan(2)))
# reduce(take_while(scan(3)))
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
# Workaround for Dataset.reduce not allowing empty state tensors - create
# a dummy state variable that remains unused.
# TODO(mdan): reduce should allow and match empty structures.
if not init_vars:
init_vars = (constant_op.constant(0),)
symbol_names = ('<internal dummy>',)
def dummy_set_state(unused_dummy):
pass
def dummy_get_state():
return (constant_op.constant(0),)
get_state, set_state = dummy_get_state, dummy_set_state
def scan_body(scan_state, scan_inputs):
"""Main body of the Dataset.scan."""
loop_vars, iterate = scan_state, scan_inputs
set_state(loop_vars)
def main_path():
body(iterate)
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts,
check_shapes=False)
return new_loop_vars
if extra_test is not None:
extra_cond = extra_test()
new_loop_vars = control_flow_ops.cond(
extra_cond, main_path, lambda: loop_vars)
else:
# TODO(mdan): the optimizer should be able to remove an invariant cond?
extra_cond = (constant_op.constant(True),) # dummy value, unused
new_loop_vars = main_path()
scan_outputs = new_loop_vars, extra_cond
new_scan_state = new_loop_vars
return new_scan_state, scan_outputs
def take_while_predicate(unused_loop_vars, extra_cond):
return extra_cond
def reduce_body(unused_reduce_state, scan_outputs):
output_loop_vars, unused_extra_cond = scan_outputs
new_reduce_state = output_loop_vars
return new_reduce_state
ds = _general_purpose_scan(ds, init_vars, scan_body)
if extra_test is not None:
ds = ds.apply(take_while_ops.take_while(take_while_predicate))
final_loop_vars = ds.reduce(init_vars, reduce_body)
set_state(final_loop_vars)
def _tf_distributed_iterable_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF distributed datasets."""
if extra_test is not None:
raise NotImplementedError(
'break and return statements are not yet supported in '
'for ... in distributed input loops.')
init_vars = get_state()
  _disallow_undefs_into_loop(*init_vars)
if 'shape_invariants' in opts:
opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
opts['shape_invariants'], init_vars)
def reduce_body(loop_vars, iterate):
set_state(loop_vars)
body(iterate)
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
set_state(iter_.reduce(init_vars, reduce_body))
def while_stmt(test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type. The
loop condition.
body: Callable with the state as arguments, and state as return type. The
actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
    set_state: Additional callable which saves values captured by get_state
      back into the Python environment. This is only useful when staging the
      loop.
symbol_names: Tuple containing the names of all loop variables.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# Evaluate the initial test once in order to do the dispatch. The evaluation
# is isolated to minimize unwanted side effects.
# TODO(mdan): Do a full iteration - some state types might lower to Tensor.
with func_graph.FuncGraph('tmp').as_default():
init_test = test()
# TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
# with the re-evaluation of `test` that `_tf_while_stmt` will make.
if tensors.is_dense_tensor(init_test):
_tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)
return
# Normal Python: We already consumed one evaluation of `test`; consistently,
# unroll one iteration before dispatching to a normal loop.
# TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
if not init_test:
return
body()
_py_while_stmt(test, body, get_state, set_state, opts)
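# Hedged usage sketch (illustrative, not part of the original module): a
# simple countdown expressed through the functional form above; with a plain
# Python int this dispatches to _py_while_stmt. Never called at import time.
def _example_while_stmt_usage(n):  # pragma: no cover
  i = n
  def test():
    return i > 0
  def body():
    nonlocal i
    i -= 1
  def get_state():
    return (i,)
  def set_state(state):
    nonlocal i
    (i,) = state
  while_stmt(test, body, get_state, set_state, ('i',), {})
  return i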
class _PythonLoopChecker(object):
"""Verifies Python loops for TF-specific limits."""
__slots__ = (
'iterations',
'check_inefficient_unroll',
'check_op_count_after_iteration',
'ops_before_iteration',
)
def __init__(self):
self.iterations = 1
self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL
# Triggered when we decided to test the op counts.
self.check_op_count_after_iteration = False
def _get_ops(self):
return ops.get_default_graph().get_operations()
def _check_unroll_limits(self):
if self.iterations > PYTHON_MAX_ITERATIONS:
raise ValueError('iteration limit exceeded')
def _stop_checking_inefficient_unroll(self):
self.check_inefficient_unroll = False
self.check_op_count_after_iteration = False
self.ops_before_iteration = None
  def _verify_inefficient_unroll(self):
"""Checks for possibly-inefficient creation of ops in a Python loop."""
assert self.ops_before_iteration is not None
ops_after_iteration = self._get_ops()
new_ops = tuple(
op for op in ops_after_iteration if op not in self.ops_before_iteration)
if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
return False
ag_logging.warn(
'Large unrolled loop detected. Did you mean to use a TF loop?'
' The following ops were created after iteration %s: %s'
'\nSee'
' https://github.com/tensorflow/tensorflow/blob/master/'
'tensorflow/python/autograph/g3doc/reference/common_errors.md'
'#warning-large-unrolled-loop-detected'
'\n'
'Location:'
'\n%s'
'', self.iterations, new_ops, '\n'.join(traceback.format_stack()))
return True
def before_iteration(self):
"""Called before each iteration in a Python loop."""
if (self.check_inefficient_unroll and
self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
self.ops_before_iteration = self._get_ops()
self.check_op_count_after_iteration = True
def after_iteration(self):
"""Called after each iteration in a Python loop."""
self.iterations += 1
self._check_unroll_limits()
if self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
if did_warn:
self._stop_checking_inefficient_unroll() # Only warn once.
elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
# Once deciding to check the op counts, only do it for a few iterations.
self._stop_checking_inefficient_unroll()
def _py_while_stmt(test, body, get_state, set_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts, get_state, set_state
if __debug__:
checker = _PythonLoopChecker()
before_iteration = checker.before_iteration
after_iteration = checker.after_iteration
before_iteration()
original_body = body
def protected_body():
original_body()
after_iteration()
before_iteration()
body = protected_body
while test():
body()
def _shape_invariants_mapping_to_positional_list(mapping, keys):
# The keys are not expected to be hashable.
mapping = {id(k): (k, v) for k, v in mapping}
result = []
for k in keys:
map_key, map_val = mapping.get(id(k), (None, None))
result.append(map_val if map_key is k else None)
return tuple(result)
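# Illustrative sketch (hypothetical names): with loop variables keys = (x, y)
# and mapping = [(x, tf.TensorShape([None]))], the result is
# (TensorShape([None]), None) -- invariants are aligned with `keys`
# positionally, matched by object identity rather than by hashing.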
def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
def aug_test(*loop_vars):
set_state(loop_vars)
return test()
def aug_body(*loop_vars):
set_state(loop_vars)
body()
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
# Non-v2 while_loop unpacks the results when there is only one return value.
# This enforces consistency across versions.
opts['return_same_structure'] = True
if 'shape_invariants' in opts:
opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
opts['shape_invariants'], init_vars)
final_loop_vars = control_flow_ops.while_loop(
aug_test, aug_body, init_vars, **opts)
set_state(final_loop_vars)
def if_stmt(cond,
body,
orelse,
get_state,
set_state,
basic_symbol_names,
composite_symbol_names):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch as
return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
get_state: Function that returns a tuple containing the values of all
composite symbols modified within the conditional. This allows access to
state that branches may mutate through side effects. This function is not
needed and should not be called when dispatching to code matching Python's
default semantics. This is useful for checkpointing to avoid unintended
side-effects when staging requires evaluating all code-paths.
set_state: Function to set the values of all composite symbols modified
within the conditional. This is the complement to get_state, used to
      restore checkpointed values. The single argument is a tuple containing
      values for each composite symbol that may be modified in a branch of the
      conditional. This is usually the result of a call to get_state.
basic_symbol_names: Tuple containing basic loop var names.
composite_symbol_names: Tuple containing composite loop var names.
Returns:
Tuple containing the statement outputs.
"""
# Note: tf.cond doesn't support SparseTensor.
if tensors.is_dense_tensor(cond):
return tf_if_stmt(cond, body, orelse, get_state, set_state,
basic_symbol_names, composite_symbol_names)
else:
return _py_if_stmt(cond, body, orelse)
def tf_if_stmt(cond, body, orelse, get_state, set_state, basic_symbol_names,
composite_symbol_names):
"""Overload of if_stmt that stages a TF cond."""
body = _wrap_disallow_undefs_from_cond(body, branch_name='if')
orelse = _wrap_disallow_undefs_from_cond(orelse, branch_name='else')
body = _isolate_state(body, get_state, set_state)
orelse = _isolate_state(orelse, get_state, set_state)
  # `state` currently includes the values of any composite symbols (e.g. `a.b`)
  # modified by the conditional. `final_vars` includes the values of basic
  # symbols (e.g. `a`) which cannot be passed by reference and must be returned.
# See _isolate_state.
# TODO(mdan): We should minimize calls to get/set_state.
body_branch = 0
orelse_branch = 1
result = [None, None]
def error_checking_body():
result[body_branch] = body()
if result[orelse_branch] is not None:
_verify_tf_cond_vars(result[body_branch], result[orelse_branch],
basic_symbol_names + composite_symbol_names)
return result[body_branch]
def error_checking_orelse():
result[orelse_branch] = orelse()
if result[body_branch] is not None:
_verify_tf_cond_vars(result[body_branch], result[orelse_branch],
basic_symbol_names + composite_symbol_names)
return result[orelse_branch]
final_vars, final_state = control_flow_ops.cond(cond, error_checking_body,
error_checking_orelse)
set_state(final_state)
return final_vars
def _isolate_state(func, get_state, set_state):
"""Wraps func to (best-effort) isolate state mutations that func may do.
The simplest example of state mutation is mutation of variables (via e.g.
attributes), or modification of globals.
This allows us to more safely execute this function without worrying about
side effects when the function wasn't normally expected to execute. For
example, staging requires that the function is executed ahead of time, and
we need to ensure its effects are not observed during normal execution.
Args:
func: () -> Any
get_state: () -> Any, returns the current state
set_state: (Any) -> None, resets the state to the specified values.
Typically the result of an earlier call to `get_state`.
Returns:
Tuple[Any, Any], where the first element is the return value of `func`,
and the second is the final state values.
"""
def wrapper():
init_state = get_state()
new_vars = func()
# TODO(mdan): These should be copies, lest set_state might affect them.
new_state = get_state()
set_state(init_state)
return new_vars, new_state
return wrapper
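# Hedged sketch of the isolation contract above (illustrative names):
#
#   wrapped = _isolate_state(branch_fn, get_state, set_state)
#   branch_outputs, branch_state = wrapped()
#
# After the call, any state that branch_fn mutated has been restored to its
# pre-call values; the mutations survive only in branch_state.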
def _wrap_disallow_undefs_from_cond(func, branch_name):
"""Wraps conditional branch to disallow returning undefined symbols."""
def wrapper():
"""Calls function and raises an error if undefined symbols are returned."""
results = func()
if isinstance(results, tuple):
results_tuple = results
else:
results_tuple = results,
undefined = tuple(filter(special_values.is_undefined, results_tuple))
if undefined:
raise ValueError(
'The following symbols must also be initialized in the {} branch: {}.'
' Alternatively, you may initialize them before the if'
' statement.'.format(branch_name,
tuple(s.symbol_name for s in undefined)))
for result in results_tuple:
if special_values.is_undefined_return(result):
raise ValueError(
'A value must also be returned from the {} branch. If a value is '
'returned from one branch of a conditional a value must be '
'returned from all branches.'.format(branch_name))
return results
return wrapper
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
compat_util.deprecated_py2_support(__name__)
| apache-2.0 |
uber/pyro | pyro/distributions/conjugate.py | 1 | 10824 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import numbers
import torch
from torch.distributions.utils import broadcast_all
from pyro.ops.special import log_beta, log_binomial
from . import constraints
from .torch import Beta, Binomial, Dirichlet, Gamma, Multinomial, Poisson
from .torch_distribution import TorchDistribution
from .util import broadcast_shape
def _log_beta_1(alpha, value, is_sparse):
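    # Computes lgamma(1 + value) + lgamma(alpha) - lgamma(value + alpha).
    # Note the term vanishes where value == 0 (lgamma(1) == 0 and the alpha
    # terms cancel), which is what makes the sparse path valid: it evaluates
    # only the nonzero entries and leaves exact zeros elsewhere.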
if is_sparse:
mask = value != 0
value, alpha, mask = torch.broadcast_tensors(value, alpha, mask)
result = torch.zeros_like(value)
value = value[mask]
alpha = alpha[mask]
result[mask] = (
torch.lgamma(1 + value) + torch.lgamma(alpha) - torch.lgamma(value + alpha)
)
return result
else:
return (
torch.lgamma(1 + value) + torch.lgamma(alpha) - torch.lgamma(value + alpha)
)
class BetaBinomial(TorchDistribution):
r"""
    Compound distribution comprising a beta-binomial pair. The probability of
success (``probs`` for the :class:`~pyro.distributions.Binomial` distribution)
is unknown and randomly drawn from a :class:`~pyro.distributions.Beta` distribution
prior to a certain number of Bernoulli trials given by ``total_count``.
:param concentration1: 1st concentration parameter (alpha) for the
Beta distribution.
:type concentration1: float or torch.Tensor
:param concentration0: 2nd concentration parameter (beta) for the
Beta distribution.
:type concentration0: float or torch.Tensor
:param total_count: Number of Bernoulli trials.
:type total_count: float or torch.Tensor
"""
arg_constraints = {
"concentration1": constraints.positive,
"concentration0": constraints.positive,
"total_count": constraints.nonnegative_integer,
}
has_enumerate_support = True
support = Binomial.support
# EXPERIMENTAL If set to a positive value, the .log_prob() method will use
    # a shifted Stirling's approximation to the Beta function, reducing
# computational cost from 9 lgamma() evaluations to 12 log() evaluations
# plus arithmetic. Recommended values are between 0.1 and 0.01.
approx_log_prob_tol = 0.0
def __init__(
self, concentration1, concentration0, total_count=1, validate_args=None
):
concentration1, concentration0, total_count = broadcast_all(
concentration1, concentration0, total_count
)
self._beta = Beta(concentration1, concentration0)
self.total_count = total_count
super().__init__(self._beta._batch_shape, validate_args=validate_args)
@property
def concentration1(self):
return self._beta.concentration1
@property
def concentration0(self):
return self._beta.concentration0
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(BetaBinomial, _instance)
batch_shape = torch.Size(batch_shape)
new._beta = self._beta.expand(batch_shape)
new.total_count = self.total_count.expand_as(new._beta.concentration0)
super(BetaBinomial, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=()):
probs = self._beta.sample(sample_shape)
return Binomial(self.total_count, probs, validate_args=False).sample()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
n = self.total_count
k = value
a = self.concentration1
b = self.concentration0
tol = self.approx_log_prob_tol
return (
log_binomial(n, k, tol)
+ log_beta(k + a, n - k + b, tol)
- log_beta(a, b, tol)
)
@property
def mean(self):
return self._beta.mean * self.total_count
@property
def variance(self):
return (
self._beta.variance
* self.total_count
* (self.concentration0 + self.concentration1 + self.total_count)
)
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError(
"Inhomogeneous total count not supported by `enumerate_support`."
)
values = torch.arange(
1 + total_count,
dtype=self.concentration1.dtype,
device=self.concentration1.device,
)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
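# Hedged usage sketch (illustrative, not part of the module):
#
#   d = BetaBinomial(torch.tensor(2.0), torch.tensor(3.0), total_count=10)
#   x = d.sample()      # draws p ~ Beta(2, 3), then x ~ Binomial(10, p)
#   lp = d.log_prob(x)  # closed form via log_binomial and log_beta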
class DirichletMultinomial(TorchDistribution):
r"""
    Compound distribution comprising a dirichlet-multinomial pair. The probability of
classes (``probs`` for the :class:`~pyro.distributions.Multinomial` distribution)
is unknown and randomly drawn from a :class:`~pyro.distributions.Dirichlet`
distribution prior to a certain number of Categorical trials given by
``total_count``.
:param float or torch.Tensor concentration: concentration parameter (alpha) for the
Dirichlet distribution.
:param int or torch.Tensor total_count: number of Categorical trials.
:param bool is_sparse: Whether to assume value is mostly zero when computing
:meth:`log_prob`, which can speed up computation when data is sparse.
"""
arg_constraints = {
"concentration": constraints.independent(constraints.positive, 1),
"total_count": constraints.nonnegative_integer,
}
support = Multinomial.support
def __init__(
self, concentration, total_count=1, is_sparse=False, validate_args=None
):
batch_shape = concentration.shape[:-1]
event_shape = concentration.shape[-1:]
if isinstance(total_count, numbers.Number):
total_count = concentration.new_tensor(total_count)
else:
batch_shape = broadcast_shape(batch_shape, total_count.shape)
concentration = concentration.expand(batch_shape + (-1,))
total_count = total_count.expand(batch_shape)
self._dirichlet = Dirichlet(concentration)
self.total_count = total_count
self.is_sparse = is_sparse
super().__init__(batch_shape, event_shape, validate_args=validate_args)
@property
def concentration(self):
return self._dirichlet.concentration
@staticmethod
def infer_shapes(concentration, total_count=()):
batch_shape = broadcast_shape(concentration[:-1], total_count)
event_shape = concentration[-1:]
return batch_shape, event_shape
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(DirichletMultinomial, _instance)
batch_shape = torch.Size(batch_shape)
new._dirichlet = self._dirichlet.expand(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
new.is_sparse = self.is_sparse
super(DirichletMultinomial, new).__init__(
new._dirichlet.batch_shape, new._dirichlet.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=()):
probs = self._dirichlet.sample(sample_shape)
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError(
"Inhomogeneous total count not supported by `sample`."
)
return Multinomial(total_count, probs).sample()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
alpha = self.concentration
return _log_beta_1(alpha.sum(-1), value.sum(-1), self.is_sparse) - _log_beta_1(
alpha, value, self.is_sparse
).sum(-1)
@property
def mean(self):
return self._dirichlet.mean * self.total_count.unsqueeze(-1)
@property
def variance(self):
n = self.total_count.unsqueeze(-1)
alpha = self.concentration
alpha_sum = self.concentration.sum(-1, keepdim=True)
alpha_ratio = alpha / alpha_sum
return n * alpha_ratio * (1 - alpha_ratio) * (n + alpha_sum) / (1 + alpha_sum)
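# Hedged usage sketch (illustrative): with mostly-zero count vectors,
# ``is_sparse=True`` restricts the per-category beta terms to nonzero entries.
#
#   d = DirichletMultinomial(torch.ones(1000), total_count=5, is_sparse=True)
#   value = torch.zeros(1000)
#   value[:5] = 1.0  # counts must sum to total_count
#   lp = d.log_prob(value)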
class GammaPoisson(TorchDistribution):
r"""
    Compound distribution comprising a gamma-poisson pair, also referred to as
a gamma-poisson mixture. The ``rate`` parameter for the
:class:`~pyro.distributions.Poisson` distribution is unknown and randomly
drawn from a :class:`~pyro.distributions.Gamma` distribution.
.. note:: This can be treated as an alternate parametrization of the
:class:`~pyro.distributions.NegativeBinomial` (``total_count``, ``probs``)
distribution, with `concentration = total_count` and `rate = (1 - probs) / probs`.
:param float or torch.Tensor concentration: shape parameter (alpha) of the Gamma
distribution.
:param float or torch.Tensor rate: rate parameter (beta) for the Gamma
distribution.
"""
arg_constraints = {
"concentration": constraints.positive,
"rate": constraints.positive,
}
support = Poisson.support
def __init__(self, concentration, rate, validate_args=None):
concentration, rate = broadcast_all(concentration, rate)
self._gamma = Gamma(concentration, rate)
super().__init__(self._gamma._batch_shape, validate_args=validate_args)
@property
def concentration(self):
return self._gamma.concentration
@property
def rate(self):
return self._gamma.rate
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(GammaPoisson, _instance)
batch_shape = torch.Size(batch_shape)
new._gamma = self._gamma.expand(batch_shape)
super(GammaPoisson, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=()):
rate = self._gamma.sample(sample_shape)
return Poisson(rate).sample()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
post_value = self.concentration + value
return (
-log_beta(self.concentration, value + 1)
- post_value.log()
+ self.concentration * self.rate.log()
- post_value * (1 + self.rate).log()
)
@property
def mean(self):
return self.concentration / self.rate
@property
def variance(self):
return self.concentration / self.rate.pow(2) * (1 + self.rate)
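# Hedged sketch of the NegativeBinomial correspondence noted in the class
# docstring: probs = 1 / (1 + rate), so both parametrizations should score
# counts identically.
#
#   c, r = torch.tensor(3.0), torch.tensor(2.0)
#   gp = GammaPoisson(c, r)
#   nb = NegativeBinomial(total_count=c, probs=1.0 / (1.0 + r))
#   gp.log_prob(torch.tensor(4.0))  # approximately nb.log_prob(torch.tensor(4.0))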
| apache-2.0 |
matthew-tucker/mne-python | mne/tests/test_source_estimate.py | 12 | 28321 | from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import testing
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces, MixedSourceEstimate)
from mne import read_source_estimate, morph_data, extract_label_time_course
from mne.source_estimate import (spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
compute_morph_matrix, grade_to_vertices,
grade_to_tris)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_h5py, run_tests_if_main, slow_test)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_smorph = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
@slow_test
@testing.requires_testing_data
def test_volume_stc():
"""Test volume STCs
"""
tempdir = _TempDir()
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data, data, np.arange(2)[:, np.newaxis]]
vertno = np.arange(N)
vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
vertno_reads = [vertno, vertno, np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
stc = VolSourceEstimate(data, vertno, 0, 1)
fname_temp = op.join(tempdir, 'temp-vl.stc')
stc_new = stc
for _ in range(2):
stc_new.save(fname_temp)
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(vertno_read, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert_true(isinstance(stc, VolSourceEstimate))
assert_true('sample' in repr(stc))
stc_new = stc
assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
for _ in range(2):
fname_temp = op.join(tempdir, 'temp-vol.w')
stc_new.save(fname_temp, ftype='w')
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertices, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# save the stc as a nifti file and export
try:
import nibabel as nib
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
src = read_source_spaces(fname_vsrc)
vol_fname = op.join(tempdir, 'stc.nii.gz')
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
with warnings.catch_warnings(record=True): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
dest='mri', mri_resolution=True)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
except ImportError:
print('Save as nifti test skipped, needs NiBabel')
@testing.requires_testing_data
def test_expand():
"""Test stc expansion
"""
stc = read_source_estimate(fname_stc, 'sample')
assert_true('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_limited = stc.in_label(new_label)
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertices)
assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
# make sure we can't add unless vertno agree
assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
def test_io_stc():
"""Test IO for STC files
"""
tempdir = _TempDir()
stc = _fake_stc()
stc.save(op.join(tempdir, "tmp.stc"))
stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertices), len(stc2.vertices))
for v1, v2 in zip(stc.vertices, stc2.vertices):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_h5py
def test_io_stc_h5():
"""Test IO for STC files using HDF5
"""
tempdir = _TempDir()
stc = _fake_stc()
assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
out_name = op.join(tempdir, 'tmp')
stc.save(out_name, ftype='h5')
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc.h5')
assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
for stc_new in stc3, stc4:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertices), len(stc.vertices))
for v1, v2 in zip(stc_new.vertices, stc.vertices):
assert_array_equal(v1, v2)
def test_io_w():
"""Test IO for w files
"""
tempdir = _TempDir()
stc = _fake_stc(n_time=1)
w_fname = op.join(tempdir, 'fake')
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(op.join(tempdir, 'tmp'), ftype='w')
src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files
"""
stc = _fake_stc()
data = stc.data.copy()
out = list()
for a in [data, stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
b = 2 + a
b = 2 - a
b = +a
assert_array_equal(b.data, a.data)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@slow_test
@testing.requires_testing_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
"""
stc = read_source_estimate(fname_stc)
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
bin = stc.bin(.12)
a = np.array((1,), dtype=stc.data.dtype)
a[0] = np.mean(stc.data[0, stc.times < .12])
assert a[0] == bin.data[0, 0]
assert_raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert_true(hemi == 1)
# XXX Should design a fool-proof test case, but here were the results:
assert_equal(vertex, 124791)
assert_equal(np.round(t, 2), 0.12)
stc = read_source_estimate(fname_stc)
stc.subject = 'sample'
label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)[0]
label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
subjects_dir=subjects_dir)[0]
label_both = label_lh + label_rh
for label in (label_lh, label_rh, label_both):
assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
stc_label = stc.in_label(label)
if label.hemi != 'both':
if label.hemi == 'lh':
verts = stc_label.vertices[0]
else: # label.hemi == 'rh':
verts = stc_label.vertices[1]
n_vertices_used = len(label.get_vertices_used(verts))
assert_equal(len(stc_label.data), n_vertices_used)
stc_lh = stc.in_label(label_lh)
assert_raises(ValueError, stc_lh.in_label, label_rh)
label_lh.subject = 'foo'
assert_raises(RuntimeError, stc.in_label, label_lh)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert_true(stc_new.data.shape[1] == stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_extract_label_time_course():
"""Test extraction of label time courses from stc
"""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
vertices = [src[0]['vertno'], src[1]['vertno']]
n_verts = len(vertices[0]) + len(vertices[1])
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
# compute the mean with sign flip
label_means_flipped = np.zeros_like(label_means)
for i, label in enumerate(labels):
label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
# generate some stc's with known data
stcs = list()
for i in range(n_stcs):
data = np.zeros((n_verts, n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_means[j]
this_stc = SourceEstimate(data, vertices, 0, 1)
stcs.append(this_stc)
# test some invalid inputs
assert_raises(ValueError, extract_label_time_course, stcs, labels,
src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
src, mode='mean')
# but this works:
tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
allow_empty=True)
for arr in tc:
assert_true(arr.shape == (1, n_times))
assert_array_equal(arr, np.zeros((1, n_times)))
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max']
for mode in modes:
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode) for stc in stcs]
assert_true(len(label_tc) == n_stcs)
assert_true(len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert_true(tc1.shape == (n_labels, n_times))
assert_true(tc2.shape == (n_labels, n_times))
assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
if mode == 'mean':
assert_array_almost_equal(tc1, label_means)
if mode == 'mean_flip':
assert_array_almost_equal(tc1, label_means_flipped)
if mode == 'max':
assert_array_almost_equal(tc1, label_maxs)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src)
assert_true(len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src)
assert_true(x.size == 0)
@slow_test
@testing.requires_testing_data
def test_morph_data():
"""Test morphing of data
"""
tempdir = _TempDir()
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
subjects_dir=subjects_dir)
stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
# make sure we can specify vertices
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
stc_to2 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
# make sure we can use different buffer_size
stc_to3 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=3,
subjects_dir=subjects_dir)
# make sure we get a warning about # of steps
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=1, buffer_size=3,
subjects_dir=subjects_dir)
assert_equal(len(w), 2)
assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
assert_array_almost_equal(stc_to1.data, stc_to2.data)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
# make sure precomputed morph matrices work
morph_mat = compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=12, subjects_dir=subjects_dir)
stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, vertices_to, 'foo')
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
vertices_to, morph_mat, subject_from='foo')
# steps warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=1, subjects_dir=subjects_dir)
assert_equal(len(w), 2)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
# make sure we can fill by morphing
stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
smooth=12, buffer_size=3, subjects_dir=subjects_dir)
assert_true(stc_to5.data.shape[0] == 163842 + 163842)
# Morph sparse data
# Make a sparse stc
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
grade=5, subjects_dir=subjects_dir)
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result"""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
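# Illustrative: for input of shape (n, t), data_t has shape (n, t, 2); the
# second return value mirrors transforms that also return auxiliary output.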
def test_transform_data():
"""Test applying linear (time) transform to data"""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = np.random.randn(n_vertices, n_sensors)
sens_data = np.random.randn(n_sensors, n_times)
vertices = np.arange(n_vertices)
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
def test_transform():
"""Test applying linear (time) transform to data"""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert_true(isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertices, stcs_t[0].vertices)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
assert_raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert_true(isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
tmax_idx=t_idx[-1])
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert_true(isinstance(stc, SourceEstimate))
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
assert_true(len(stc.vertices[0]) == 0)
assert_equal(stc.vertices[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity from triangles"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
assert_true(len(new_fmt), len(components))
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity from source spaces"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
# add test for dist connectivity
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
assert_array_equal(connectivity.todense(), connectivity3.todense())
# add test for source space connectivity with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_ = inverse_operator['src']
connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
assert len(w) == 1
a = connectivity.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert_true(a == b)
assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
"""Test stc Pandas exporter"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
df = stc.to_data_frame(index=ind)
            assert_true(df.index.names == (ind if isinstance(ind, list)
                                           else [ind]))
assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data were present as categorical variables
assert_true(all([c in ['time', 'subject'] for c in
df.reset_index().columns][:2]))
def test_get_peak():
    """Test peak getter"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for ii, stc in enumerate([stc_surf, stc_vol]):
assert_raises(ValueError, stc.get_peak, tmin=-100)
assert_raises(ValueError, stc.get_peak, tmax=90)
assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
assert_true(vert_idx in vertno)
assert_true(time_idx in stc.times)
ch_idx, time_idx = stc.get_peak(vert_as_index=True,
time_as_index=True)
        assert_true(ch_idx < stc.data.shape[0])
assert_true(time_idx < len(stc.times))
@testing.requires_testing_data
def test_mixed_stc():
    """Test source estimate from mixed source space"""
N = 90 # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
data = np.random.randn(N, T)
vertno = S * [np.arange(N // S)]
# make sure error is raised if vertices are not a list of length >= 2
assert_raises(ValueError, MixedSourceEstimate, data=data,
vertices=[np.arange(N)])
stc = MixedSourceEstimate(data, vertno, 0, 1)
vol = read_source_spaces(fname_vsrc)
# make sure error is raised for plotting surface with volume source
assert_raises(ValueError, stc.plot_surface, src=vol)
run_tests_if_main()
| bsd-3-clause |
uber/pyro | tests/infer/reparam/test_split.py | 1 | 3082 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.autograd import grad
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.reparam import SplitReparam
from tests.common import assert_close
from .util import check_init_reparam
@pytest.mark.parametrize(
"event_shape,splits,dim",
[
((6,), [2, 1, 3], -1),
        ((2, 5), [2, 3], -1),
((4, 2), [1, 3], -2),
((2, 3, 1), [1, 2], -2),
],
ids=str,
)
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)], ids=str)
def test_normal(batch_shape, event_shape, splits, dim):
shape = batch_shape + event_shape
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
def model():
with pyro.plate_stack("plates", batch_shape):
pyro.sample("x", dist.Normal(loc, scale).to_event(len(event_shape)))
# Run without reparam.
trace = poutine.trace(model).get_trace()
expected_value = trace.nodes["x"]["value"]
expected_log_prob = trace.log_prob_sum()
expected_grads = grad(expected_log_prob, [loc, scale], create_graph=True)
# Run with reparam.
split_values = {
"x_split_{}".format(i): xi
for i, xi in enumerate(expected_value.split(splits, dim))
}
rep = SplitReparam(splits, dim)
reparam_model = poutine.reparam(model, {"x": rep})
reparam_model = poutine.condition(reparam_model, split_values)
trace = poutine.trace(reparam_model).get_trace()
assert all(name in trace.nodes for name in split_values)
assert isinstance(trace.nodes["x"]["fn"], dist.Delta)
assert trace.nodes["x"]["fn"].batch_shape == batch_shape
assert trace.nodes["x"]["fn"].event_shape == event_shape
# Check values.
actual_value = trace.nodes["x"]["value"]
assert_close(actual_value, expected_value, atol=0.1)
# Check log prob.
actual_log_prob = trace.log_prob_sum()
assert_close(actual_log_prob, expected_log_prob)
actual_grads = grad(actual_log_prob, [loc, scale], create_graph=True)
assert_close(actual_grads, expected_grads)
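# Illustrative note (added): SplitReparam replaces the single site "x" with
# auxiliary sites "x_split_0", "x_split_1", ... that hold the chunks obtained
# by splitting event dimension `dim` into the given sizes; "x" itself is then
# reconstructed deterministically as a Delta over their concatenation, which
# the assertions above check.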
@pytest.mark.parametrize(
"event_shape,splits,dim",
[
((6,), [2, 1, 3], -1),
        ((2, 5), [2, 3], -1),
((4, 2), [1, 3], -2),
((2, 3, 1), [1, 2], -2),
],
ids=str,
)
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)], ids=str)
def test_init(batch_shape, event_shape, splits, dim):
shape = batch_shape + event_shape
loc = torch.empty(shape).uniform_(-1.0, 1.0)
scale = torch.empty(shape).uniform_(0.5, 1.5)
def model():
with pyro.plate_stack("plates", batch_shape):
return pyro.sample("x", dist.Normal(loc, scale).to_event(len(event_shape)))
check_init_reparam(model, SplitReparam(splits, dim))
| apache-2.0 |
vitaly-krugl/nupic | examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py | 50 | 15806 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
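# Illustrative sketch (added; the field names are the hypothetical ones from
# the example above): after updateConfigFromSubConfig(config) merges any
# sub-experiment overrides, applyValueGettersToContainer(config) walks the
# container and replaces each DeferredDictLookup('_dsEncoderFieldName2_N')
# with the current value of config['_dsEncoderFieldName2_N'], so the encoder
# schema ends up holding plain integers.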
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
      # The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
vitaly-krugl/nupic | examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py | 50 | 15806 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
      # The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
vitaly-krugl/nupic | examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py | 50 | 15806 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
      # The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
AlphaSmartDog/DeepLearningNotes | Torch-1 DDPG/Torch-2 DDPG GPU/agent/forward.py | 1 | 1703 | """
@author: Young
@license: (C) Copyright 2013-2017
@contact: aidabloc@163.com
@file: forward.py
@time: 2018/1/17 21:53
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as nf
def fan_in_init(size, fan_in=None):
if not isinstance(size, torch.Size):
raise ValueError("size should be torch.Size")
fan_in = fan_in or size[0]
value = 1. / np.sqrt(fan_in)
return torch.FloatTensor(size).uniform_(-value, value)
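# Note (added): this is the fan-in initialization used in the DDPG paper;
# weights are drawn uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)], which
# keeps early layer outputs in a moderate range. Illustrative usage, mirroring
# the layers below:
#     w = fan_in_init(torch.Size([400, state_size]))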
class ActorNet(nn.Module):
def __init__(self, state_size, action_size, init_w=3e-3):
super().__init__()
self.fc1 = nn.Linear(state_size, 400)
self.fc1.weight.data = fan_in_init(self.fc1.weight.data.size())
self.fc2 = nn.Linear(400, 300)
self.fc2.weight.data = fan_in_init(self.fc2.weight.data.size())
self.fc3 = nn.Linear(300, action_size)
self.fc3.weight.data.uniform_(-init_w, init_w)
def forward(self, state):
net = nf.relu(self.fc1(state))
net = nf.relu(self.fc2(net))
return nf.tanh(self.fc3(net))
class CriticNet(nn.Module):
def __init__(self, state_size, action_size, init_w=3e-3):
super().__init__()
self.fc1 = nn.Linear(state_size, 400)
self.fc1.weight.data = fan_in_init(self.fc1.weight.data.size())
self.fc2 = nn.Linear(400+action_size, 300)
self.fc2.weight.data = fan_in_init(self.fc2.weight.data.size())
self.fc3 = nn.Linear(300, 1)
self.fc3.weight.data.uniform_(-init_w, init_w)
def forward(self, state, action):
net = nf.relu(self.fc1(state))
net = torch.cat((net, action), -1)
net = nf.relu(self.fc2(net))
return self.fc3(net)
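# Shape sketch (added, assuming batch-first tensors): state (B, state_size)
# and action (B, action_size) yield a Q-value of shape (B, 1); the action is
# concatenated in after the first hidden layer, as in the original DDPG setup.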
| mit |
uber/pyro | examples/contrib/autoname/mixture.py | 1 | 2572 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.autoname import named
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import Adam
# This is a simple gaussian mixture model.
#
# The example demonstrates how to pass named.Objects() from a global model to
# a local model implemented as a helper function.
def model(data, k):
latent = named.Object("latent")
# Create parameters for a Gaussian mixture model.
latent.probs.param_(torch.ones(k) / k, constraint=constraints.simplex)
latent.locs.param_(torch.zeros(k))
latent.scales.param_(torch.ones(k), constraint=constraints.positive)
# Observe all the data. We pass a local latent in to the local_model.
latent.local = named.List()
for x in data:
local_model(latent.local.add(), latent.probs, latent.locs, latent.scales, obs=x)
def local_model(latent, ps, locs, scales, obs=None):
i = latent.id.sample_(dist.Categorical(ps))
return latent.x.sample_(dist.Normal(locs[i], scales[i]), obs=obs)
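# Note (added, illustrative): each named.Object attribute path becomes the
# Pyro site name, so the i-th data point yields sites along the lines of
# "latent.local[i].id" and "latent.local[i].x".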
def guide(data, k):
latent = named.Object("latent")
latent.local = named.List()
for x in data:
# We pass a local latent in to the local_guide.
local_guide(latent.local.add(), k)
def local_guide(latent, k):
# The local guide simply guesses category assignments.
latent.probs.param_(torch.ones(k) / k, constraint=constraints.positive)
latent.id.sample_(dist.Categorical(latent.probs))
def main(args):
pyro.set_rng_seed(0)
optim = Adam({"lr": 0.1})
elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()
inference = SVI(model, guide, optim, loss=elbo)
data = torch.tensor([0.0, 1.0, 2.0, 20.0, 30.0, 40.0])
k = 2
print("Step\tLoss")
loss = 0.0
for step in range(args.num_epochs):
if step and step % 10 == 0:
print("{}\t{:0.5g}".format(step, loss))
loss = 0.0
loss += inference.step(data, k=k)
print("Parameters:")
for name, value in sorted(pyro.get_param_store().items()):
print("{} = {}".format(name, value.detach().cpu().numpy()))
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument("-n", "--num-epochs", default=200, type=int)
parser.add_argument("--jit", action="store_true")
args = parser.parse_args()
main(args)
| apache-2.0 |
ellangoj/dynamic_sgd | validation.py | 1 | 1886 | import numpy
import random
import matplotlib.pyplot as plt
import datasets
import models
if __name__ == "__main__":
train_set, test_set, m, n = datasets.movielens1m()
#train_set, test_set, d = datasets.realsim()
split = int(0.5*len(train_set))
train_set, val_set = train_set[:split], train_set[split:]
r = 10
L = numpy.random.rand(m, r)
R = numpy.random.rand(r, n)
#w = numpy.random.rand(d)
steps = 1*len(train_set)
step_size = 1e-2
mu = 1e-1
train_loss = []
validation_loss = []
#for mu in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 0]:
for step_size in [5e-2, 2e-2, 1e-2, 5e-3, 1e-3]:
model = models.MatrixFactorization(L, R, models.Opt.SAGA)
#model = models.LogisticRegression(w, models.Opt.SGD)
m = 0
for s in xrange(steps):
#if (s % (steps/50) == 0):
#train_loss.append(model.reg_loss(train_set, mu))
#validation_loss.append(model.loss(val_set))
if model.opt == models.Opt.SGD:
training_point = random.choice(train_set)
elif model.opt == models.Opt.SAGA:
if (s % 2 == 0 and m < len(train_set)):
j = m
m += 1
else:
j = random.randrange(m)
training_point = train_set[j]
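        # Note (added): under this SAGA-style schedule, every other step
        # streams the next unseen training point (m, reused above as a count
        # of seen points); the remaining steps replay a uniformly random
        # previously seen point, so the stored gradient table only ever
        # covers points that have been visited.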
model.update_step(training_point, step_size, mu)
print step_size, mu, model.loss(val_set)
#plt.figure(1)
#plt.plot(train_loss, 'k.-')
#plt.xlabel('Time')
#plt.ylabel('Loss')
#plt.title('Average train loss')
#plt.figure(2)
#plt.plot(validation_loss, 'k.-')
#plt.xlabel('Time')
#plt.ylabel('Loss')
#plt.title('Average validation loss')
#plt.show()
| mit |
kod3r/keras | examples/lstm_text_generation.py | 74 | 3257 | from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.datasets.data_utils import get_file
import numpy as np
import random, sys
'''
Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 20
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
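# Illustrative note (added): X is a boolean one-hot tensor of shape
# (n_sequences, maxlen, n_chars) and y holds the one-hot next character for
# each sequence, shape (n_sequences, n_chars).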
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(512, 512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# helper function to sample an index from a probability array
def sample(a, temperature=1.0):
a = np.log(a)/temperature
a = np.exp(a)/np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1,a,1))
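# Worked example (added): with preds = [0.1, 0.2, 0.7], temperature 0.5
# re-weights the probabilities to roughly [0.02, 0.07, 0.91], so index 2 is
# drawn almost every time; temperature 1.2 flattens the distribution and
# picks rarer characters more often.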
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index : start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
        for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
| mit |
windyuuy/opera | chromium/src/third_party/webpagereplay/third_party/dns/rdataset.py | 215 | 11527 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""Raised if an attempt is made to add a SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
pass
class IncompatibleTypes(dns.exception.DNSException):
"""Raised if an attempt is made to add rdata of an incompatible type."""
pass
class Rdataset(dns.set.Set):
"""A DNS rdataset.
@ivar rdclass: The class of the rdataset
@type rdclass: int
@ivar rdtype: The type of the rdataset
@type rdtype: int
@ivar covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@ivar ttl: The DNS TTL (Time To Live) value
@type ttl: int
"""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Create a new rdataset of the specified class and type.
@see: the description of the class instance variables for the
meaning of I{rdclass} and I{rdtype}"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = 0
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
@param ttl: The TTL
@type ttl: int"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional I{ttl} parameter is supplied, then
self.update_ttl(ttl) will be called prior to adding the rdata.
@param rd: The rdata
@type rd: dns.rdata.Rdata object
@param ttl: The TTL
@type ttl: int"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
        if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
@param other: The rdataset from which to update
@type other: dns.rdataset.Rdataset object"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two rdatasets are equal if they have the same class, type, and
covers, and contain the same rdata.
@rtype: bool"""
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
        @param name: If name is not None, emit RRs with I{name} as
the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
        @param relativize: True if names should be relativized
@type relativize: bool"""
        if name is not None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO.StringIO()
        if override_rdclass is not None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
print >> s, '%s%s%s %s' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype))
else:
for rd in self:
print >> s, '%s%s%d %s %s %s' % \
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize, **kw))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
@param name: The owner name of the RRset that will be emitted
@type name: dns.name.Name object
@param file: The file to which the wire format data will be appended
@type file: file
@param compress: The compression table to use; the default is None.
@type compress: dict
@param origin: The origin to be appended to any relative names when
they are emitted. The default is None.
@returns: the number of records emitted
@rtype: int
"""
        if override_rdclass is not None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
            if want_shuffle:
                rdatas = list(self)
                random.shuffle(rdatas)
            else:
                rdatas = self
            for rd in rdatas:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns True if this rdataset matches the specified class, type,
and covers"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
return from_rdata_list(ttl, rdatas)
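if __name__ == '__main__':
    # Hedged usage sketch (added by the editor; not part of upstream
    # dnspython). Builds an rdataset from text rdata and prints it back
    # out; the addresses below are illustrative values only.
    rds = from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')
    print rds.ttl  # 300
    print rds.to_text()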
| bsd-3-clause |
rahuldhote/scikit-learn | setup.py | 142 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
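# Example invocations (added note; standard setuptools/distutils behavior):
#   python setup.py egg_info   # metadata only -- works without numpy/scipy
#   python setup.py clean      # runs the CleanCommand defined above
#   python setup.py install    # full build -- requires numpy and scipy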
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/utils/class_weight.py | 139 | 7206 | # Authors: Andreas Mueller
# Manoj Kumar
# License: BSD 3 clause
import warnings
import numpy as np
from ..externals import six
from ..utils.fixes import in1d
from .fixes import bincount
def compute_class_weight(class_weight, classes, y):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'balanced' or None
If 'balanced', class weights will be given by
``n_samples / (n_classes * np.bincount(y))``.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y : array-like, shape (n_samples,)
        Array of original class labels per sample.
Returns
-------
class_weight_vect : ndarray, shape (n_classes,)
Array with class_weight_vect[i] the weight for i-th class
References
----------
The "balanced" heuristic is inspired by
    Logistic Regression in Rare Events Data, King, Zeng, 2001.
"""
    # Import inside the function to avoid a circular import.
from ..preprocessing import LabelEncoder
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight in ['auto', 'balanced']:
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.in1d(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
# inversely proportional to the number of samples in the class
if class_weight == 'auto':
recip_freq = 1. / bincount(y_ind)
weight = recip_freq[le.transform(classes)] / np.mean(recip_freq)
warnings.warn("The class_weight='auto' heuristic is deprecated in"
" favor of a new heuristic class_weight='balanced'."
" 'auto' will be removed in 0.18", DeprecationWarning)
else:
recip_freq = len(y) / (len(le.classes_) *
bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
def compute_sample_weight(class_weight, y, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Array of original class labels per sample.
indices : array-like, shape (n_subsample,), or None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "auto" is
supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray, shape (n_samples,)
Array with sample weights as applied to the original y
"""
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, six.string_types):
if class_weight not in ['balanced', 'auto']:
raise ValueError('The only valid preset for class_weight is '
'"balanced". Given "%s".' % class_weight)
elif (indices is not None and
not isinstance(class_weight, six.string_types)):
raise ValueError('The only valid class_weight for subsampling is '
'"balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if (not hasattr(class_weight, "__iter__") or
isinstance(class_weight, dict)):
raise ValueError("For multi-output, class_weight should be a "
"list of dicts, or a valid string.")
if len(class_weight) != n_outputs:
raise ValueError("For multi-output, number of elements in "
"class_weight should match number of outputs.")
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight in ['balanced', 'auto'] or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y[indices, k]
classes_subsample = np.unique(y_subsample)
weight_k = np.choose(np.searchsorted(classes_subsample,
classes_full),
compute_class_weight(class_weight_k,
classes_subsample,
y_subsample),
mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(class_weight_k,
classes_full,
y_full)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[in1d(y_full, list(classes_missing))] = 0.
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight,
axis=0,
dtype=np.float64)
return expanded_class_weight
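if __name__ == '__main__':
    # Hedged demo (added; run as `python -m sklearn.utils.class_weight`
    # because of the relative imports above). For y = [0, 0, 0, 1] the
    # 'balanced' heuristic n_samples / (n_classes * bincount(y)) gives
    # 4 / (2 * [3, 1]) -> [0.667, 2.0].
    print(compute_class_weight('balanced', np.array([0, 1]),
                               np.array([0, 0, 0, 1])))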
| bsd-3-clause |
jhseu/tensorflow | tensorflow/python/autograph/core/converter.py | 1 | 13320 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.nodeTransformer
[uses] transfomer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transfomer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
  optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
    AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
user_requested: bool, whether the conversion was explicitly requested by
the user, as opposed to being performed as a result of other logic. This
      value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
    return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
class ProgramContext(
collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
"""
pass
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext,
    target_name: Text
"""
def __init__(self, namer, entity_info, program_ctx, target_name=None):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
self.target_name = target_name
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
      get_definition_directive(node, ag.foo_directive, 'baz', default=None)
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError(
'%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg, parser.unparse(other_value).strip(),
parser.unparse(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
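if __name__ == '__main__':
  # Hedged demo (added by the editor; not part of upstream TensorFlow).
  # Exercises the option-composition rules documented above: a child
  # conversion keeps recursive/optional_features but resets user_requested.
  opts = ConversionOptions(recursive=True, user_requested=True,
                           optional_features=Feature.EQUALITY_OPERATORS)
  child = opts.call_options()
  assert child.recursive and not child.user_requested
  assert opts.uses(Feature.EQUALITY_OPERATORS)
  print(opts)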
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/qda.py | 139 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
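if __name__ == '__main__':
    # Hedged demo (added; run as `python -m sklearn.qda` because of the
    # relative imports above). Reproduces the docstring example.
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = QDA().fit(X, y)
    print(clf.predict([[-0.8, -1]]))  # expected: [1]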
| bsd-3-clause |
cmeessen/fatiando | setup.py | 5 | 3618 | """
Build extension modules, package and install Fatiando.
"""
from __future__ import absolute_import
import sys
import os
from setuptools import setup, Extension, find_packages
import numpy
import versioneer
# VERSIONEER SETUP
# #############################################################################
versioneer.VCS = 'git'
versioneer.versionfile_source = 'fatiando/_version.py'
versioneer.versionfile_build = 'fatiando/_version.py'
versioneer.tag_prefix = 'v'
versioneer.parentdir_prefix = '.'
# PACKAGE METADATA
# #############################################################################
NAME = 'fatiando'
FULLNAME = 'Fatiando a Terra'
DESCRIPTION = "Modeling and inversion for geophysics"
AUTHOR = "Leonardo Uieda"
AUTHOR_EMAIL = 'leouieda@gmail.com'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
VERSION = versioneer.get_version()
CMDCLASS = versioneer.get_cmdclass()
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
PACKAGES = find_packages(exclude=['doc', 'ci', 'cookbook', 'gallery'])
PACKAGE_DATA = {
'fatiando.datasets': ['data/*'],
'fatiando.datasets.tests': ['data/*'],
'fatiando.gravmag.tests': ['data/*'],
}
LICENSE = "BSD License"
URL = "http://www.fatiando.org"
PLATFORMS = "Any"
SCRIPTS = []
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: {}".format(LICENSE),
]
KEYWORDS = 'geophysics modeling inversion gravimetry seismic magnetometry'
# DEPENDENCIES
# #############################################################################
INSTALL_REQUIRES = [
'numpy',
'scipy',
'numba',
'future',
'matplotlib',
'pillow',
'jupyter',
]
# C EXTENSIONS
# #############################################################################
# If running setup.py with --cython, set things up to generate the Cython
# .c files from the .pyx sources. If not, compile the pre-converted C files.
use_cython = '--cython' in sys.argv
# Build the module name and the path name for the extension modules
ext = '.pyx' if use_cython else '.c'
ext_parts = [
['fatiando', 'seismic', '_ttime2d'],
['fatiando', 'seismic', '_wavefd'],
['fatiando', 'gravmag', '_prism'],
]
extensions = [('.'.join(parts), os.path.join(*parts) + ext)
for parts in ext_parts]
libs = []
if os.name == 'posix':
libs.append('m')
ext_args = dict(libraries=libs, include_dirs=[numpy.get_include()])
EXT_MODULES = [Extension(name, [path], **ext_args)
for name, path in extensions]
# Cythonize the .pyx modules if --cython is used
if use_cython:
sys.argv.remove('--cython')
from Cython.Build import cythonize
EXT_MODULES = cythonize(EXT_MODULES)
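# Example invocations (added note; assumes standard setuptools behavior):
#   python setup.py build_ext --inplace             # compile pre-generated C
#   python setup.py build_ext --inplace --cython    # regenerate C from .pyx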
if __name__ == '__main__':
setup(name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
scripts=SCRIPTS,
packages=PACKAGES,
ext_modules=EXT_MODULES,
package_data=PACKAGE_DATA,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
cmdclass=CMDCLASS,
install_requires=INSTALL_REQUIRES)
| bsd-3-clause |
matthew-tucker/mne-python | examples/inverse/plot_label_activation_from_stc.py | 50 | 1949 | """
==================================================
Extracting time course from source_estimate object
==================================================
Load a SourceEstimate object from stc files and
extract the time course of activation in
individual labels, as well as in a complex label
formed through merging two labels.
"""
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
meg_path = data_path + '/MEG/sample'
# load the stc
stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
# load the labels
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
# extract the time course for different labels from the stc
stc_lh = stc.in_label(aud_lh)
stc_rh = stc.in_label(aud_rh)
stc_bh = stc.in_label(aud_lh + aud_rh)
# calculate center of mass and transform to mni coordinates
vtx, _, t_lh = stc_lh.center_of_mass('sample')
mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
vtx, _, t_rh = stc_rh.center_of_mass('sample')
mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
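# Note (added): mne.vertex_to_mni(vertices, hemis, subject) maps surface
# vertex indices to MNI coordinates; hemi index 0 is left, 1 is right.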
# plot the activation
plt.figure()
plt.axes([.1, .275, .85, .625])
hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0]
hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0]
hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0]
plt.xlabel('Time (s)')
plt.ylabel('Source amplitude (dSPM)')
plt.xlim(stc.times[0], stc.times[-1])
# add a legend including center-of-mass mni coordinates to the plot
labels = ['LH: center of mass = %s' % mni_lh.round(2),
'RH: center of mass = %s' % mni_rh.round(2),
'Combined LH & RH']
plt.figlegend([hl, hr, hb], labels, 'lower center')
plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
plt.show()
| bsd-3-clause |
google-research/torchsde | tests/problems.py | 1 | 13372 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Problems of different noise types.
Each example is of a particular noise type.
ExDiagonal, ExScalar, ExAdditive are examples 1-3 from
Rackauckas, Christopher, and Qing Nie. "Adaptive methods for stochastic
differential equations via natural embeddings and rejection sampling with memory."
Discrete and continuous dynamical systems. Series B 22.7 (2017): 2731.
Neural* all use simple neural networks.
BasicSDE1-4 are problems where the drift and diffusion may not depend on
trainable parameters.
CustomNamesSDE and CustomNamesSDELogqp are used to test the argument `names`.
"""
import torch
from torch import nn
from torchsde import BaseSDE, SDEIto
from torchsde.settings import NOISE_TYPES, SDE_TYPES
class ExDiagonal(BaseSDE):
noise_type = NOISE_TYPES.diagonal
def __init__(self, d, sde_type=SDE_TYPES.ito, **kwargs):
super(ExDiagonal, self).__init__(sde_type=sde_type, noise_type=ExDiagonal.noise_type)
self._nfe = 0
# Use non-exploding initialization.
sigma = torch.sigmoid(torch.randn(d))
mu = -sigma ** 2 - torch.sigmoid(torch.randn(d))
self.mu = nn.Parameter(mu, requires_grad=True)
self.sigma = nn.Parameter(sigma, requires_grad=True)
self.f = self.f_ito if sde_type == SDE_TYPES.ito else self.f_stratonovich
def f_ito(self, t, y):
self._nfe += 1
return self.mu * y
def f_stratonovich(self, t, y):
self._nfe += 1
return self.mu * y - .5 * (self.sigma ** 2) * y
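    # (Added note) The Stratonovich drift above subtracts the Ito correction
    # 0.5 * g * dg/dy = 0.5 * sigma ** 2 * y, so both parametrizations
    # describe the same process.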
def g(self, t, y):
self._nfe += 1
return self.sigma * y
def h(self, t, y):
self._nfe += 1
return torch.zeros_like(y)
@property
def nfe(self):
return self._nfe
class ExScalar(BaseSDE):
noise_type = NOISE_TYPES.scalar
def __init__(self, d, sde_type=SDE_TYPES.ito, **kwargs):
super(ExScalar, self).__init__(sde_type=sde_type, noise_type=ExScalar.noise_type)
self._nfe = 0
self.p = nn.Parameter(torch.sigmoid(torch.randn(d)), requires_grad=True)
self.f = self.f_ito if sde_type == SDE_TYPES.ito else self.f_stratonovich
def f_ito(self, t, y):
self._nfe += 1
return -self.p ** 2. * torch.sin(y) * torch.cos(y) ** 3.
def f_stratonovich(self, t, y):
self._nfe += 1
return torch.zeros_like(y)
def g(self, t, y):
self._nfe += 1
return (self.p * torch.cos(y) ** 2).unsqueeze(dim=-1)
def h(self, t, y):
self._nfe += 1
return torch.zeros_like(y)
@property
def nfe(self):
return self._nfe
class ExAdditive(BaseSDE):
noise_type = NOISE_TYPES.additive
def __init__(self, d, m, sde_type=SDE_TYPES.ito, **kwargs):
super(ExAdditive, self).__init__(sde_type=sde_type, noise_type=ExAdditive.noise_type)
self._nfe = 0
self.m = m
self.a = nn.Parameter(torch.sigmoid(torch.randn(d)), requires_grad=True)
self.b = nn.Parameter(torch.sigmoid(torch.randn(d)), requires_grad=True)
def f(self, t, y):
self._nfe += 1
return self.b / torch.sqrt(1. + t) - y / (2. + 2. * t)
def g(self, t, y):
self._nfe += 1
fill_value = self.a * self.b / torch.sqrt(1. + t)
return fill_value.unsqueeze(dim=0).unsqueeze(dim=-1).repeat(y.size(0), 1, self.m)
def h(self, t, y):
self._nfe += 1
return torch.zeros_like(y)
@property
def nfe(self):
return self._nfe
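# Diffusion output shapes assumed by the problems in this file (hedged
# summary of torchsde's conventions) for state y of shape (batch, d) and
# m Brownian channels:
#   diagonal noise:         g(t, y) -> (batch, d)
#   scalar noise:           g(t, y) -> (batch, d, 1)
#   additive/general noise: g(t, y) -> (batch, d, m)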
class NeuralDiagonal(BaseSDE):
noise_type = NOISE_TYPES.diagonal
def __init__(self, d, sde_type=SDE_TYPES.ito, **kwargs):
super(NeuralDiagonal, self).__init__(sde_type=sde_type, noise_type=NeuralDiagonal.noise_type)
self.f_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d)
)
self.g_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d),
nn.Sigmoid()
)
def f(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return self.f_net(ty)
def g(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return 0.1 * self.g_net(ty) # small noise makes passing adjoint tests easier/possible
def h(self, t, y):
return torch.zeros_like(y)
class NeuralScalar(BaseSDE):
noise_type = NOISE_TYPES.scalar
def __init__(self, d, sde_type=SDE_TYPES.ito, **kwargs):
super(NeuralScalar, self).__init__(sde_type=sde_type, noise_type=NeuralScalar.noise_type)
self.f_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d)
)
self.g_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d),
nn.Sigmoid()
)
def f(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return self.f_net(ty)
def g(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return 0.1 * self.g_net(ty).unsqueeze(-1) # small noise makes passing adjoint tests easier/possible
def h(self, t, y):
return torch.zeros_like(y)
class NeuralAdditive(BaseSDE):
noise_type = NOISE_TYPES.additive
def __init__(self, d, m, sde_type=SDE_TYPES.ito, **kwargs):
super(NeuralAdditive, self).__init__(sde_type=sde_type, noise_type=NeuralAdditive.noise_type)
self.d = d
self.m = m
self.f_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d)
)
self.g_net = nn.Sequential(
nn.Linear(1, 8),
nn.Softplus(),
nn.Linear(8, d * m),
nn.Sigmoid()
)
def f(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return self.f_net(ty)
def g(self, t, y):
return self.g_net(t.expand(y.size(0), 1)).view(y.size(0), self.d, self.m)
def h(self, t, y):
return torch.zeros_like(y)
class NeuralGeneral(BaseSDE):
noise_type = NOISE_TYPES.general
def __init__(self, d, m, sde_type=SDE_TYPES.ito, **kwargs):
super(NeuralGeneral, self).__init__(sde_type=sde_type, noise_type=NeuralGeneral.noise_type)
self.d = d
self.m = m
self.f_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d)
)
self.g_net = nn.Sequential(
nn.Linear(d + 1, 8),
nn.Softplus(),
nn.Linear(8, d * m),
nn.Sigmoid()
)
def f(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return self.f_net(ty)
def g(self, t, y):
ty = torch.cat([t.expand(y.size(0), 1), y], dim=1)
return self.g_net(ty).reshape(y.size(0), self.d, self.m)
def h(self, t, y):
return torch.zeros_like(y)
class BasicSDE1(SDEIto):
def __init__(self, d=10):
super(BasicSDE1, self).__init__(noise_type="diagonal")
self.shared_param = nn.Parameter(torch.randn(1, d), requires_grad=True)
self.no_grad_param = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param1 = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param2 = nn.Parameter(torch.randn(1, d), requires_grad=True)
def f(self, t, y):
return self.shared_param * torch.sin(y) * 0.2 + torch.cos(y ** 2.) * 0.1 + torch.cos(t) + self.no_grad_param * y
def g(self, t, y):
return torch.sigmoid(self.shared_param * torch.cos(y) * .3 + torch.sin(t)) + torch.sigmoid(
self.no_grad_param * y) + 0.1
def h(self, t, y):
return torch.sigmoid(y)
class BasicSDE2(SDEIto):
def __init__(self, d=10):
super(BasicSDE2, self).__init__(noise_type="diagonal")
self.shared_param = nn.Parameter(torch.randn(1, d), requires_grad=True)
self.no_grad_param = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param1 = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param2 = nn.Parameter(torch.randn(1, d), requires_grad=True)
def f(self, t, y):
return self.shared_param * 0.2 + self.no_grad_param + torch.zeros_like(y)
def g(self, t, y):
return torch.sigmoid(self.shared_param * .3) + torch.sigmoid(self.no_grad_param) + torch.zeros_like(y) + 0.1
def h(self, t, y):
return torch.sigmoid(y)
class BasicSDE3(SDEIto):
def __init__(self, d=10):
super(BasicSDE3, self).__init__(noise_type="diagonal")
self.shared_param = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.no_grad_param = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param1 = nn.Parameter(torch.randn(1, d), requires_grad=True)
self.unused_param2 = nn.Parameter(torch.randn(1, d), requires_grad=False)
def f(self, t, y):
return self.shared_param * 0.2 + self.no_grad_param + torch.zeros_like(y)
def g(self, t, y):
return torch.sigmoid(self.shared_param * .3) + torch.sigmoid(self.no_grad_param) + torch.zeros_like(y) + 0.1
def h(self, t, y):
return torch.sigmoid(y)
class BasicSDE4(SDEIto):
def __init__(self, d=10):
super(BasicSDE4, self).__init__(noise_type="diagonal")
self.shared_param = nn.Parameter(torch.randn(1, d), requires_grad=True)
self.no_grad_param = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param1 = nn.Parameter(torch.randn(1, d), requires_grad=False)
self.unused_param2 = nn.Parameter(torch.randn(1, d), requires_grad=True)
def f(self, t, y):
return torch.zeros_like(y).fill_(0.1)
def g(self, t, y):
return torch.sigmoid(torch.zeros_like(y)) + 0.1
def h(self, t, y):
return torch.sigmoid(y)
class CustomNamesSDE(SDEIto):
def __init__(self):
super(CustomNamesSDE, self).__init__(noise_type="diagonal")
def forward(self, t, y):
return y * t
def g(self, t, y):
return torch.sigmoid(t * y)
class CustomNamesSDELogqp(SDEIto):
def __init__(self):
super(CustomNamesSDELogqp, self).__init__(noise_type="diagonal")
def forward(self, t, y):
return y * t
def g(self, t, y):
return torch.sigmoid(t * y)
def w(self, t, y):
return y * t
class FGSDE(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(FGSDE, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f(self, t, y):
return -y
def g(self, t, y):
return y.unsqueeze(-1).sigmoid() * self.vector
class FAndGSDE(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(FAndGSDE, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f_and_g(self, t, y):
return -y, y.unsqueeze(-1).sigmoid() * self.vector
class GProdSDE(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(GProdSDE, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f(self, t, y):
return -y
def g_prod(self, t, y, v):
return (y.unsqueeze(-1).sigmoid() * self.vector).bmm(v.unsqueeze(-1)).squeeze(-1)
class FAndGProdSDE(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(FAndGProdSDE, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f_and_g_prod(self, t, y, v):
return -y, (y.unsqueeze(-1).sigmoid() * self.vector).bmm(v.unsqueeze(-1)).squeeze(-1)
class FAndGGProdSDE1(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(FAndGGProdSDE1, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f_and_g(self, t, y):
return -y, y.unsqueeze(-1).sigmoid() * self.vector
def g_prod(self, t, y, v):
return (y.unsqueeze(-1).sigmoid() * self.vector).bmm(v.unsqueeze(-1)).squeeze(-1)
class FAndGGProdSDE2(torch.nn.Module):
noise_type = 'general'
def __init__(self, sde_type, vector):
super(FAndGGProdSDE2, self).__init__()
self.sde_type = sde_type
self.register_buffer('vector', vector)
def f(self, t, y):
return -y
def f_and_g(self, t, y):
return -y, y.unsqueeze(-1).sigmoid() * self.vector
def g_prod(self, t, y, v):
return (y.unsqueeze(-1).sigmoid() * self.vector).bmm(v.unsqueeze(-1)).squeeze(-1)
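if __name__ == '__main__':
    # Hedged smoke test (added by the editor): integrate one of the
    # problems above with torchsde.sdeint.
    import torchsde
    sde = ExDiagonal(d=3)
    y0 = torch.full((4, 3), 0.1)
    ts = torch.linspace(0., 1., 5)
    ys = torchsde.sdeint(sde, y0, ts, dt=0.05)
    print(ys.shape)  # expected: torch.Size([5, 4, 3])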
| apache-2.0 |
Maximilian-Reuter/SickRage | lib/guessit/test/test_main.py | 32 | 2270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2014 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.test.guessittest import *
from guessit.fileutils import file_in_same_dir
from guessit import PY2
from guessit import __main__
if PY2:
from StringIO import StringIO
else:
from io import StringIO
class TestMain(TestGuessit):
def setUp(self):
self._stdout = sys.stdout
        sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self._stdout
def test_list_properties(self):
__main__.main(["-p"], False)
__main__.main(["-V"], False)
def test_list_transformers(self):
__main__.main(["--transformers"], False)
__main__.main(["-V", "--transformers"], False)
def test_demo(self):
__main__.main(["-d"], False)
def test_filename(self):
__main__.main(["A.Movie.2014.avi"], False)
__main__.main(["A.Movie.2014.avi", "A.2nd.Movie.2014.avi"], False)
__main__.main(["-y", "A.Movie.2014.avi"], False)
__main__.main(["-a", "A.Movie.2014.avi"], False)
__main__.main(["-v", "A.Movie.2014.avi"], False)
__main__.main(["-t", "movie", "A.Movie.2014.avi"], False)
__main__.main(["-t", "episode", "A.Serie.S02E06.avi"], False)
__main__.main(["-i", "hash_mpc", file_in_same_dir(__file__, "1MB")], False)
__main__.main(["-i", "hash_md5", file_in_same_dir(__file__, "1MB")], False)
| gpl-3.0 |
mila-udem/fuel | fuel/config_parser.py | 1 | 7045 | """Module level configuration.
Fuel allows module-wide configuration values to be set using a YAML_
configuration file and `environment variables`_. Environment variables
override the configuration file which in its turn overrides the defaults.
The configuration is read from ``~/.fuelrc`` if it exists. A custom
configuration file can be used by setting the ``FUEL_CONFIG`` environment
variable. A configuration file is of the form:
.. code-block:: yaml
data_path: /home/user/datasets
Which could be overwritten by using environment variables:
.. code-block:: bash
$ FUEL_DATA_PATH=/home/users/other_datasets python
This data path is a sequence of paths separated by an os-specific
delimiter (':' for Linux and OSX, ';' for Windows).
If a setting is not configured and does not provide a default, a
:class:`~.ConfigurationError` is raised when it is
accessed.
Configuration values can be accessed as attributes of
:const:`fuel.config`.
>>> from fuel import config
>>> print(config.data_path) # doctest: +SKIP
'~/datasets'
The following configurations are supported:
.. option:: data_path
The path where dataset files are stored. Can also be set using the
environment variable ``FUEL_DATA_PATH``. Expected to be a sequence
of paths separated by an os-specific delimiter (':' for Linux and
OSX, ';' for Windows).
.. todo::
Implement this.
.. option:: floatX
The default :class:`~numpy.dtype` to use for floating point numbers. The
default value is ``float64``. A lower value can save memory.
.. option:: local_data_path
The local path where the dataset is going to be copied. This is a useful
option for slow network file systems. The dataset is copied once to a
local directory and reused later. Currently, caching is implemented
for :class:`H5PYDataset` and therefore for the majority of builtin
datasets. In order to use caching with your own dataset refer to the
caching documentation: :func:`cache_file`.
.. option:: extra_downloaders
A list of package names which, like fuel.downloaders, define an
`all_downloaders` attribute listing available downloaders. By default,
an empty list.
.. option:: extra_converters
A list of package names which, like fuel.converters, define an
`all_converters` attribute listing available converters. By default,
an empty list.
.. _YAML: http://yaml.org/
.. _environment variables:
https://en.wikipedia.org/wiki/Environment_variable
"""
import logging
import os
import six
import yaml
from .exceptions import ConfigurationError
logger = logging.getLogger(__name__)
NOT_SET = object()
def extra_downloader_converter(value):
"""Parses extra_{downloader,converter} arguments.
Parameters
----------
value : iterable or str
If the value is a string, it is split into a list using spaces
as delimiters. Otherwise, it is returned as is.
"""
if isinstance(value, six.string_types):
value = value.split(" ")
return value
def multiple_paths_parser(value):
"""Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
a list of strings indicating each data paths.
"""
if isinstance(value, six.string_types):
value = value.split(os.path.pathsep)
return value
class Configuration(object):
def __init__(self):
self.config = {}
def load_yaml(self):
if 'FUEL_CONFIG' in os.environ:
yaml_file = os.environ['FUEL_CONFIG']
else:
yaml_file = os.path.expanduser('~/.fuelrc')
if os.path.isfile(yaml_file):
with open(yaml_file) as f:
for key, value in yaml.safe_load(f).items():
if key not in self.config:
raise ValueError("Unrecognized config in YAML: {}"
.format(key))
self.config[key]['yaml'] = value
def __getattr__(self, key):
if key == 'config' or key not in self.config:
raise AttributeError
config_setting = self.config[key]
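        # resolution order: runtime-set value > environment variable >
        # YAML configuration file > declared default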
if 'value' in config_setting:
value = config_setting['value']
elif ('env_var' in config_setting and
config_setting['env_var'] in os.environ):
value = os.environ[config_setting['env_var']]
elif 'yaml' in config_setting:
value = config_setting['yaml']
elif 'default' in config_setting:
value = config_setting['default']
else:
raise ConfigurationError("Configuration not set and no default "
"provided: {}.".format(key))
return config_setting['type'](value)
def __setattr__(self, key, value):
if key != 'config' and key in self.config:
self.config[key]['value'] = value
else:
super(Configuration, self).__setattr__(key, value)
def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default
config = Configuration()
# Define configuration options
config.add_config('data_path', type_=multiple_paths_parser,
env_var='FUEL_DATA_PATH')
config.add_config('local_data_path', type_=str,
env_var='FUEL_LOCAL_DATA_PATH', default="")
config.add_config('default_seed', type_=int, default=1)
config.add_config('extra_downloaders', type_=extra_downloader_converter,
default=[], env_var='FUEL_EXTRA_DOWNLOADERS')
config.add_config('extra_converters', type_=extra_downloader_converter,
default=[], env_var='FUEL_EXTRA_CONVERTERS')
config.add_config('floatX', type_=str, env_var='FUEL_FLOATX')
config.load_yaml()
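# A minimal, hedged usage sketch of the API defined above. The setting name
# 'example_option' and its environment variable are illustrative only and
# are not part of Fuel itself.
if __name__ == '__main__':
    config.add_config('example_option', type_=int, default=3,
                      env_var='FUEL_EXAMPLE_OPTION')
    # prints 3 unless the FUEL_EXAMPLE_OPTION environment variable is set
    print(config.example_option)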
| mit |
matthew-tucker/mne-python | examples/stats/plot_cluster_stats_spatio_temporal_2samp.py | 5 | 4284 | """
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50)
stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
###############################################################################
# Compute statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('clusters.png')
| bsd-3-clause |
uber/pyro | tests/contrib/bnn/test_hidden_layer.py | 1 | 2100 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.nn.functional as F
from torch.distributions import Normal
from pyro.contrib.bnn import HiddenLayer
from tests.common import assert_equal
@pytest.mark.parametrize("non_linearity", [F.relu])
@pytest.mark.parametrize("include_hidden_bias", [False, True])
def test_hidden_layer_rsample(
non_linearity, include_hidden_bias, B=2, D=3, H=4, N=900000
):
X = torch.randn(B, D)
A_mean = torch.rand(D, H)
A_scale = 0.3 * torch.exp(0.3 * torch.rand(D, H))
# test naive weight space sampling against sampling in pre-activation space
dist1 = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
weight_space_sampling=True,
)
dist2 = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
weight_space_sampling=False,
)
out1 = dist1.rsample(sample_shape=(N,))
out1_mean, out1_var = out1.mean(0), out1.var(0)
out2 = dist2.rsample(sample_shape=(N,))
out2_mean, out2_var = out2.mean(0), out2.var(0)
assert_equal(out1_mean, out2_mean, prec=0.003)
assert_equal(out1_var, out2_var, prec=0.003)
return
@pytest.mark.parametrize("non_linearity", [F.relu])
@pytest.mark.parametrize("include_hidden_bias", [True, False])
def test_hidden_layer_log_prob(non_linearity, include_hidden_bias, B=2, D=3, H=2):
X = torch.randn(B, D)
A_mean = torch.rand(D, H)
A_scale = 0.3 * torch.exp(0.3 * torch.rand(D, H))
dist = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
)
A_dist = Normal(A_mean, A_scale)
A_prior = Normal(torch.zeros(D, H), torch.ones(D, H))
kl = torch.distributions.kl.kl_divergence(A_dist, A_prior).sum()
assert_equal(kl, dist.KL, prec=0.01)
| apache-2.0 |
matthew-tucker/mne-python | mne/tests/test_transforms.py | 10 | 7111 | import os
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
assert_almost_equal, assert_array_almost_equal)
import warnings
from mne.io.constants import FIFF
from mne.datasets import testing
from mne import read_trans, write_trans
from mne.utils import _TempDir, run_tests_if_main
from mne.transforms import (invert_transform, _get_mri_head_t,
rotation, rotation3d, rotation_angles, _find_trans,
combine_transforms, transform_coordinates,
collect_transforms, apply_trans, translation,
get_ras_to_neuromag_trans, _sphere_to_cartesian,
_polar_to_cartesian, _cartesian_to_sphere)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
fname_eve = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
fname_trans = op.join(op.split(__file__)[0], '..', 'io', 'tests',
'data', 'sample-audvis-raw-trans.txt')
@testing.requires_testing_data
def test_get_mri_head_t():
"""Test converting '-trans.txt' to '-trans.fif'"""
trans = read_trans(fname)
trans = invert_transform(trans) # starts out as head->MRI, so invert
trans_2 = _get_mri_head_t(fname_trans)[0]
assert_equal(trans['from'], trans_2['from'])
assert_equal(trans['to'], trans_2['to'])
assert_allclose(trans['trans'], trans_2['trans'], rtol=1e-5, atol=1e-5)
@testing.requires_testing_data
def test_io_trans():
"""Test reading and writing of trans files
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
assert_raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
trans0 = read_trans(fname)
fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
write_trans(fname1, trans0)
assert_true(fname1 == _find_trans('sample', subjects_dir=tempdir))
trans1 = read_trans(fname1)
# check all properties
assert_true(trans0['from'] == trans1['from'])
assert_true(trans0['to'] == trans1['to'])
assert_array_equal(trans0['trans'], trans1['trans'])
# check reading non -trans.fif files
assert_raises(IOError, read_trans, fname_eve)
# check warning on bad filenames
with warnings.catch_warnings(record=True) as w:
fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
write_trans(fname2, trans0)
assert_true(len(w) >= 1)
def test_get_ras_to_neuromag_trans():
"""Test the coordinate transformation from ras to neuromag"""
# create model points in neuromag-like space
anterior = [0, 1, 0]
left = [-1, 0, 0]
right = [.8, 0, 0]
up = [0, 0, 1]
rand_pts = np.random.uniform(-1, 1, (3, 3))
pts = np.vstack((anterior, left, right, up, rand_pts))
# change coord system
rx, ry, rz, tx, ty, tz = np.random.uniform(-2 * np.pi, 2 * np.pi, 6)
trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
pts_changed = apply_trans(trans, pts)
# transform back into original space
nas, lpa, rpa = pts_changed[:3]
hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
pts_restored = apply_trans(hsp_trans, pts_changed)
err = "Neuromag transformation failed"
assert_array_almost_equal(pts_restored, pts, 6, err)
def test_sphere_to_cartesian():
"""Test helper transform function from sphere to cartesian"""
phi, theta, r = (np.pi, np.pi, 1)
# expected value is (1, 0, 0)
z = r * np.sin(phi)
rcos_phi = r * np.cos(phi)
x = rcos_phi * np.cos(theta)
y = rcos_phi * np.sin(theta)
coord = _sphere_to_cartesian(phi, theta, r)
# np.pi is an approx since pi is irrational
assert_almost_equal(coord, (x, y, z), 10)
assert_almost_equal(coord, (1, 0, 0), 10)
def test_polar_to_cartesian():
"""Test helper transform function from polar to cartesian"""
r = 1
theta = np.pi
# expected values are (-1, 0)
x = r * np.cos(theta)
y = r * np.sin(theta)
coord = _polar_to_cartesian(theta, r)
# np.pi is an approx since pi is irrational
assert_almost_equal(coord, (x, y), 10)
assert_almost_equal(coord, (-1, 0), 10)
def test_cartesian_to_sphere():
"""Test helper transform function from cartesian to sphere"""
x, y, z = (1, 0, 0)
# expected values are (0, 0, 1)
hypotxy = np.hypot(x, y)
r = np.hypot(hypotxy, z)
elev = np.arctan2(z, hypotxy)
az = np.arctan2(y, x)
coord = _cartesian_to_sphere(x, y, z)
assert_equal(coord, (az, elev, r))
assert_equal(coord, (0, 0, 1))
def test_rotation():
"""Test conversion between rotation angles and transformation matrix
"""
tests = [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]
for rot in tests:
x, y, z = rot
m = rotation3d(x, y, z)
m4 = rotation(x, y, z)
assert_array_equal(m, m4[:3, :3])
back = rotation_angles(m)
assert_equal(back, rot)
back4 = rotation_angles(m4)
assert_equal(back4, rot)
@testing.requires_testing_data
def test_combine():
"""Test combining transforms
"""
trans = read_trans(fname)
inv = invert_transform(trans)
combine_transforms(trans, inv, trans['from'], trans['from'])
assert_raises(RuntimeError, combine_transforms, trans, inv,
trans['to'], trans['from'])
assert_raises(RuntimeError, combine_transforms, trans, inv,
trans['from'], trans['to'])
assert_raises(RuntimeError, combine_transforms, trans, trans,
trans['from'], trans['to'])
@testing.requires_testing_data
def test_transform_coords():
"""Test transforming coordinates
"""
# normal trans won't work
assert_raises(ValueError, transform_coordinates,
fname, np.eye(3), 'meg', 'fs_tal')
# needs to have all entries
pairs = [[FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD],
[FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_MNE_COORD_RAS],
[FIFF.FIFFV_MNE_COORD_RAS, FIFF.FIFFV_MNE_COORD_MNI_TAL],
[FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ],
[FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ],
]
xforms = []
for fro, to in pairs:
xforms.append({'to': to, 'from': fro, 'trans': np.eye(4)})
tempdir = _TempDir()
all_fname = op.join(tempdir, 'all-trans.fif')
collect_transforms(all_fname, xforms)
for fro in ['meg', 'mri']:
for to in ['meg', 'mri', 'fs_tal', 'mni_tal']:
out = transform_coordinates(all_fname, np.eye(3), fro, to)
assert_allclose(out, np.eye(3))
assert_raises(ValueError,
transform_coordinates, all_fname, np.eye(4), 'meg', 'meg')
assert_raises(ValueError,
transform_coordinates, all_fname, np.eye(3), 'fs_tal', 'meg')
run_tests_if_main()
| bsd-3-clause |
amikiri/postgeo | cellspacer.py | 2 | 5295 | #!/usr/bin/env python
"""
This script will take a lat-long pair (e.g., "-80.123, 45.678") in the final column, and determine if any
other lines are at that exactly named pair. If so, it scatters them around in a circle.
So if other points are adjacent or identical but differently named (-80.123 vs. -80.1230), this won't help
much. It works for what it is, and it's not meant to do more.
Scattering distance can be specified on the command line using the -m flag.
This may use a lot of memory on large datasets, so you might want it to work off a database. I didn't.
"""
from __future__ import print_function
import argparse
import csv
import os
import sys
from geopy.distance import VincentyDistance
masterdict = {}
flagonsolo = 987654321
output_string = "Latlong: {:20} Area count: {} Index count: {:>9} Spaced: {:>} Bearing {}"
processed_string = "Processed {} rows at {} locations."
def set_spot(latlong):
if latlong in masterdict.keys():
masterdict[latlong][0] += 1
else:
masterdict[latlong] = [1, -1]
# So if we have a value, we know this spot already and we should add to the count.
    # If we don't have a value, we need to create one, set to 1.
def get_spot(latlong):
indexvalue = masterdict[latlong][1]
if indexvalue == flagonsolo:
pass
else:
masterdict[latlong][1] = indexvalue + 1
return (indexvalue)
# Let's peel the latest index count off, and increment by 1, unless this is the only spot at the location.
def main(spacing, verbose=0):
inputfilename = args.filename
outputfilename = inputfilename[:inputfilename.rfind(".")] + "-jitter" + inputfilename[inputfilename.rfind("."):]
with open(inputfilename, 'r') as inputfilehandle:
rows = csv.reader(inputfilehandle)
headers = next(rows)
for row in rows:
latlong = row[-1]
# We're only grabbing latlong from last field
set_spot(latlong)
# masterdict should now have all latlongs, and their counts
# and inputfilehandle should now be closed.
# Let's now set up our spot dictionary:
for key in masterdict:
if masterdict[key][0] == 1:
masterdict[key][1] = flagonsolo
else:
masterdict[key][1] = 0
# We should be ready to start processing. Our masterdict holds a list keyed to each unique latlong in our
# source CSV. [0] holds how many values are at that index. [1] holds an index to which one of those things
# we're processing, so we know where to jigger it.
# flagonsolo gives us a value that says, "Only one point at this latlong. Don't mess with it."
if os.path.isfile(outputfilename):
message = "File {} exists, proceeding will overwrite(y or n)? "
proceed_prompt = get_input(message.format(outputfilename))
if proceed_prompt.lower() == 'y':
pass
else:
print('Aborting . . .')
exit()
with open(outputfilename, 'w') as outputfile:
put = csv.writer(outputfile, lineterminator='\n')
with open(inputfilename, 'r') as inputfilehandle:
rows = csv.reader(inputfilehandle)
headers = next(rows)
headers.append("RowsHere")
headers.append("latlongspaced")
put.writerow(headers)
for row in rows:
latlong = row[-1]
indexvalue = get_spot(latlong)
if indexvalue == flagonsolo:
latlongspaced = latlong
areacount = 1
bearing = -1
else:
areacount = masterdict[latlong][0]
bearing = (indexvalue * 360 / areacount)
destination = VincentyDistance(meters=spacing).destination(latlong, bearing)
latlongspaced = str(destination.latitude) + ", " + str(destination.longitude)
if verbose == 1:
print(output_string.format(latlong, areacount, indexvalue, latlongspaced, bearing))
row.append(str(areacount))
row.append(latlongspaced)
put.writerow(row)
if verbose == 1:
linecount = 0
for key in masterdict:
linecount = linecount + masterdict[key][0]
print(processed_string.format(linecount, len(masterdict)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Lat-longs to scatter")
parser.add_argument('filename', metavar='filename', help='CSV file containing Lat-longs to scatter')
parser.add_argument("-v", help="turn on verbose output", action="store_true")
parser.add_argument("-m", help="set distance in meters for spacing", default=100, type=int)
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
if args.filename.lower().endswith('.csv'):
if args.v:
main(verbose=1, spacing=args.m)
else:
main(spacing=args.m)
else:
print("File must be of type CSV and end with .csv extension") | mit |
Wikidata/StrepHit | strephit/corpus_analysis/rank_verbs.py | 1 | 8831 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import json
import logging
from collections import defaultdict, OrderedDict
from sys import exit
from numpy import average
import click
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from strephit.commons.io import load_corpus, load_scraped_items
from strephit.commons import parallel
logger = logging.getLogger(__name__)
VERBAL_PREFIXES = {
'en': 'V',
'it': 'VER',
}
def get_similarity_scores(verb_token, vectorizer, tf_idf_matrix):
""" Compute the cosine similarity score of a given verb token against the input corpus TF/IDF matrix.
:param str verb_token: Surface form of a verb, e.g., *born*
:param sklearn.feature_extraction.text.TfidfVectorizer vectorizer: Vectorizer
used to transform verbs into vectors
:return: cosine similarity score
:rtype: ndarray
"""
verb_token_vector = vectorizer.transform([verb_token])
# Here the linear kernel is the same as the cosine similarity, but faster
# cf. http://scikit-learn.org/stable/modules/metrics.html#cosine-similarity
scores = linear_kernel(verb_token_vector, tf_idf_matrix)
logger.debug("Corpus-wide TF/IDF scores for '%s': %s" % (verb_token, scores))
logger.debug("Average TF/IDF score for '%s': %f" % (verb_token, average(scores)))
return scores
def produce_lemma_tokens(pos_tagged_path, pos_tag_key, language):
""" Extracts a map from lemma to all its tokens
:param str pos_tagged_path: path of the pos-tagged corpus
:param str pos_tag_key: where the pos tag data is in each item
:param language: language of the corpus
:return: mapping from lemma to tokens
:rtype: dict
"""
corpus = load_scraped_items(pos_tagged_path)
lemma_tokens = defaultdict(set)
for item in corpus:
for token, pos, lemma in item.get(pos_tag_key, []):
if pos.startswith(VERBAL_PREFIXES[language]):
lemma_tokens[lemma.lower()].add(token.lower())
return lemma_tokens
def compute_tf_idf_matrix(corpus_path, document_key):
""" Computes the TF-IDF matrix of the corpus
:param str corpus_path: path of the corpus
:param str document_key: where the textual content is in the corpus
:return: a vectorizer and the computed matrix
:rtype: tuple
"""
corpus = load_corpus(corpus_path, document_key, text_only=True)
vectorizer = TfidfVectorizer()
return vectorizer, vectorizer.fit_transform(corpus)
class TFIDFRanking:
""" Computes TF-IDF based rankings.
    The first ranking is based on the average TF-IDF score of each lemma over the whole corpus
    The second ranking is based on the average standard deviation of TF-IDF scores
    of each lemma over the whole corpus
"""
def __init__(self, vectorizer, verbs, tfidf_matrix):
self.vectorizer = vectorizer
self.verbs = verbs
self.tfidf_matrix = tfidf_matrix
def score_lemma(self, lemma):
""" Computess TF-IDF based score of a single lemma
:param str lemma: The lemma to score
:return: tuple with lemma, average tf-idf, average of tf-idf standard deviations
:rtype: tuple of (str, float, float)
"""
tf_idfs, st_devs = [], []
for token in self.verbs[lemma]:
scores = get_similarity_scores(token, self.vectorizer, self.tfidf_matrix)
tf_idfs += filter(None, scores.flatten().tolist())
st_devs.append(scores.std())
return lemma, average(tf_idfs), average(st_devs)
def find_ranking(self, processes=0):
""" Ranks the verbs
:param int processes: How many processes to use for parallel ranking
:return: tuple with average tf-idf and average standard deviation ordered rankings
:rtype: tuple of (OrderedDict, OrderedDict)
"""
tfidf_ranking = {}
stdev_ranking = {}
for lemma, tfidf, stdev in parallel.map(self.score_lemma, self.verbs, processes):
tfidf_ranking[lemma] = tfidf
stdev_ranking[lemma] = stdev
return (OrderedDict(sorted(tfidf_ranking.items(), key=lambda x: x[1], reverse=True)),
OrderedDict(sorted(stdev_ranking.items(), key=lambda x: x[1], reverse=True)))
class PopularityRanking:
""" Ranking based on the popularity of each verb. Simply counts the
    frequency of each lemma over the whole corpus
"""
def __init__(self, corpus_path, pos_tag_key):
self.tags = self._flatten(item.get(pos_tag_key) for item in load_scraped_items(corpus_path))
@staticmethod
def _flatten(iterable):
for each in iterable:
for x in each:
yield x
@staticmethod
def _bulkenize(iterable, bulk_size):
acc = []
for each in iterable:
acc.append(each)
if len(acc) % bulk_size == 0:
yield acc
acc = []
if acc:
yield acc
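    # e.g. _bulkenize([1, 2, 3, 4, 5], 2) yields [1, 2], then [3, 4], then [5]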
@staticmethod
def score_from_tokens(tokens):
scores = defaultdict(int)
for token, pos, lemma in tokens:
if pos.startswith('V'):
scores[lemma.lower()] += 1
return scores
def find_ranking(self, processes=0, bulk_size=10000, normalize=True):
ranking = defaultdict(int)
for score in parallel.map(self.score_from_tokens,
self._bulkenize(self.tags, bulk_size),
processes):
for k, v in score.iteritems():
ranking[k] += v
ranking = OrderedDict(sorted(ranking.items(), key=lambda x: x[1], reverse=True))
if normalize:
max_score = float(ranking[next(iter(ranking))])
for lemma, score in ranking.iteritems():
ranking[lemma] = score / max_score
return ranking
def harmonic_ranking(*rankings):
""" Combines individual rankings with an harmonic mean to obtain a final ranking
:param rankings: dictionary of individual rankings
:return: the new, combined ranking
"""
def product(x, y):
return x * y
def sum(x, y):
return x + y
def get(k):
return (r[k] for r in rankings)
lemmas = reduce(lambda x, y: x.union(y), (set(r) for r in rankings))
return OrderedDict(sorted(
[(l, len(rankings) * reduce(product, get(l)) / (1 + reduce(sum, get(l)))) for l in lemmas],
key=lambda (_, v): v,
reverse=True
))
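# Worked example with illustrative scores: a lemma scoring 0.5 and 0.8 in two
# rankings gets 2 * (0.5 * 0.8) / (1 + 0.5 + 0.8) = 0.8 / 2.3 ~= 0.35; the
# +1 in the denominator smooths the combined score for low-scoring lemmas.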
@click.command()
@click.argument('pos_tagged', type=click.Path(exists=True, dir_okay=False))
@click.argument('document_key')
@click.argument('language')
@click.option('--pos-tag-key', default='pos_tag')
@click.option('--dump-verbs', type=click.File('w'), default='output/verbs.json')
@click.option('--dump-tf-idf', type=click.File('w'), default='output/tf_idf_ranking.json')
@click.option('--dump-stdev', type=click.File('w'), default='output/stdev_ranking.json')
@click.option('--dump-popularity', type=click.File('w'), default='output/popularity_ranking.json')
@click.option('--dump-final', type=click.File('w'), default='output/verb_ranking.json')
@click.option('--processes', '-p', default=0)
def main(pos_tagged, document_key, pos_tag_key, language, dump_verbs, dump_tf_idf,
dump_stdev, dump_popularity, dump_final, processes):
""" Computes the three verb rankings: average TF-IDF, average of TF-IDF
standard deviation and popularity.
"""
logger.info('Computing lemma-to-token map and TF-IDF matrix ...')
lemma_tokens, (vectorizer, tf_idf_matrix) = parallel.execute(
2,
produce_lemma_tokens, (pos_tagged, pos_tag_key, language),
compute_tf_idf_matrix, (pos_tagged, document_key)
)
logger.info('Scoring verbs by popularity ...')
pop_ranking = PopularityRanking(pos_tagged, pos_tag_key).find_ranking(processes)
logger.info('Scoring verbs by TF-IDF based metrics (average and standard deviation) ...')
tfidf_ranking, stdev_ranking = TFIDFRanking(vectorizer, lemma_tokens, tf_idf_matrix).find_ranking(processes)
logger.info('Producing combined final ranking ...')
final_ranking = harmonic_ranking(pop_ranking, tfidf_ranking, stdev_ranking)
json.dump(tfidf_ranking, dump_tf_idf, indent=2)
json.dump(stdev_ranking, dump_stdev, indent=2)
json.dump(pop_ranking, dump_popularity, indent=2)
json.dump(final_ranking, dump_final, indent=2)
logger.info('Dumped all the rankings to %s' % [dump_tf_idf.name, dump_stdev.name, dump_popularity.name, dump_final.name])
json.dump(lemma_tokens, dump_verbs, default=lambda x: list(x), indent=2)
logger.info("Dumped lemma-to-token map to '%s'" % dump_verbs.name)
| gpl-3.0 |
justincassidy/scikit-learn | sklearn/datasets/mldata.py | 306 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
pystruct/pystruct | pystruct/tests/test_learners/test_primal_dual.py | 5 | 1151 | #from crf import BinaryGridCRF
#from structured_svm import NSlackSSVM, SubgradientSSVM
#from structured_svm import objective_primal, PrimalDSStructuredSVM
#from toy_datasets import binary
#def test_primal_dual_binary():
#for C in [1, 100, 100000]:
#for dataset in binary:
#X, Y = dataset(n_samples=1)
#crf = BinaryGridCRF()
#clf = NSlackSSVM(model=crf, max_iter=200, C=C,
#check_constraints=True)
#clf.fit(X, Y)
#clf2 = SubgradientSSVM(model=crf, max_iter=200, C=C)
#clf2.fit(X, Y)
#clf3 = PrimalDSStructuredSVM(model=crf, max_iter=200, C=C)
#clf3.fit(X, Y)
#obj = objective_primal(crf, clf.w, X, Y, C)
## the dual finds the optimum so it might be better
#obj2 = objective_primal(crf, clf2.w, X, Y, C)
#obj3 = objective_primal(crf, clf3.w, X, Y, C)
#assert(obj <= obj2)
#assert(obj <= obj3)
#print("objective difference: %f\n" % (obj2 - obj))
#print("objective difference DS: %f\n" % (obj3 - obj))
#test_primal_dual_binary()
| bsd-2-clause |
rbharath/deepchem | deepchem/molnet/run_benchmark.py | 1 | 10581 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 06 14:25:40 2017
@author: Zhenqin Wu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import time
import csv
import numpy as np
import tensorflow as tf
import deepchem
from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression
from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit
def run_benchmark(datasets,
model,
split=None,
metric=None,
featurizer=None,
n_features=0,
out_path='.',
hyper_parameters=None,
test=False,
reload=True,
seed=123):
"""
Run benchmark test on designated datasets with deepchem(or user-defined) model
Parameters
----------
datasets: list of string
choice of which datasets to use, should be: bace_c, bace_r, bbbp, chembl,
clearance, clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba,
pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast
  model: string or user-defined model structure
choice of which model to use, deepchem provides implementation of
logistic regression, random forest, multitask network,
bypass multitask network, irv, graph convolution;
    for a user-defined model, it should include the functions: fit, evaluate
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
metric: string, optional (default=None)
choice of evaluation metrics, None = using the default metrics(AUC & R2)
featurizer: string or dc.feat.Featurizer, optional (default=None)
choice of featurization, None = using the default corresponding to model
(string only applicable to deepchem models)
n_features: int, optional(default=0)
depending on featurizers, redefined when using deepchem featurizers,
need to be specified for user-defined featurizers(if using deepchem models)
out_path: string, optional(default='.')
path of result file
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
test: boolean, optional(default=False)
whether to evaluate on test set
reload: boolean, optional(default=True)
whether to save and reload featurized datasets
"""
for dataset in datasets:
if dataset in [
'bace_c', 'bbbp', 'clintox', 'hiv', 'muv', 'pcba', 'sider', 'tox21',
'toxcast'
]:
mode = 'classification'
      if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean),
deepchem.metrics.Metric(deepchem.metrics.prc_auc_score, np.mean)
]
elif dataset in [
'bace_r', 'chembl', 'clearance', 'delaney', 'hopv', 'kaggle', 'lipo',
'nci', 'pdbbind', 'ppb', 'qm7', 'qm7b', 'qm8', 'qm9', 'sampl'
]:
mode = 'regression'
      if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.pearson_r2_score, np.mean)
]
else:
raise ValueError('Dataset not supported')
    if featurizer is None and isinstance(model, str):
# Assigning featurizer if not user defined
pair = (dataset, model)
if pair in CheckFeaturizer:
featurizer = CheckFeaturizer[pair][0]
n_features = CheckFeaturizer[pair][1]
else:
continue
if not split in [None] + CheckSplit[dataset]:
continue
loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'kaggle': deepchem.molnet.load_kaggle,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pdbbind': deepchem.molnet.load_pdbbind_grid,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7_from_mat,
'qm7b': deepchem.molnet.load_qm7b_from_mat,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast
}
print('-------------------------------------')
print('Benchmark on dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split, reload=reload)
else:
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, reload=reload)
train_dataset, valid_dataset, test_dataset = all_dataset
time_start_fitting = time.time()
train_score = {}
valid_score = {}
test_score = {}
if isinstance(model, str):
if mode == 'classification':
train_score, valid_score, test_score = benchmark_classification(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
elif mode == 'regression':
train_score, valid_score, test_score = benchmark_regression(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
else:
model.fit(train_dataset)
train_score['user_defined'] = model.evaluate(train_dataset, metric,
transformers)
valid_score['user_defined'] = model.evaluate(valid_dataset, metric,
transformers)
if test:
test_score['user_defined'] = model.evaluate(test_dataset, metric,
transformers)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'), 'a') as f:
writer = csv.writer(f)
model_name = list(train_score.keys())[0]
for i in train_score[model_name]:
output_line = [
dataset, str(split), mode, model_name, i, 'train',
train_score[model_name][i], 'valid', valid_score[model_name][i]
]
if test:
output_line.extend(['test', test_score[model_name][i]])
output_line.extend(
['time_for_running', time_finish_fitting - time_start_fitting])
writer.writerow(output_line)
#
# Note by @XericZephyr. Reason why I spun off this function:
# 1. Some model needs dataset information.
# 2. It offers us the possibility to **cache** the dataset
# if the featurizer runs very slow, e.g., GraphConv.
# 2+. The cache can even happen at Travis CI to accelerate
# CI testing.
#
def load_dataset(dataset, featurizer, split='random'):
"""
Load specific dataset for benchmark.
Parameters
----------
dataset: string
choice of which datasets to use, should be: tox21, muv, sider,
toxcast, pcba, delaney, kaggle, nci, clintox, hiv, pdbbind, chembl,
qm7, qm7b, qm9, sampl
featurizer: string or dc.feat.Featurizer.
choice of featurization.
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
"""
dataset_loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'kaggle': deepchem.molnet.load_kaggle,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pdbbind': deepchem.molnet.load_pdbbind_grid,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7_from_mat,
'qm7b': deepchem.molnet.load_qm7b_from_mat,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast
}
print('-------------------------------------')
print('Loading dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = dataset_loading_functions[dataset](
featurizer=featurizer, split=split)
return tasks, all_dataset, transformers
def benchmark_model(model, all_dataset, transformers, metric, test=False):
"""
Benchmark custom model.
  model: user-defined model structure
    For a user-defined model, it should include the functions: fit, evaluate.
all_dataset: (train, test, val) data tuple.
Returned by `load_dataset` function.
transformers
metric: string
choice of evaluation metrics.
"""
time_start_fitting = time.time()
train_score = .0
valid_score = .0
test_score = .0
train_dataset, valid_dataset, test_dataset = all_dataset
model.fit(train_dataset)
train_score = model.evaluate(train_dataset, metric, transformers)
valid_score = model.evaluate(valid_dataset, metric, transformers)
if test:
test_score = model.evaluate(test_dataset, metric, transformers)
time_finish_fitting = time.time()
time_for_running = time_finish_fitting - time_start_fitting
return train_score, valid_score, test_score, time_for_running
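# Hedged usage sketch, where MyModel is a placeholder for any object exposing
# fit(dataset) and evaluate(dataset, metric, transformers):
#
#   tasks, all_dataset, transformers = load_dataset('tox21', 'ECFP')
#   metric = [deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)]
#   scores = benchmark_model(MyModel(), all_dataset, transformers, metric)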
| mit |
Lawrence-Liu/scikit-learn | benchmarks/bench_glmnet.py | 295 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10  # integer division: make_regression expects an int
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10  # integer division: make_regression expects an int
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
aagnone3/audio_analysis | learn.py | 1 | 5803 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 06 16:55:34 2016
@author: aagnone3
"""
from __future__ import print_function
import csv
import glob
import logging
import os
from learning.classification import AudioClassifier, GMMAudioClassifier
from common import environment
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn import mixture
from sklearn.naive_bayes import GaussianNB
# data directories and paths
DATA_DIR = os.sep.join(['res', 'data', 'speaker_recognition'])
TRAIN_DATA_DIR = os.sep.join([DATA_DIR, 'train'])
TEST_DATA_DIR = os.sep.join([DATA_DIR, 'test'])
# training file information
TRAIN_FILES = glob.glob(os.sep.join((TRAIN_DATA_DIR, "*.wav")))
TRAIN_FILE_PREFIXES = [f[:f.find("_")] for f in TRAIN_FILES]
TRAIN_FILE_COUNTS = [TRAIN_FILE_PREFIXES.count(p) for p in np.unique(TRAIN_FILE_PREFIXES)]
# testing file information
TEST_FILES = glob.glob(os.sep.join((TEST_DATA_DIR, "*.wav")))
TEST_FILE_PREFIXES = [f[:f.find("_")] for f in TEST_FILES]
TEST_FILE_COUNTS = [TEST_FILE_PREFIXES.count(p) for p in np.unique(TEST_FILE_PREFIXES)]
# keywords for naming
KEYWORD = 'Test'
ALGORITHM = "audio_classification"
def evaluate_classifier(paths, learner, extract_features=True):
"""
Performs training and testing with the specified learner by extracting features from the files in the training
and testing directories specified globally
:param paths: paths to save learning results
:param learner: learner to use
:param extract_features: whether to extract features from the training and testing directories. If set to False,
the classifier assumes that training and testing feature matrices exist at paths['train_data'] and
paths['test_data'], respectively
:return: None
"""
# instantiate the classifier with the passed learner
classifier = AudioClassifier(learner)
# train the classifier
if extract_features:
[y_train_predict, y_train_actual] = classifier.train_model(data_directory=TRAIN_DATA_DIR,
save_model_to=paths['model_name'],
save_features_to=paths['train_data'])
else:
[y_train_predict, y_train_actual] = classifier.train_model(data_directory=TRAIN_DATA_DIR,
save_model_to=paths['model_name'],
load_features_from=paths['train_data'])
# test the classifier
if extract_features:
[y_test_predict, y_test_actual] = AudioClassifier.test_model(
classifier=AudioClassifier.load_model(paths['model_name']),
data_directory=TEST_DATA_DIR,
save_features_to=paths['test_data'])
else:
[y_test_predict, y_test_actual] = AudioClassifier.test_model(
classifier=AudioClassifier.load_model(paths['model_name']),
load_features_from=paths['test_data'])
process_results(y_train_predict, y_train_actual, paths['train_results'])
process_results(y_test_predict, y_test_actual, paths['test_results'])
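# Hedged note: once a first run has pickled features to paths['train_data']
# and paths['test_data'], a later run such as
#   evaluate_classifier(paths, SVC(), extract_features=False)
# retrains on the cached features instead of re-extracting them from the audio.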
def process_results(y_predict, y_actual, results_file_name):
"""
Prints a simple analysis of test results to the console
:param y_predict: predicted class labels
:param y_actual: actual class labels
:param results_file_name: path to use for saving off results
:return: None
"""
environment.print_lines(1)
# compute simple statistics on the results
columns = ['Overall Accuracy (%)']
metrics = {
'percents': [100.0 * np.mean(y_predict == y_actual)],
'partials': [np.sum(y_predict == y_actual)],
'wholes': [len(y_actual)]
}
for label in np.unique(y_actual):
columns.append("%s Accuracy (%%)" % label)
metrics['percents'].append(100.0 * np.mean(y_predict[y_actual == label] == y_actual[y_actual == label]))
metrics['partials'].append(np.sum(y_predict[y_actual == label] == y_actual[y_actual == label]))
metrics['wholes'].append(len(y_actual[y_actual == label]))
# write the results to a file
with open(results_file_name, 'w') as csv_file:
writer = csv.writer(csv_file)
for i in range(y_predict.shape[0]):
writer.writerow([y_predict[i], y_actual[i]])
csv_file.close()
# print the results to the command line
for i, percent in enumerate(metrics['percents']):
print("{:22}:\t\t{:6.2f} %\t\t{:2d}/{:2d}".format(columns[i],
percent,
metrics['partials'][i],
metrics['wholes'][i]))
return metrics, columns
if __name__ == '__main__':
logging.basicConfig(filename='application.log',
level=logging.INFO,
format='%(asctime)s\t\t%(message)s')
logging.info('Hello logging world')
print("Training file counts {}".format(TRAIN_FILE_COUNTS))
paths = {
'model_name': os.sep.join(['res', 'models', ALGORITHM]),
'train_results': os.sep.join(['res', 'results', ALGORITHM + '_train.csv']),
'test_results': os.sep.join(['res', 'results', ALGORITHM + '_test.csv']),
'train_data': os.sep.join(['res', 'features', '']) + 'TrainData_' + KEYWORD + '.pkl',
'test_data': os.sep.join(['res', 'features', '']) + 'TestData_' + KEYWORD + '.pkl'
}
environment.print_lines(5)
print("Testing file counts {}".format(TEST_FILE_COUNTS))
# learner = mixture.GMM(n_components=len(TRAINING_DIRS))
# learner = GaussianNB()
learner = SVC()
evaluate_classifier(paths, learner)
| apache-2.0 |
harshaneelhg/scikit-learn | examples/classification/plot_classification_probability.py | 241 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
Wikidata/StrepHit | strephit/classification/train.py | 1 | 6908 | # -*- encoding: utf-8 -*-
import json
import logging
from importlib import import_module
from inspect import getargspec
import click
from sklearn.dummy import DummyClassifier
from sklearn.externals import joblib
from sklearn import metrics
from sklearn.cross_validation import KFold
import numpy as np
from strephit.commons.classification import reverse_gazetteer
from strephit.classification.model_selection import Scorer
from strephit.classification.classifiers import FeatureSelectedClassifier
from sklearn.preprocessing import MultiLabelBinarizer
logger = logging.getLogger(__name__)
def initialize(cls_name, args, call_init):
path = cls_name.split('.')
module = '.'.join(path[:-1])
cls = getattr(import_module(module), path[-1])
arg_names, _, _, arg_default = getargspec(cls.__init__)
defaults = dict(zip(reversed(arg_names), reversed(arg_default)))
init_args = {}
for k, v in args:
convert = type(defaults[k])
        if convert is type(None):
            raise ValueError('cannot specify %s parameter' % k)
        elif convert is bool:
            convert = lambda s: s.lower() in {'t', 'y', 'true', 'yes'}
init_args[k] = convert(v)
return cls(**init_args) if call_init else (cls, init_args)
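# Hedged usage sketch (editor's illustration, not part of the original
# module), mirroring how main() below builds an SVC from string-valued CLI
# parameters; each value is converted to the type of the constructor default:
# >>> cls, kwargs = initialize('sklearn.svm.SVC',
# ...                          [('kernel', 'linear'), ('C', '0.15')], False)
# >>> kwargs['C'], kwargs['kernel']
# (0.15, 'linear')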
def kfolds_evaluation(folds, model, scoring, skip_majority, x, y):
kf = KFold(x.shape[0], folds, shuffle=True)
scorer = Scorer(scoring, skip_majority)
scores_dummy, scores_test, scores_train = [], [], []
for train_index, test_index in kf:
x_train, y_train = x[train_index], y[train_index]
x_test, y_test = x[test_index], y[test_index]
model.fit(x_train, y_train)
dummy = DummyClassifier()
dummy.fit(x_train, y_train)
scores_test.append(scorer(model, x_test, y_test))
scores_dummy.append(scorer(dummy, x_test, y_test))
scores_train.append(scorer(model, x_train, y_train))
logger.info('%d-folds cross evaluation results', folds)
logger.info(' minimum test %f dummy %f training %f',
min(scores_test), min(scores_dummy), min(scores_train))
logger.info(' maximum test %f dummy %f training %f',
max(scores_test), max(scores_dummy), max(scores_train))
logger.info(' average test %f dummy %f training %f',
np.average(scores_test), np.average(scores_dummy), np.average(scores_train))
logger.info(' median test %f dummy %f training %f',
np.median(scores_test), np.median(scores_dummy), np.median(scores_train))
logger.debug('full test scores: %s', scores_test)
logger.debug('full dummy scores: %s', scores_dummy)
logger.debug('full train scores: %s', scores_train)
def gold_evaluation(sentences, extractor, gazetteer, model_cls, model_args):
logger.info('Evaluating on the gold sentences')
for each in sentences:
if not each.get('gold_fes'):
extractor.process_sentence(each['sentence'], each['lu'], each['fes'],
add_unknown=True, gazetteer=gazetteer)
x_tr, y_tr = extractor.get_features(refit=True)
extractor.start()
tagged_gold = []
for each in sentences:
if each.get('gold_fes'):
tagged_gold.append((each['gold_fes'], extractor.process_sentence(
each['sentence'], each['lu'], each['fes'],
add_unknown=False, gazetteer=gazetteer
)))
if not tagged_gold:
logger.warn('asked to evaluate gold, but no gold sentences found')
return
x_gold, _ = extractor.get_features(refit=False)
y_gold = []
for gold_fes, tagged in tagged_gold:
for chunk, is_sample in tagged:
if is_sample:
y_gold.append([extractor.label_index[fe or 'O'] for fe in gold_fes.get(chunk, [])])
assert len(y_gold) == x_gold.shape[0]
model = FeatureSelectedClassifier(model_cls, extractor.lu_column(), model_args)
model.fit(x_tr, y_tr)
y_pred = model.predict(x_gold)
correct = len([1 for actual, predicted in zip(y_gold, y_pred) if predicted in actual])
logger.info('Gold accuracy: %f (%d / %d roles)', float(correct) / len(y_gold),
correct, len(y_gold))
@click.command()
@click.argument('training-set', type=click.File('r'))
@click.argument('language')
@click.option('-o', '--outfile', type=click.Path(dir_okay=False, writable=True),
default='output/classifier_model.pkl', help='Where to save the model')
@click.option('--model-class', default='sklearn.svm.SVC')
@click.option('--model-param', '-p', type=(unicode, unicode), multiple=True,
help='kwargs for the model. See scikit doc',
default=[('kernel', 'linear'), ('C', '0.15')])
@click.option('--extractor-class', default='strephit.classification.feature_extractors.BagOfTermsFeatureExtractor')
@click.option('--extractor-param', '-P', type=(unicode, unicode), multiple=True,
help='extrator kwargs',
default=[('window_width', '2'), ('collapse_fes', 'true')])
@click.option('--gazetteer', type=click.File('r'))
@click.option('--folds', default=0, help='Perform k-fold evaluation before training on full data')
@click.option('--scoring', default='macro')
@click.option('--skip-majority', is_flag=True)
@click.option('--evaluate-gold', is_flag=True)
def main(training_set, language, outfile, model_class, model_param, extractor_class,
extractor_param, gazetteer, folds, scoring, skip_majority, evaluate_gold):
""" Trains the classifier """
gazetteer = reverse_gazetteer(json.load(gazetteer)) if gazetteer else {}
model_cls, model_args = initialize(model_class, model_param, False)
if evaluate_gold:
gold_extractor = initialize(
extractor_class, [('language', language)] + list(extractor_param), True
)
gold_evaluation(
map(json.loads, training_set), gold_extractor,
gazetteer, model_cls, model_args
)
training_set.seek(0)
extractor = initialize(extractor_class, [('language', language)] + list(extractor_param), True)
logger.info("Building training set from '%s' ..." % training_set.name)
for row in training_set:
data = json.loads(row)
extractor.process_sentence(data['sentence'], data['lu'], data['fes'],
add_unknown=True, gazetteer=gazetteer)
x, y = extractor.get_features(refit=True)
logger.info('Got %d samples with %d features each', *x.shape)
model = FeatureSelectedClassifier(model_cls, extractor.lu_column(), model_args)
if folds > 1:
kfolds_evaluation(folds, model, scoring, skip_majority, x, y)
logger.info('Fitting model ...')
model.fit(x, y)
joblib.dump((model, {
'extractor': extractor
}), outfile)
logger.info("Done, dumped model to '%s'", outfile)
| gpl-3.0 |
osbd/osbd-2016 | slides/crist/get_data.py | 1 | 1419 | import os
import urllib
from glob import glob
import dask.bag as db
import numpy as np
import zarr
from dask.diagnostics import ProgressBar
from netCDF4 import Dataset
def download(url):
opener = urllib.URLopener()
filename = os.path.basename(url)
path = os.path.join('data', filename)
opener.retrieve(url, path)
def download_weather():
# Create data directory
if not os.path.exists('data'):
os.mkdir('data')
template = ('http://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/'
'noaa.oisst.v2.highres/sst.day.mean.{year}.v2.nc')
urls = [template.format(year=year) for year in range(1981, 2016)]
b = db.from_sequence(urls, partition_size=1)
print("Downloading Weather Data")
print("------------------------")
with ProgressBar():
b.map(download).compute(n_workers=8)
def transform_weather():
if os.path.exists('sst.day.mean.v2.zarr'):
return
datasets = [Dataset(path)['sst'] for path in sorted(glob('data/*.nc'))]
n = sum(d.shape[0] for d in datasets)
shape = (n, 720, 1440)
chunks = (72, 360, 360)
f = zarr.open_array('sst.day.mean.v2.zarr', shape=shape, chunks=chunks,
dtype='f4')
i = 0
for d in datasets:
m = d.shape[0]
f[i:i + m] = d[:].filled(np.nan)
i += m
if __name__ == '__main__':
download_weather()
transform_weather()
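# Hedged read-back sketch (editor's illustration, not part of the original
# script): the chunked zarr store written above can be wrapped as a lazy
# dask array, e.g.
# >>> import dask.array as da
# >>> z = zarr.open_array('sst.day.mean.v2.zarr', mode='r')
# >>> sst = da.from_array(z, chunks=z.chunks)  # lazy (n_days, 720, 1440) array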
| mit |
justincassidy/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 214 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
hankcs/HanLP | hanlp/layers/feedforward.py | 1 | 4141 | """
A feed-forward neural network.
"""
from typing import List, Union
import torch
from hanlp.utils.torch_util import activation_from_name
class FeedForward(torch.nn.Module):
"""
This `Module` is a feed-forward neural network, just a sequence of `Linear` layers with
activation functions in between.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of `Linear` layers to apply to the input.
hidden_dims : `Union[int, List[int]]`, required
The output dimension of each of the `Linear` layers. If this is a single `int`, we use
it for all `Linear` layers. If it is a `List[int]`, `len(hidden_dims)` must be
`num_layers`.
    activations : `Union[str, List[str]]`, required
        The name of the activation function to use after each `Linear` layer,
        resolved through `activation_from_name`. If this is a single string, we
        use it after all `Linear` layers. If it is a `List[str]`,
        `len(activations)` must be `num_layers`.
dropout : `Union[float, List[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `List[float]` is the same as with other parameters.
# Examples
```python
    FeedForward(124, 2, [64, 32], 'relu', 0.2)
#> FeedForward(
#> (_activations): ModuleList(
#> (0): ReLU()
#> (1): ReLU()
#> )
#> (_linear_layers): ModuleList(
#> (0): Linear(in_features=124, out_features=64, bias=True)
#> (1): Linear(in_features=64, out_features=32, bias=True)
#> )
#> (_dropout): ModuleList(
#> (0): Dropout(p=0.2, inplace=False)
#> (1): Dropout(p=0.2, inplace=False)
#> )
#> )
```
"""
def __init__(
self,
input_dim: int,
num_layers: int,
hidden_dims: Union[int, List[int]],
activations: Union[str, List[str]],
dropout: Union[float, List[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers # type: ignore
if not isinstance(activations, list):
activations = [activations] * num_layers # type: ignore
activations = [activation_from_name(a)() for a in activations]
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(hidden_dims) != num_layers:
raise ValueError(
"len(hidden_dims) (%d) != num_layers (%d)" % (len(hidden_dims), num_layers)
)
if len(activations) != num_layers:
raise ValueError(
"len(activations) (%d) != num_layers (%d)" % (len(activations), num_layers)
)
if len(dropout) != num_layers:
raise ValueError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output
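# Hedged usage sketch (editor's illustration, not part of the original
# module); assumes 'relu' is a name accepted by activation_from_name, and the
# input tensor is made up:
# >>> ff = FeedForward(124, 2, [64, 32], ['relu', 'relu'], dropout=0.2)
# >>> ff(torch.randn(8, 124)).shape
# torch.Size([8, 32])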
| apache-2.0 |
harshaneelhg/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 203 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
quheng/scikit-learn | sklearn/manifold/locally_linear.py | 205 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
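# Hedged illustration (editor's addition; values made up): each row of the
# returned weights sums to one by construction.
# >>> X = np.array([[0.5, 0.5], [1., 0.], [0., 1.]])
# >>> Z = X[np.array([[1, 2], [0, 2], [0, 1]])]  # two neighbors per point
# >>> B = barycenter_weights(X, Z)
# >>> np.allclose(B.sum(axis=1), 1.0)
# True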
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
        n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # adding I completes M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
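# Hedged usage sketch (editor's illustration, not part of the module):
# embedding a synthetic swiss roll with the functional interface; dataset
# and parameter choices are made up.
# >>> from sklearn.datasets import make_swiss_roll
# >>> X, _ = make_swiss_roll(n_samples=500, random_state=0)
# >>> Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2)
# >>> Y.shape
# (500, 2)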
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
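# Hedged out-of-sample sketch (editor's illustration; X_train and X_new are
# assumed given): fit on training points, then map unseen points through
# barycenter weights computed in the input space.
# >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2).fit(X_train)
# >>> Y_new = lle.transform(X_new)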
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/feature_selection/__init__.py | 243 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
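# Hedged usage sketch (editor's illustration, not part of the module):
# keeping the two iris features with the highest chi-squared scores.
# >>> from sklearn.datasets import load_iris
# >>> iris = load_iris()
# >>> X_new = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)
# >>> X_new.shape
# (150, 2)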
| bsd-3-clause |
quheng/scikit-learn | sklearn/utils/tests/test_utils.py | 214 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and can let any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/manifold/tests/test_mds.py | 321 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
pystruct/pystruct | pystruct/utils/inference.py | 1 | 5309 | import itertools
from sklearn.externals.joblib import Parallel, delayed
import numpy as np
def unwrap_pairwise(y):
"""given a y that may contain pairwise marginals, yield plain y."""
if isinstance(y, tuple):
return y[0]
return y
def expand_sym(sym_compressed):
"""Expand compressed symmetric matrix to full square matrix.
Similar to scipy.spatial.squareform, but also contains the
diagonal.
"""
length = sym_compressed.size
size = int(np.sqrt(2 * length + 0.25) - 1 / 2.)
sym = np.zeros((size, size))
sym[np.tri(size, dtype=np.bool)] = sym_compressed
return (sym + sym.T - np.diag(np.diag(sym)))
def compress_sym(sym_expanded, make_symmetric=True):
"""Compress symmetric matrix to a vector.
Similar to scipy.spatial.squareform, but also contains the
diagonal.
Parameters
----------
sym_expanded : nd-array, shape (size, size)
Input matrix to compress.
make_symmetric : bool (default=True)
Whether to symmetrize the input matrix before compressing.
It is made symmetric by using
``sym_expanded + sym_expanded.T - np.diag(np.diag(sym_expanded))``
This makes sense if only one of the two entries was non-zero before.
"""
size = sym_expanded.shape[0]
if make_symmetric:
sym_expanded = (sym_expanded + sym_expanded.T -
np.diag(np.diag(sym_expanded)))
return sym_expanded[np.tri(size, dtype=np.bool)]
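# Hedged round-trip illustration (editor's addition; values made up):
# compress_sym keeps the lower triangle including the diagonal, and
# expand_sym inverts it for symmetric input.
# >>> sym = np.array([[1., 2., 3.], [2., 4., 5.], [3., 5., 6.]])
# >>> vec = compress_sym(sym)  # 6 entries for a 3x3 symmetric matrix
# >>> np.allclose(expand_sym(vec), sym)
# True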
## global functions for easy parallelization
def find_constraint(model, x, y, w, y_hat=None, relaxed=True,
compute_difference=True):
"""Find most violated constraint, or, given y_hat,
    find slack and djoint_feature for this constraint.
    Since finding the most violated constraint only requires computing
    joint_feature(x, y_hat), not djoint_feature, we can optionally skip
    computing joint_feature(x, y) using compute_difference=False
"""
if y_hat is None:
y_hat = model.loss_augmented_inference(x, y, w, relaxed=relaxed)
joint_feature = model.joint_feature
if getattr(model, 'rescale_C', False):
delta_joint_feature = -joint_feature(x, y_hat, y)
else:
delta_joint_feature = -joint_feature(x, y_hat)
if compute_difference:
if getattr(model, 'rescale_C', False):
delta_joint_feature += joint_feature(x, y, y)
else:
delta_joint_feature += joint_feature(x, y)
if isinstance(y_hat, tuple):
# continuous label
loss = model.continuous_loss(y, y_hat[0])
else:
loss = model.loss(y, y_hat)
slack = max(loss - np.dot(w, delta_joint_feature), 0)
return y_hat, delta_joint_feature, slack, loss
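# Worked illustration (editor's addition; numbers made up): with
# loss(y, y_hat) = 1.0 and np.dot(w, delta_joint_feature) = 0.3, the slack is
# max(1.0 - 0.3, 0) = 0.7, i.e. by how much the constraint induced by y_hat
# is violated under the current w.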
def find_constraint_latent(model, x, y, w, relaxed=True):
"""Find most violated constraint.
    The latent variables h are first imputed from (x, y, w); loss-augmented
    inference then finds the most violating h_hat, and slack and
    djoint_feature are computed for the resulting constraint.
"""
h = model.latent(x, y, w)
h_hat = model.loss_augmented_inference(x, h, w, relaxed=relaxed)
joint_feature = model.joint_feature
delta_joint_feature = joint_feature(x, h) - joint_feature(x, h_hat)
loss = model.loss(y, h_hat)
slack = max(loss - np.dot(w, delta_joint_feature), 0)
return h_hat, delta_joint_feature, slack, loss
def inference(model, x, w, constraints=None):
if constraints:
return model.inference(x, w, constraints=constraints)
else:
return model.inference(x, w)
def loss_augmented_inference(model, x, y, w, relaxed=True):
return model.loss_augmented_inference(x, y, w, relaxed=relaxed)
# easy debugging
def objective_primal(model, w, X, Y, C, variant='n_slack', n_jobs=1):
objective = 0
constraints = Parallel(
n_jobs=n_jobs)(delayed(find_constraint)(
model, x, y, w)
for x, y in zip(X, Y))
slacks = list(zip(*constraints))[2]
if variant == 'n_slack':
slacks = np.maximum(slacks, 0)
objective = max(np.sum(slacks), 0) * C + np.sum(w ** 2) / 2.
return objective
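# Note added for clarity (not in the original file): with variant='n_slack'
# the value above is the structured SVM primal objective
#     C * sum_i max(loss(y_i, y_hat_i) - <w, delta_joint_feature_i>, 0)
#         + ||w||^2 / 2
# evaluated at the current weight vector w.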
def exhaustive_loss_augmented_inference(model, x, y, w):
size = y.size
best_y = None
best_energy = np.inf
for y_hat in itertools.product(range(model.n_states), repeat=size):
y_hat = np.array(y_hat).reshape(y.shape)
#print("trying %s" % repr(y_hat))
joint_feature = model.joint_feature(x, y_hat)
energy = -model.loss(y, y_hat) - np.dot(w, joint_feature)
if energy < best_energy:
best_energy = energy
best_y = y_hat
return best_y
def exhaustive_inference(model, x, w):
# hack to get the grid shape of x
if isinstance(x, np.ndarray):
feats = x
else:
feats = model._get_features(x)
size = np.prod(feats.shape[:-1])
best_y = None
best_energy = np.inf
for y_hat in itertools.product(range(model.n_states), repeat=size):
y_hat = np.array(y_hat).reshape(feats.shape[:-1])
#print("trying %s" % repr(y_hat))
joint_feature = model.joint_feature(x, y_hat)
energy = -np.dot(w, joint_feature)
if energy < best_energy:
best_energy = energy
best_y = y_hat
return best_y
| bsd-2-clause |
justincassidy/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 161 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
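# Illustrative behaviour (example added here, not in the original script):
#   number_aware_tokenizer("released in 1993 for win95")
#   -> ['released', 'in', '#NUMBER', 'for', 'win95']
# Only tokens that *start* with a digit or underscore are collapsed.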
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
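# In words (clarifying note, not in the original example): the score is
# cut(bicluster) / weight(bicluster) -- the total tf-idf mass crossing the
# bicluster boundary divided by the mass inside it, so smaller is better.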
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 269 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
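# Illustrative note (added, not in the original test): for s of shape
# (2, n_samples), center_and_norm(s) leaves each of the two signal rows
# with zero mean and unit standard deviation across samples.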
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
fishcorn/pylearn2 | pylearn2/scripts/gsn_example.py | 44 | 8059 | """
.. todo::
WRITEME
"""
from __future__ import print_function
import cPickle as pickle
import itertools
import numpy as np
from theano.compat.six.moves import xrange
import theano.tensor as T
from pylearn2.expr.activations import rescaled_softmax
from pylearn2.costs.autoencoder import MeanBinaryCrossEntropy
from pylearn2.costs.gsn import GSNCost
from pylearn2.corruption import (BinomialSampler, GaussianCorruptor,
MultinomialSampler, SaltPepperCorruptor,
SmoothOneHotCorruptor)
from pylearn2.datasets.mnist import MNIST
from pylearn2.distributions.parzen import ParzenWindows
from pylearn2.models.gsn import GSN, JointGSN
from pylearn2.termination_criteria import EpochCounter
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD, MonitorBasedLRAdjuster
from pylearn2.utils import image, safe_zip
# define some common parameters
HIDDEN_SIZE = 1000
SALT_PEPPER_NOISE = 0.4
GAUSSIAN_NOISE = 0.5
WALKBACK = 0
LEARNING_RATE = 0.25
MOMENTUM = 0.75
MAX_EPOCHS = 100
BATCHES_PER_EPOCH = None # covers full training set
BATCH_SIZE = 100
ds = MNIST(which_set='train')
def test_train_ae():
"""
.. todo::
WRITEME
"""
GC = GaussianCorruptor
gsn = GSN.new(
layer_sizes=[ds.X.shape[1], 1000],
activation_funcs=["sigmoid", "tanh"],
pre_corruptors=[None, GC(1.0)],
post_corruptors=[SaltPepperCorruptor(0.5), GC(1.0)],
layer_samplers=[BinomialSampler(), None],
tied=False
)
# average MBCE over example rather than sum it
_mbce = MeanBinaryCrossEntropy()
reconstruction_cost = lambda a, b: _mbce.cost(a, b) / ds.X.shape[1]
c = GSNCost([(0, 1.0, reconstruction_cost)], walkback=WALKBACK)
alg = SGD(
LEARNING_RATE,
init_momentum=MOMENTUM,
cost=c,
termination_criterion=EpochCounter(MAX_EPOCHS),
batches_per_iter=BATCHES_PER_EPOCH,
batch_size=BATCH_SIZE,
monitoring_dataset=ds,
monitoring_batches=10
)
trainer = Train(ds, gsn, algorithm=alg, save_path="gsn_ae_example.pkl",
save_freq=5)
trainer.main_loop()
print("done training")
def test_sample_ae():
"""
Visualize some samples from the trained unsupervised GSN.
"""
with open("gsn_ae_example.pkl") as f:
gsn = pickle.load(f)
# random point to start at
mb_data = MNIST(which_set='test').X[105:106, :]
history = gsn.get_samples([(0, mb_data)], walkback=1000,
symbolic=False, include_first=True)
history = list(itertools.chain(*history))
history = np.vstack(history)
tiled = image.tile_raster_images(history,
img_shape=[28,28],
tile_shape=[50,50],
tile_spacing=(2,2))
image.save("gsn_ae_example.png", tiled)
# code to get log likelihood from kernel density estimator
# this crashed on GPU (out of memory), but works on CPU
pw = ParzenWindows(MNIST(which_set='test').X, .20)
print(pw.get_ll(history))
def test_train_supervised():
"""
Train a supervised GSN.
"""
# initialize the GSN
gsn = GSN.new(
layer_sizes=[ds.X.shape[1], 1000, ds.y.shape[1]],
activation_funcs=["sigmoid", "tanh", rescaled_softmax],
pre_corruptors=[GaussianCorruptor(0.5)] * 3,
post_corruptors=[SaltPepperCorruptor(.3), None, SmoothOneHotCorruptor(.5)],
layer_samplers=[BinomialSampler(), None, MultinomialSampler()],
tied=False
)
# average over costs rather than summing
_rcost = MeanBinaryCrossEntropy()
reconstruction_cost = lambda a, b: _rcost.cost(a, b) / ds.X.shape[1]
_ccost = MeanBinaryCrossEntropy()
classification_cost = lambda a, b: _ccost.cost(a, b) / ds.y.shape[1]
# combine costs into GSNCost object
c = GSNCost(
[
# reconstruction on layer 0 with weight 1.0
(0, 1.0, reconstruction_cost),
# classification on layer 2 with weight 2.0
(2, 2.0, classification_cost)
],
walkback=WALKBACK,
mode="supervised"
)
alg = SGD(
LEARNING_RATE,
init_momentum=MOMENTUM,
cost=c,
termination_criterion=EpochCounter(MAX_EPOCHS),
batches_per_iter=BATCHES_PER_EPOCH,
batch_size=BATCH_SIZE,
monitoring_dataset=ds,
monitoring_batches=10,
)
trainer = Train(ds, gsn, algorithm=alg,
save_path="gsn_sup_example.pkl", save_freq=10,
extensions=[MonitorBasedLRAdjuster()])
trainer.main_loop()
print("done training")
def test_classify():
"""
See how well a (supervised) GSN performs at classification.
"""
with open("gsn_sup_example.pkl") as f:
gsn = pickle.load(f)
gsn = JointGSN.convert(gsn)
# turn off corruption
gsn._corrupt_switch = False
ds = MNIST(which_set='test')
mb_data = ds.X
y = ds.y
for i in xrange(1, 10):
y_hat = gsn.classify(mb_data, trials=i)
errors = np.abs(y_hat - y).sum() / 2.0
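        # with one-hot labels each misclassification flips two entries of
        # y_hat - y, hence the division by 2 (comment added for clarity)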
# error indices
#np.sum(np.abs(y_hat - y), axis=1) != 0
print(i, errors, errors / mb_data.shape[0])
def test_sample_supervised(idxs=None, noisy=True):
"""
Visualize samples and labels produced by GSN.
"""
with open("gsn_sup_example.pkl") as f:
gsn = pickle.load(f)
gsn._corrupt_switch = noisy
ds = MNIST(which_set='test')
if idxs is None:
data = ds.X[100:150]
else:
data = ds.X[idxs]
# change the walkback parameter to make the data fill up rows in image
samples = gsn.get_samples([(0, data)],
indices=[0, 2],
walkback=21, symbolic=False,
include_first=True)
stacked = vis_samples(samples)
tiled = image.tile_raster_images(stacked,
img_shape=[28,28],
tile_shape=[50,50],
tile_spacing=(2,2))
image.save("gsn_sup_example.png", tiled)
def vis_samples(samples):
"""
.. todo::
WRITEME
"""
from PIL import ImageDraw, ImageFont
img = image.pil_from_ndarray(np.zeros((28, 28)))
chains = []
num_rows = samples[0][0].shape[0]
for row in xrange(num_rows):
images = []
labels = []
for step in samples:
assert len(step) == 2
images.append(step[0][row])
vec = step[1][row]
sorted_idxs = np.argsort(vec)
label1 = sorted_idxs[-1]
label2 = sorted_idxs[-2]
if vec[label1] == 0:
ratio = 1
else:
ratio = vec[label2] / vec[label1]
c = img.copy()
draw = ImageDraw.Draw(c)
draw.text((8, 11), str(label1), 255)
draw.text((14, 11), str(label2), int(255 * ratio))
nd = image.ndarray_from_pil(c)[:, :, 0]
nd = nd.reshape((1, 784))
labels.append(nd)
data = safe_zip(images, labels)
data = list(itertools.chain(*data))
# white block to indicate end of chain
data.extend([np.ones((1, 784))] * 2)
chains.append(np.vstack(data))
return np.vstack(chains)
# some utility methods for viewing MNIST characters without any GUI
def print_char(A):
"""
.. todo::
WRITEME
"""
print(a_to_s(A.round().reshape((28, 28))))
def a_to_s(A):
"""Prints binary array"""
strs = []
for row in A:
x = [None] * len(row)
for i, num in enumerate(row):
if num != 0:
x[i] = "@"
else:
x[i] = " "
strs.append("".join(x))
return "\n".join(strs)
if __name__ == '__main__':
test_train_supervised()
test_classify()
test_sample_supervised()
| bsd-3-clause |
adammenges/statsmodels | statsmodels/robust/robust_linear_model.py | 27 | 25571 | """
Robust linear models with support for the M-estimators listed under
:ref:`norms <norms>`.
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York. 1981.
PJ Huber. 1973, 'The 1972 Wald Memorial Lectures: Robust Regression:
Asymptotics, Conjectures, and Monte Carlo.' The Annals of Statistics,
1.5, 799-821.
R Venables, B Ripley. 'Modern Applied Statistics in S' Springer, New York,
2002.
"""
from statsmodels.compat.python import string_types
import numpy as np
import scipy.stats as stats
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.regression.linear_model as lm
import statsmodels.robust.norms as norms
import statsmodels.robust.scale as scale
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.compat.numpy import np_matrix_rank
__all__ = ['RLM']
def _check_convergence(criterion, iteration, tol, maxiter):
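    # Converged when the change in the criterion is within tol, or when
    # maxiter iterations have been reached (comment added for clarity).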
return not (np.any(np.fabs(criterion[iteration] -
criterion[iteration-1]) > tol) and iteration < maxiter)
class RLM(base.LikelihoodModel):
__doc__ = """
Robust Linear Models
Estimate a robust linear model via iteratively reweighted least squares
given a robust criterion estimator.
%(params)s
M : statsmodels.robust.norms.RobustNorm, optional
The robust criterion function for downweighting outliers.
The current options are LeastSquares, HuberT, RamsayE, AndrewWave,
TrimmedMean, Hampel, and TukeyBiweight. The default is HuberT().
See statsmodels.robust.norms for more information.
%(extra_params)s
Notes
-----
**Attributes**
df_model : float
The degrees of freedom of the model. The number of regressors p less
one for the intercept. Note that the reported model degrees
of freedom does not count the intercept as a regressor, though
the model is assumed to have an intercept.
df_resid : float
The residual degrees of freedom. The number of observations n
less the number of regressors p. Note that here p does include
        the intercept, since it uses a degree of freedom.
endog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exog : array
        See above. Note that exog is a reference to the data so that if
        data is already an array and it is changed, then `exog` changes
as well.
M : statsmodels.robust.norms.RobustNorm
See above. Robust estimator instance instantiated.
nobs : float
The number of observations n
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
RLM has no whiten method, so this is just the pseudo inverse of the
design.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
Examples
---------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> data.exog = sm.add_constant(data.exog)
>>> rlm_model = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.HuberT())
>>> rlm_results = rlm_model.fit()
>>> rlm_results.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results.bse
array([ 0.11100521, 0.30293016, 0.12864961, 9.79189854])
>>> rlm_results_HC2 = rlm_model.fit(cov="H2")
>>> rlm_results_HC2.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results_HC2.bse
array([ 0.11945975, 0.32235497, 0.11796313, 9.08950419])
>>>
>>> rlm_hamp_hub = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.Hampel()).fit(
sm.robust.scale.HuberScale())
>>> rlm_hamp_hub.params
array([ 0.73175452, 1.25082038, -0.14794399, -40.27122257])
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def __init__(self, endog, exog, M=norms.HuberT(), missing='none',
**kwargs):
self.M = M
super(base.LikelihoodModel, self).__init__(endog, exog,
missing=missing, **kwargs)
self._initialize()
#things to remove_data
self._data_attr.extend(['weights', 'pinv_wexog'])
def _initialize(self):
"""
Initializes the model for the IRLS fit.
Resets the history and number of iterations.
"""
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_resid = (np.float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
self.df_model = np.float(np_matrix_rank(self.exog)-1)
self.nobs = float(self.endog.shape[0])
def score(self, params):
raise NotImplementedError
def information(self, params):
raise NotImplementedError
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like, optional after fit has been called
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model as not yet been fit, params is not optional.
"""
#copied from linear_model
if exog is None:
exog = self.exog
return np.dot(exog, params)
def loglike(self, params):
raise NotImplementedError
def deviance(self, tmp_results):
"""
Returns the (unnormalized) log-likelihood from the M estimator.
"""
return self.M((self.endog - tmp_results.fittedvalues) /
tmp_results.scale).sum()
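    # i.e. the sum of M evaluated at the scaled residuals; this is the
    # quantity monitored when fit(conv='dev') is used (clarifying comment).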
def _update_history(self, tmp_results, history, conv):
history['params'].append(tmp_results.params)
history['scale'].append(tmp_results.scale)
if conv == 'dev':
history['deviance'].append(self.deviance(tmp_results))
elif conv == 'sresid':
history['sresid'].append(tmp_results.resid/tmp_results.scale)
elif conv == 'weights':
history['weights'].append(tmp_results.model.weights)
return history
def _estimate_scale(self, resid):
"""
Estimates the scale based on the option provided to the fit method.
"""
if isinstance(self.scale_est, str):
if self.scale_est.lower() == 'mad':
return scale.mad(resid, center=0)
if self.scale_est.lower() == 'stand_mad':
return scale.mad(resid)
else:
raise ValueError("Option %s for scale_est not understood" %
self.scale_est)
elif isinstance(self.scale_est, scale.HuberScale):
return self.scale_est(self.df_resid, self.nobs, resid)
else:
return scale.scale_est(self, resid)**2
def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
update_scale=True, conv='dev'):
"""
Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : string
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : string, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : string
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : string or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
            The default is 'mad' (median absolute deviation). Another option is
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
Returns
-------
results : object
statsmodels.rlm.RLMresults
"""
if not cov.upper() in ["H1","H2","H3"]:
raise ValueError("Covariance matrix %s not understood" % cov)
else:
self.cov = cov.upper()
conv = conv.lower()
if not conv in ["weights","coefs","dev","sresid"]:
raise ValueError("Convergence argument %s not understood" \
% conv)
self.scale_est = scale_est
if (isinstance(scale_est,
string_types) and scale_est.lower() == "stand_mad"):
from warnings import warn
warn("stand_mad is deprecated and will be removed in 0.7.0",
FutureWarning)
wls_results = lm.WLS(self.endog, self.exog).fit()
if not init:
self.scale = self._estimate_scale(wls_results.resid)
history = dict(params = [np.inf], scale = [])
if conv == 'coefs':
criterion = history['params']
elif conv == 'dev':
history.update(dict(deviance = [np.inf]))
criterion = history['deviance']
elif conv == 'sresid':
history.update(dict(sresid = [np.inf]))
criterion = history['sresid']
elif conv == 'weights':
history.update(dict(weights = [np.inf]))
criterion = history['weights']
# done one iteration so update
history = self._update_history(wls_results, history, conv)
iteration = 1
converged = 0
while not converged:
self.weights = self.M.weights(wls_results.resid/self.scale)
wls_results = lm.WLS(self.endog, self.exog,
weights=self.weights).fit()
if update_scale is True:
self.scale = self._estimate_scale(wls_results.resid)
history = self._update_history(wls_results, history, conv)
iteration += 1
converged = _check_convergence(criterion, iteration, tol, maxiter)
results = RLMResults(self, wls_results.params,
self.normalized_cov_params, self.scale)
history['iteration'] = iteration
results.fit_history = history
results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,
norm=self.M.__class__.__name__, conv=conv)
#norm is not changed in fit, no old state
#doing the next causes exception
#self.cov = self.scale_est = None #reset for additional fits
#iteration and history could contain wrong state with repeated fit
return RLMResultsWrapper(results)
class RLMResults(base.LikelihoodModelResults):
"""
Class to contain RLM results
Returns
-------
**Attributes**
bcov_scaled : array
p x p scaled covariance matrix specified in the model fit method.
The default is H1. H1 is defined as
``k**2 * (1/df_resid*sum(M.psi(sresid)**2)*scale**2)/
((1/nobs*sum(M.psi_deriv(sresid)))**2) * (X.T X)^(-1)``
where ``k = 1 + (df_model +1)/nobs * var_psiprime/m**2``
where ``m = mean(M.psi_deriv(sresid))`` and
``var_psiprime = var(M.psi_deriv(sresid))``
H2 is defined as
``k * (1/df_resid) * sum(M.psi(sresid)**2) *scale**2/
((1/nobs)*sum(M.psi_deriv(sresid)))*W_inv``
H3 is defined as
``1/k * (1/df_resid * sum(M.psi(sresid)**2)*scale**2 *
(W_inv X.T X W_inv))``
where `k` is defined as above and
``W_inv = (M.psi_deriv(sresid) exog.T exog)^(-1)``
See the technical documentation for cleaner formulae.
bcov_unscaled : array
The usual p x p covariance matrix with scale set equal to 1. It
is then just equivalent to normalized_cov_params.
bse : array
An array of the standard errors of the parameters. The standard
errors are taken from the robust covariance matrix specified in the
argument to fit.
chisq : array
        An array of the chi-squared values of the parameter estimates.
df_model
See RLM.df_model
df_resid
See RLM.df_resid
fit_history : dict
Contains information about the iterations. Its keys are `deviance`,
`params`, `iteration` and the convergence criteria specified in
`RLM.fit`, if different from `deviance` or `params`.
fit_options : dict
Contains the options given to fit.
fittedvalues : array
The linear predicted values. dot(exog, params)
model : statsmodels.rlm.RLM
A reference to the model instance
nobs : float
The number of observations n
normalized_cov_params : array
See RLM.normalized_cov_params
params : array
The coefficients of the fitted model
pinv_wexog : array
See RLM.pinv_wexog
pvalues : array
The p values associated with `tvalues`. Note that `tvalues` are assumed to be distributed
standard normal rather than Student's t.
resid : array
The residuals of the fitted model. endog - fittedvalues
scale : float
The type of scale is determined in the arguments to the fit method in
RLM. The reported scale is taken from the residuals of the weighted
least squares in the last IRLS iteration if update_scale is True. If
update_scale is False, then it is the scale given by the first OLS
fit before the IRLS iterations.
sresid : array
The scaled residuals.
tvalues : array
The "t-statistics" of params. These are defined as params/bse where bse are taken
from the robust covariance matrix specified in the argument to fit.
weights : array
The reported weights are determined by passing the scaled residuals
        from the last weighted least squares fit in the IRLS algorithm.
See also
--------
statsmodels.model.LikelihoodModelResults
"""
def __init__(self, model, params, normalized_cov_params, scale):
super(RLMResults, self).__init__(model, params,
normalized_cov_params, scale)
self.model = model
self.df_model = model.df_model
self.df_resid = model.df_resid
self.nobs = model.nobs
self._cache = resettable_cache()
#for remove_data
self.data_in_cache = ['sresid']
self.cov_params_default = self.bcov_scaled
#TODO: "pvals" should come from chisq on bse?
@cache_readonly
def fittedvalues(self):
return np.dot(self.model.exog, self.params)
@cache_readonly
def resid(self):
return self.model.endog - self.fittedvalues # before bcov
@cache_readonly
def sresid(self):
return self.resid/self.scale
@cache_readonly
def bcov_unscaled(self):
return self.normalized_cov_params
@cache_readonly
def weights(self):
return self.model.weights
@cache_readonly
def bcov_scaled(self):
model = self.model
m = np.mean(model.M.psi_deriv(self.sresid))
var_psiprime = np.var(model.M.psi_deriv(self.sresid))
k = 1 + (self.df_model+1)/self.nobs * var_psiprime/m**2
if model.cov == "H1":
return k**2 * (1/self.df_resid*\
np.sum(model.M.psi(self.sresid)**2)*self.scale**2)\
/((1/self.nobs*np.sum(model.M.psi_deriv(self.sresid)))**2)\
*model.normalized_cov_params
else:
W = np.dot(model.M.psi_deriv(self.sresid)*model.exog.T,
model.exog)
W_inv = np.linalg.inv(W)
# [W_jk]^-1 = [SUM(psi_deriv(Sr_i)*x_ij*x_jk)]^-1
# where Sr are the standardized residuals
if model.cov == "H2":
# These are correct, based on Huber (1973) 8.13
return k*(1/self.df_resid)*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
/((1/self.nobs)*np.sum(\
model.M.psi_deriv(self.sresid)))*W_inv
elif model.cov == "H3":
return k**-1*1/self.df_resid*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
*np.dot(np.dot(W_inv, np.dot(model.exog.T,model.exog)),\
W_inv)
@cache_readonly
def pvalues(self):
return stats.norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.bcov_scaled))
@cache_readonly
def chisq(self):
return (self.params/self.bse)**2
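    # (params / bse)**2 is the squared z-statistic, i.e. a Wald chi-squared
    # statistic with one degree of freedom per parameter (clarifying
    # comment, not in the original source).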
def remove_data(self):
super(self.__class__, self).remove_data()
#self.model.history['sresid'] = None
#self.model.history['weights'] = None
remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__
def summary(self, yname=None, xname=None, title=0, alpha=.05,
return_fmt='text'):
"""
This is for testing the new summary setup
"""
from statsmodels.iolib.summary import (summary_top,
summary_params, summary_return)
## left = [(i, None) for i in (
## 'Dependent Variable:',
## 'Model type:',
## 'Method:',
## 'Date:',
## 'Time:',
## 'Number of Obs:',
## 'df resid',
## 'df model',
## )]
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['IRLS']),
('Norm:', [self.fit_options['norm']]),
('Scale Est.:', [self.fit_options['scale_est']]),
('Cov Type:', [self.fit_options['cov']]),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
        if title is not None:
            title = "Robust Linear Model Regression Results"
#boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
#diagnostic table is not used yet
# smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="")
#add warnings/notes, added to text format only
etext =[]
wstr = \
'''If the model instance has been used for another fit with different fit
parameters, then the fit options might not be the correct ones anymore.'''
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
def summary2(self, xname=None, yname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function for regression results
Parameters
-----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry
class RLMResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(RLMResultsWrapper, RLMResults)
if __name__=="__main__":
#NOTE: This is to be removed
#Delivery Time Data is taken from Montgomery and Peck
import statsmodels.api as sm
#delivery time(minutes)
endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,
79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,
9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])
#number of cases, distance (Feet)
exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,
7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,
605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,
150]])
exog = exog.T
exog = sm.add_constant(exog)
# model_ols = models.regression.OLS(endog, exog)
# results_ols = model_ols.fit()
# model_ramsaysE = RLM(endog, exog, M=norms.RamsayE())
# results_ramsaysE = model_ramsaysE.fit(update_scale=False)
# model_andrewWave = RLM(endog, exog, M=norms.AndrewWave())
# results_andrewWave = model_andrewWave.fit(update_scale=False)
# model_hampel = RLM(endog, exog, M=norms.Hampel(a=1.7,b=3.4,c=8.5)) # convergence problems with scale changed, not with 2,4,8 though?
# results_hampel = model_hampel.fit(update_scale=False)
#######################
### Stack Loss Data ###
#######################
from statsmodels.datasets.stackloss import load
data = load()
data.exog = sm.add_constant(data.exog)
#############
### Huber ###
#############
# m1_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber1 = m1_Huber.fit()
# m2_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber2 = m2_Huber.fit(cov="H2")
# m3_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber3 = m3_Huber.fit(cov="H3")
##############
### Hampel ###
##############
# m1_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel1 = m1_Hampel.fit()
# m2_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel2 = m2_Hampel.fit(cov="H2")
# m3_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel3 = m3_Hampel.fit(cov="H3")
################
### Bisquare ###
################
# m1_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare1 = m1_Bisquare.fit()
# m2_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare2 = m2_Bisquare.fit(cov="H2")
# m3_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare3 = m3_Bisquare.fit(cov="H3")
##############################################
# Huber's Proposal 2 scaling #
##############################################
################
### Huber'sT ###
################
m1_Huber_H = RLM(data.endog, data.exog, M=norms.HuberT())
results_Huber1_H = m1_Huber_H.fit(scale_est=scale.HuberScale())
# m2_Huber_H
# m3_Huber_H
# m4 = RLM(data.endog, data.exog, M=norms.HuberT())
# results4 = m1.fit(scale_est="Huber")
# m5 = RLM(data.endog, data.exog, M=norms.Hampel())
# results5 = m2.fit(scale_est="Huber")
# m6 = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results6 = m3.fit(scale_est="Huber")
# print """Least squares fit
#%s
#Huber Params, t = 2.
#%s
#Ramsay's E Params
#%s
#Andrew's Wave Params
#%s
#Hampel's 17A Function
#%s
#""" % (results_ols.params, results_huber.params, results_ramsaysE.params,
# results_andrewWave.params, results_hampel.params)
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/linear_model/plot_ols_3d.py | 347 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/tests/test_geoip.py | 290 | 4204 | import os, unittest
from django.db import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.utils import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, basestring):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '130.80.29.3'
fqdn = 'chron.com'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.failIf(not isinstance(geom, GEOSGeometry))
lon, lat = (-95.3670, 29.7523)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeoIPTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| gpl-3.0 |
glencoesoftware/omero-user-scripts | util_scripts/Copy_Full_Res_Images.py | 2 | 7420 | # -*- coding: utf-8 -*-
"""
-----------------------------------------------------------------------------
Copyright (C) 2014 Glencoe Software, Inc. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
------------------------------------------------------------------------------
"""
import omero
from omero.gateway import BlitzGateway
from omero.rtypes import rstring, rlong
import omero.scripts as scripts
import re
class copyHighResImages:
def __init__(self, conn, scriptParams):
"""
Class to copy high resolution svs and afi files to the new datasets.
        scriptParams must include the target "Project_ID" and a list of
        source dataset "IDs".
        @param conn: BlitzGateway connector
        @param scriptParams: script parameters.
"""
self.FILENAME_REGEX = re.compile(scriptParams["Regex_String"])
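        # With the default Regex_String '^(\w+-\w+)-.*', an image named
        # e.g. 'AB12-C3-region0.svs' maps to target dataset 'AB12-C3'
        # (illustrative example, not from the original script).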
self.conn = conn
self.target_project_id = scriptParams["Project_ID"]
self.source_datasets_list = scriptParams["IDs"]
self.image_dict = self.getImageList()
self.target_dataset_names = self.getTargetDatasetNames()
self.query_service = self.conn.getQueryService()
self.update_service = self.conn.getUpdateService()
self.dataset_query = \
"select d from Project as p" \
" left outer join p.datasetLinks as links" \
" left outer join links.child as d" \
" left outer join fetch d.imageLinks as i_link" \
" left outer join fetch i_link.child" \
" where p.id = :pid and d.name = :dname"
def getImageList(self):
"""
        Retrieve images from the source datasets.
"""
image_dict = {}
for datasetId in self.source_datasets_list:
dataset = conn.getObject("Dataset", datasetId)
for image in dataset.listChildren():
if "[" in image.getName():
continue
file_name = self.FILENAME_REGEX.match(image.getName())
if file_name is None:
continue
image_dict[image.getId()] = file_name.group(1)
return image_dict
def printImageList(self):
for image in self.image_dict:
print image, self.image_dict[image]
def getTargetDatasetNames(self):
"""
Convert image names to target dataset names and return as unique
list.
"""
dataset_names = set()
for image in self.image_dict:
dataset_names.add(self.image_dict[image])
return dataset_names
def getDatasetMap(self):
"""
Convert unique list of dataset names to a map
(dataset_name, dataset_object).
"""
dataset_map = {}
params = omero.sys.ParametersI()
params.add("pid", rlong(self.target_project_id))
for name in self.target_dataset_names:
params.add("dname", rstring(name))
dataset = self.query_service.findByQuery(
self.dataset_query, params)
if dataset is None:
print "Creating new datset"
dataset = omero.model.DatasetI()
dataset.setName(rstring(name))
dataset = \
self.update_service.saveAndReturnObject(dataset)
datasetId = dataset.getId().getValue()
print "\tNew dataset ID:", datasetId
link = omero.model.ProjectDatasetLinkI()
print "\tLinking dataset to:", self.target_project_id
link.parent = omero.model.ProjectI(
self.target_project_id, False)
link.child = omero.model.DatasetI(datasetId, False)
self.update_service.saveObject(link)
dataset = self.query_service.findByQuery(
self.dataset_query, params)
dataset_map[name] = dataset
return dataset_map
def saveImagesToServer(self, dataset_dict):
"""
Convert dataset hash map to list and save linked images to the server.
@param dataset_dict: map(dataset_name, dataset_object)
"""
dataset_list = []
for dataset_name in dataset_dict:
dataset_list.append(dataset_dict[dataset_name])
self.update_service.saveAndReturnArray(dataset_list)
def copyImages(self):
"""
Link images to the target datasets.
"""
dataset_dict = self.getDatasetMap()
for image_id in self.image_dict:
dataset_target = dataset_dict[self.image_dict[image_id]]
image_ids = [
v.id.val for v in dataset_target.linkedImageList()]
if image_id in image_ids:
continue
print "Copying image:", image_id, self.image_dict[image_id]
dataset_target.linkImage(omero.model.ImageI(image_id, False))
self.saveImagesToServer(dataset_dict)
def run(self):
"""
        Call this method after class instantiation to copy the images.
"""
self.copyImages()
return "Done"
if __name__ == "__main__":
global datasetId
dataTypes = [rstring('Dataset')]
client = scripts.client(
'copy_high_res_images.py',
"""
This script copies the full resolution images to the new dataset.
""",
scripts.String(
"Data_Type", optional=False, grouping="1",
description="Choose source of images (only Dataset supported)",
values=dataTypes, default="Dataset"),
scripts.List(
"IDs", optional=False, grouping="2",
description="Dataset to tag and copy images from.").ofType(long),
scripts.Int(
"Project_ID", optional=False, grouping="3",
description="Project ID to create new Dataset in."),
scripts.String(
"Regex_String", optional=False, grouping="4",
description="New dataset name will be based on the image name \
formated by regex", default="^(\w+-\w+)-.*"),
version="0.1",
authors=["Emil Rozbicki"],
institutions=["Glencoe Software Inc."],
contact="emil@glencoesoftware.com",
)
try:
# process the list of args above.
scriptParams = {}
for key in client.getInputKeys():
if client.getInput(key):
scriptParams[key] = client.getInput(key, unwrap=True)
# wrap client to use the Blitz Gateway
conn = BlitzGateway(client_obj=client)
processImages = copyHighResImages(conn, scriptParams)
message = processImages.run()
client.setOutput("Message", rstring(message))
finally:
client.closeSession()
| gpl-2.0 |
jjmiranda/edx-platform | openedx/core/lib/block_structure/transformers.py | 21 | 6043 | """
Module for a collection of BlockStructureTransformers.
"""
import functools
from logging import getLogger
from .exceptions import TransformerException
from .transformer import FilteringTransformerMixin
from .transformer_registry import TransformerRegistry
logger = getLogger(__name__) # pylint: disable=C0103
class BlockStructureTransformers(object):
"""
The BlockStructureTransformers class encapsulates an ordered list of block
    structure transformers. It uses the Transformer Registry to verify the
    registration status of added transformers and to collect their data.
It provides aggregate functionality for collection and ordered
transformation of the transformers.
Clients are expected to access the list of transformers through the
class' interface rather than directly.
"""
def __init__(self, transformers=None, usage_info=None):
"""
Arguments:
transformers ([BlockStructureTransformer]) - List of transformers
to add to the collection.
usage_info (any negotiated type) - A usage-specific object
that is passed to the block_structure and forwarded to all
requested Transformers in order to apply a
usage-specific transform. For example, an instance of
usage_info would contain a user object for which the
transform should be applied.
Raises:
TransformerException - if any transformer is not registered in the
Transformer Registry.
"""
self.usage_info = usage_info
self._transformers = {'supports_filter': [], 'no_filter': []}
if transformers:
self.__iadd__(transformers)
def __iadd__(self, transformers):
"""
Adds the given transformers to the collection.
Args:
transformers ([BlockStructureTransformer]) - List of transformers
to add to the collection.
Raises:
TransformerException - if any transformer is not registered in the
Transformer Registry.
"""
unregistered_transformers = TransformerRegistry.find_unregistered(transformers)
if unregistered_transformers:
raise TransformerException(
"The following requested transformers are not registered: {}".format(unregistered_transformers)
)
for transformer in transformers:
if isinstance(transformer, FilteringTransformerMixin):
self._transformers['supports_filter'].append(transformer)
else:
self._transformers['no_filter'].append(transformer)
return self
@classmethod
def collect(cls, block_structure):
"""
Collects data for each registered transformer.
"""
for transformer in TransformerRegistry.get_registered_transformers():
block_structure._add_transformer(transformer) # pylint: disable=protected-access
transformer.collect(block_structure)
# Collect all fields that were requested by the transformers.
block_structure._collect_requested_xblock_fields() # pylint: disable=protected-access
@classmethod
def is_collected_outdated(cls, block_structure):
"""
Returns whether the collected data in the block structure is outdated.
"""
outdated_transformers = []
for transformer in TransformerRegistry.get_registered_transformers():
version_in_block_structure = block_structure._get_transformer_data_version(transformer) # pylint: disable=protected-access
if transformer.VERSION != version_in_block_structure:
outdated_transformers.append(transformer)
if outdated_transformers:
logger.info(
"Collected Block Structure data for the following transformers is outdated: '%s'.",
[(transformer.name(), transformer.VERSION) for transformer in outdated_transformers],
)
return bool(outdated_transformers)
def transform(self, block_structure):
"""
The given block structure is transformed by each transformer in the
        collection. Transformers with filters are combined and run first in a
single course tree traversal, then remaining transformers are run in
the order that they were added.
"""
self._transform_with_filters(block_structure)
self._transform_without_filters(block_structure)
# Prune the block structure to remove any unreachable blocks.
block_structure._prune_unreachable() # pylint: disable=protected-access
def _transform_with_filters(self, block_structure):
"""
Transforms the given block_structure using the transform_block_filters
method from the given transformers.
"""
if not self._transformers['supports_filter']:
return
filters = []
for transformer in self._transformers['supports_filter']:
filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))
combined_filters = functools.reduce(
self._filter_chain,
filters,
block_structure.create_universal_filter()
)
block_structure.filter_topological_traversal(combined_filters)
def _filter_chain(self, accumulated, additional):
"""
        Given two functions that take a block_key and return a boolean, return
        a function that takes a block_key and 'ands' the two functions together.
"""
return lambda block_key: accumulated(block_key) and additional(block_key)
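    # Illustrative sketch (not part of the original module): two hypothetical
    # filters combined the way _transform_with_filters reduces them through
    # _filter_chain; a block key passes only if every filter accepts it.
    #
    #   >>> import functools
    #   >>> keep_even = lambda key: key % 2 == 0
    #   >>> keep_small = lambda key: key < 10
    #   >>> combined = functools.reduce(
    #   ...     lambda acc, extra: (lambda key: acc(key) and extra(key)),
    #   ...     [keep_even, keep_small],
    #   ...     lambda key: True)
    #   >>> combined(4), combined(12), combined(3)
    #   (True, False, False)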
def _transform_without_filters(self, block_structure):
"""
Transforms the given block_structure using the transform
method from the given transformers.
"""
for transformer in self._transformers['no_filter']:
transformer.transform(self.usage_info, block_structure)
| agpl-3.0 |
google/learned_optimization | learned_optimization/research/scaling/scaling_transformer.py | 1 | 8118 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A couple tasks exploring scaling of transformers."""
import functools
from typing import Optional, Sequence

from absl import app
import chex
import gin
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np

from learned_optimization.tasks import base
from learned_optimization.tasks.datasets import language
class CausalSelfAttention(hk.Module):
"""Multi-headed attention mechanism.
As described in the vanilla Transformer paper:
"Attention is all you need" https://arxiv.org/abs/1706.03762
"""
def __init__(
self,
num_heads: int,
key_size: int,
# TODO(romanring, tycai): migrate to a more generic `w_init` initializer.
w_init: Optional[hk.initializers.Initializer] = None,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
self.num_heads = num_heads
self.key_size = key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * num_heads
if not w_init:
w_init = hk.initializers.VarianceScaling(1.)
self.w_init = w_init
def __call__(
self,
query: jnp.ndarray,
key: Optional[jnp.ndarray] = None,
value: Optional[jnp.ndarray] = None,
mask: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
"""Compute (optionally masked) MHA with queries, keys & values."""
key = key if key is not None else query
value = value if value is not None else query
if query.ndim != 3:
raise ValueError("Expect queries of shape [B, T, D].")
seq_len = query.shape[1]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
mask = mask * causal_mask if mask is not None else causal_mask
query_heads = self._linear_projection(query, self.key_size, "query")
key_heads = self._linear_projection(key, self.key_size, "key")
value_heads = self._linear_projection(value, self.value_size, "value")
attn_logits = jnp.einsum("...thd,...Thd->...htT", query_heads, key_heads)
sqrt_key_size = np.sqrt(self.key_size).astype(key.dtype)
attn_logits = attn_logits / sqrt_key_size
if mask is not None:
if mask.ndim != attn_logits.ndim:
raise ValueError(f"Mask dimensionality {mask.ndim} must match logits "
f"{attn_logits.ndim}.")
attn_logits = jnp.where(mask, attn_logits, -1e30)
attn_weights = jax.nn.softmax(attn_logits)
attn = jnp.einsum("...htT,...Thd->...thd", attn_weights, value_heads)
# Concatenate attention matrix of all heads into a single vector.
attn_vec = jnp.reshape(attn, (*query.shape[:-1], -1))
return hk.Linear(self.model_size, w_init=self.w_init)(attn_vec)
@hk.transparent
def _linear_projection(self,
x: jnp.ndarray,
head_size: int,
name: Optional[str] = None) -> jnp.ndarray:
y = hk.Linear(self.num_heads * head_size, w_init=self.w_init, name=name)(x)
return y.reshape((*x.shape[:-1], self.num_heads, head_size))
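# Illustrative sketch (not part of the original file): the causal mask built
# inside CausalSelfAttention.__call__ for seq_len == 4. Row t may only attend
# to positions <= t, so logits above the diagonal are masked to -1e30.
#
#   >>> import numpy as np
#   >>> np.tril(np.ones((4, 4)))
#   array([[1., 0., 0., 0.],
#          [1., 1., 0., 0.],
#          [1., 1., 1., 0.],
#          [1., 1., 1., 1.]])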
class DenseBlock(hk.Module):
"""A 2-layer MLP which widens then narrows the input."""
def __init__(self,
widening_factor: int = 4,
w_init=None,
name: Optional[str] = None):
super().__init__(name=name)
self._w_init = w_init
self._widening_factor = widening_factor
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
hiddens = x.shape[-1]
x = hk.Linear(self._widening_factor * hiddens, w_init=self._w_init)(x)
x = jax.nn.gelu(x)
return hk.Linear(hiddens, w_init=self._w_init)(x)
class Transformer(hk.Module):
"""A transformer stack."""
def __init__(self,
num_heads: int,
num_layers: int,
d_model: int,
vocab_size: int,
dropout_rate: float,
name: Optional[str] = None):
super().__init__(name=name)
self._num_layers = num_layers
self._num_heads = num_heads
self._dropout_rate = dropout_rate
self._d_model = d_model
self._vocab_size = vocab_size
def __call__(self, h: jnp.ndarray, mask: Optional[jnp.ndarray],
is_training: bool) -> jnp.ndarray:
"""Connects the transformer.
Args:
h: Inputs, [B, T, D].
mask: Padding mask, [B, T].
is_training: Whether we're training or not.
Returns:
Array of shape [B, T, D].
"""
h = hk.Embed(vocab_size=self._vocab_size, embed_dim=self._d_model)(h)
init_scale = 2. / self._num_layers
dropout_rate = self._dropout_rate if is_training else 0.
if mask is not None:
mask = mask[:, None, None, :]
# Note: names chosen to approximately match those used in the GPT-2 code;
# see https://github.com/openai/gpt-2/blob/master/src/model.py.
for i in range(self._num_layers):
h_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name=f"h{i}_ln_1")(
h)
# h_attn = CausalSelfAttentionV2(
# num_heads=self._num_heads,
# hiddens_per_head=h.shape[-1] // self._num_heads,
# init_scale=init_scale,
# relative_pos_emb=True,
# name=f'h{i}_attn')(h_norm, mask=mask)
h_attn = CausalSelfAttention(
num_heads=self._num_heads,
key_size=32,
model_size=h.shape[-1],
w_init=hk.initializers.VarianceScaling(init_scale),
name=f"h{i}_attn")(
h_norm, mask=mask)
h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
h = h + h_attn
h_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name=f"h{i}_ln_2")(
h)
w_init = hk.initializers.VarianceScaling(init_scale)
h_dense = DenseBlock(name=f"h{i}_mlp", w_init=w_init)(h_norm)
h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
h = h + h_dense
h = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name=f"h_f")(
h)
return hk.Linear(self._vocab_size)(h)
def _make_task(hk_fn, datasets) -> base.Task:
"""Make a Task subclass for the haiku loss and datasets."""
init_net, apply_net = hk.transform(hk_fn)
class _Task(base.Task):
"""Annonomous task object with corresponding loss and datasets."""
def __init__(self):
self.datasets = datasets
def init(self, key: chex.PRNGKey) -> base.Params:
batch = next(datasets.train)
return init_net(key, batch)
def loss(self, params, key, data):
return apply_net(params, key, data)
return _Task()
def ScalingTasks_Transformer(model_size, layers):
ds = language.lm1b_32k_datasets(32, 128)
vocab_size = ds.extra_info["vocab_size"]
def forward(batch):
x = batch["obs"]
logits = Transformer(
num_heads=8,
num_layers=layers,
d_model=model_size,
vocab_size=vocab_size,
dropout_rate=0.1)(
x, mask=(x != 0), is_training=True)
loss = base.softmax_cross_entropy(
logits=logits, labels=jax.nn.one_hot(batch["target"], vocab_size))
return jnp.mean(loss)
return _make_task(forward, ds)
for size in [2**i for i in range(4, 15)]:
name = "ScalingTasks_Transformer_5layer_%dsize" % size
locals()[name] = gin.external_configurable(
functools.partial(ScalingTasks_Transformer, size, 5), name)
del name
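# Illustrative note (not part of the original file): the loop above registers
# one gin-configurable constructor per hidden size, from
# "ScalingTasks_Transformer_5layer_16size" up to
# "ScalingTasks_Transformer_5layer_16384size".
#
#   >>> [2**i for i in range(4, 15)]
#   [16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384]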
| apache-2.0 |
justincassidy/scikit-learn | sklearn/utils/testing.py | 83 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
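# Illustrative sketch (not part of the original module): typical use of
# assert_warns_message with a toy function; the function below is a made-up
# example, not sklearn code.
#
#   >>> def _noisy():
#   ...     warnings.warn("feature is deprecated", UserWarning)
#   ...     return 42
#   >>> assert_warns_message(UserWarning, "deprecated", _noisy)
#   42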
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The exception class (or classes) expected to be raised
    func : callable
        Callable object expected to raise the error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
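# Illustrative sketch (not part of the original module): assert_raise_message
# passes silently when the expected substring appears in the raised error;
# the function below is a made-up example.
#
#   >>> def _broken():
#   ...     raise ValueError("negative values are not allowed")
#   >>> assert_raise_message(ValueError, "not allowed", _broken)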
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
    This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
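# Illustrative sketch (not part of the original module): filtering the
# estimator listing by type; every returned class is a ClassifierMixin
# subclass by construction.
#
#   >>> classifiers = all_estimators(type_filter='classifier')
#   >>> all(issubclass(cls, ClassifierMixin) for _, cls in classifiers)
#   True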
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copy from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
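# Illustrative sketch (not part of the original module): TempMemmap dumps the
# wrapped data to a temporary folder and yields a read-only memmapped copy.
#
#   >>> X = np.arange(10.)
#   >>> with TempMemmap(X) as X_mm:
#   ...     print(X_mm.sum())
#   45.0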
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
HazyResearch/metal | metal/end_model/end_model.py | 1 | 8417 | import torch
import torch.nn as nn
import torch.nn.functional as F
from metal.classifier import Classifier
from metal.end_model.em_defaults import em_default_config
from metal.end_model.identity_module import IdentityModule
from metal.end_model.loss import SoftCrossEntropyLoss
from metal.utils import MetalDataset, pred_to_prob, recursive_merge_dicts
class EndModel(Classifier):
"""A dynamically constructed discriminative classifier
layer_out_dims: a list of integers corresponding to the output sizes
of the layers of your network. The first element is the
dimensionality of the input layer, the last element is the
dimensionality of the head layer (equal to the cardinality of the
task), and all other elements dictate the sizes of middle layers.
The number of middle layers will be inferred from this list.
input_module: (nn.Module) a module that converts the user-provided
model inputs to torch.Tensors. Defaults to IdentityModule.
middle_modules: (nn.Module) a list of modules to execute between the
input_module and task head. Defaults to nn.Linear.
head_module: (nn.Module) a module to execute right before the final
softmax that outputs a prediction for the task.
"""
def __init__(
self,
layer_out_dims,
input_module=None,
middle_modules=None,
head_module=None,
**kwargs,
):
if len(layer_out_dims) < 2 and not kwargs["skip_head"]:
raise ValueError(
"Arg layer_out_dims must have at least two "
"elements corresponding to the output dim of the input module "
"and the cardinality of the task. If the input module is the "
"IdentityModule, then the output dim of the input module will "
"be equal to the dimensionality of your input data points"
)
# Add layer_out_dims to kwargs so it will be merged into the config dict
kwargs["layer_out_dims"] = layer_out_dims
config = recursive_merge_dicts(em_default_config, kwargs, misses="insert")
super().__init__(k=layer_out_dims[-1], config=config)
self._build(input_module, middle_modules, head_module)
# Show network
if self.config["verbose"]:
print("\nNetwork architecture:")
self._print()
print()
def _build(self, input_module, middle_modules, head_module):
"""
TBD
"""
input_layer = self._build_input_layer(input_module)
middle_layers = self._build_middle_layers(middle_modules)
# Construct list of layers
layers = [input_layer]
if middle_layers is not None:
layers += middle_layers
if not self.config["skip_head"]:
head = self._build_task_head(head_module)
layers.append(head)
# Construct network
if len(layers) > 1:
self.network = nn.Sequential(*layers)
else:
self.network = layers[0]
# Construct loss module
loss_weights = self.config["train_config"]["loss_weights"]
if loss_weights is not None and self.config["verbose"]:
print(f"Using class weight vector {loss_weights}...")
reduction = self.config["train_config"]["loss_fn_reduction"]
self.criteria = SoftCrossEntropyLoss(
weight=self._to_torch(loss_weights, dtype=torch.FloatTensor),
reduction=reduction,
)
def _build_input_layer(self, input_module):
if input_module is None:
input_module = IdentityModule()
output_dim = self.config["layer_out_dims"][0]
input_layer = self._make_layer(
input_module,
"input",
self.config["input_layer_config"],
output_dim=output_dim,
)
return input_layer
def _build_middle_layers(self, middle_modules):
layer_out_dims = self.config["layer_out_dims"]
num_mid_layers = len(layer_out_dims) - 2
if num_mid_layers == 0:
return None
middle_layers = nn.ModuleList()
for i in range(num_mid_layers):
if middle_modules is None:
module = nn.Linear(*layer_out_dims[i : i + 2])
output_dim = layer_out_dims[i + 1]
else:
module = middle_modules[i]
output_dim = None
layer = self._make_layer(
module,
"middle",
self.config["middle_layer_config"],
output_dim=output_dim,
)
middle_layers.add_module(f"layer{i+1}", layer)
return middle_layers
def _build_task_head(self, head_module):
if head_module is None:
head = nn.Linear(self.config["layer_out_dims"][-2], self.k)
else:
# Note that if head module is provided, it must have input dim of
# the last middle module and output dim of self.k, the cardinality
head = head_module
return head
def _make_layer(self, module, prefix, layer_config, output_dim=None):
if isinstance(module, IdentityModule):
return module
layer = [module]
if layer_config[f"{prefix}_relu"]:
layer.append(nn.ReLU())
if layer_config[f"{prefix}_batchnorm"] and output_dim:
layer.append(nn.BatchNorm1d(output_dim))
if layer_config[f"{prefix}_dropout"]:
layer.append(nn.Dropout(layer_config[f"{prefix}_dropout"]))
if len(layer) > 1:
return nn.Sequential(*layer)
else:
return layer[0]
def _print(self):
print(self.network)
def forward(self, x):
"""Returns a list of outputs for tasks 0,...t-1
Args:
x: a [batch_size, ...] batch from X
"""
return self.network(x)
@staticmethod
def _reset_module(m):
"""A method for resetting the parameters of any module in the network
First, handle special cases (unique initialization or none required)
Next, use built in method if available
        Last, report that no initialization occurred to avoid silent failure.
This will be called on all children of m as well, so do not recurse
manually.
"""
if callable(getattr(m, "reset_parameters", None)):
m.reset_parameters()
def update_config(self, update_dict):
"""Updates self.config with the values in a given update dictionary"""
self.config = recursive_merge_dicts(self.config, update_dict)
def _preprocess_Y(self, Y, k):
"""Convert Y to prob labels if necessary"""
Y = Y.clone()
# If preds, convert to probs
if Y.dim() == 1 or Y.shape[1] == 1:
Y = pred_to_prob(Y.long(), k=k)
return Y
def _create_dataset(self, *data):
return MetalDataset(*data)
def _get_loss_fn(self):
criteria = self.criteria.to(self.config["device"])
# This self.preprocess_Y allows us to not handle preprocessing
# in a custom dataloader, but decreases speed a bit
loss_fn = lambda X, Y: criteria(self.forward(X), self._preprocess_Y(Y, self.k))
return loss_fn
def train_model(self, train_data, valid_data=None, log_writer=None, **kwargs):
self.config = recursive_merge_dicts(self.config, kwargs)
# If train_data is provided as a tuple (X, Y), we can make sure Y is in
# the correct format
# NOTE: Better handling for if train_data is Dataset or DataLoader...?
if isinstance(train_data, (tuple, list)):
X, Y = train_data
Y = self._preprocess_Y(self._to_torch(Y, dtype=torch.FloatTensor), self.k)
train_data = (X, Y)
# Convert input data to data loaders
train_loader = self._create_data_loader(train_data, shuffle=True)
# Create loss function
loss_fn = self._get_loss_fn()
# Execute training procedure
self._train_model(
train_loader, loss_fn, valid_data=valid_data, log_writer=log_writer
)
def predict_proba(self, X):
"""Returns a [n, k] tensor of probs (probabilistic labels)."""
return F.softmax(self.forward(X), dim=1).data.cpu().numpy()
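# Illustrative sketch (not part of the original module): constructing a small
# binary EndModel; the dimensions below are made-up examples. The first
# element of layer_out_dims is the input dim, the last is the cardinality k.
#
#   >>> model = EndModel([100, 50, 2], verbose=False)
#   >>> probs = model.predict_proba(torch.randn(8, 100))  # shape (8, 2)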
| apache-2.0 |
Lawrence-Liu/scikit-learn | examples/cluster/plot_dbscan.py | 343 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/sandbox/descstats.py | 27 | 6366 | '''
Glue for returning descriptive statistics.
'''
import numpy as np
from scipy import stats
import os
from statsmodels.stats.descriptivestats import sign_test
#############################################
#
#============================================
# Univariate Descriptive Statistics
#============================================
#
def descstats(data, cols=None, axis=0):
'''
Prints descriptive statistics for one or multiple variables.
Parameters
    ----------
    data : numpy array
        The data to describe.
    cols : list, optional
        A list of the column numbers or field names (for a recarray) of
        variables. Default is all columns.
    axis : 1 or 0
        Axis order of data. Default is 0 for column-ordered data.
    Examples
    --------
    >>> descstats(data.exog, cols=['x_1', 'x_2', 'x_3'])
'''
x = np.array(data) # or rather, the data we're interested in
if cols is None:
# if isinstance(x, np.recarray):
# cols = np.array(len(x.dtype.names))
if not isinstance(x, np.recarray) and x.ndim == 1:
x = x[:,None]
if x.shape[1] == 1:
desc = '''
---------------------------------------------
Univariate Descriptive Statistics
---------------------------------------------
Var. Name %(name)12s
----------
Obs. %(nobs)22i Range %(range)22s
Sum of Wts. %(sum)22s Coeff. of Variation %(coeffvar)22.4g
Mode %(mode)22.4g Skewness %(skewness)22.4g
Repeats %(nmode)22i Kurtosis %(kurtosis)22.4g
Mean %(mean)22.4g Uncorrected SS %(uss)22.4g
Median %(median)22.4g Corrected SS %(ss)22.4g
Variance %(variance)22.4g Sum Observations %(sobs)22.4g
Std. Dev. %(stddev)22.4g
''' % {'name': cols, 'sum': 'N/A', 'nobs': len(x), 'mode': \
stats.mode(x)[0][0], 'nmode': stats.mode(x)[1][0], \
'mean': x.mean(), 'median': np.median(x), 'range': \
'('+str(x.min())+', '+str(x.max())+')', 'variance': \
x.var(), 'stddev': x.std(), 'coeffvar': \
stats.variation(x), 'skewness': stats.skew(x), \
'kurtosis': stats.kurtosis(x), 'uss': np.sum(x**2, axis=0),\
'ss': np.sum((x-x.mean())**2, axis=0), 'sobs': np.sum(x)}
desc+= '''
Percentiles
-------------
1 %% %12.4g
5 %% %12.4g
10 %% %12.4g
25 %% %12.4g
50 %% %12.4g
75 %% %12.4g
90 %% %12.4g
95 %% %12.4g
99 %% %12.4g
''' % tuple([stats.scoreatpercentile(x,per) for per in (1,5,10,25,
50,75,90,95,99)])
t,p_t=stats.ttest_1samp(x,0)
M,p_M=sign_test(x)
S,p_S=stats.wilcoxon(np.squeeze(x))
desc+= '''
Tests of Location (H0: Mu0=0)
-----------------------------
Test Statistic Two-tailed probability
-----------------+-----------------------------------------
Student's t | t %7.5f Pr > |t| <%.4f
Sign | M %8.2f Pr >= |M| <%.4f
Signed Rank | S %8.2f Pr >= |S| <%.4f
''' % (t,p_t,M,p_M,S,p_S)
# Should this be part of a 'descstats'
# in any event these should be split up, so that they can be called
# individually and only returned together if someone calls summary
# or something of the sort
elif x.shape[1] > 1:
desc ='''
Var. Name | Obs. Mean Std. Dev. Range
------------+--------------------------------------------------------'''+\
os.linesep
# for recarrays with columns passed as names
# if isinstance(cols[0],str):
# for var in cols:
# desc += "%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g \
#%(range)20s" % {'name': var, 'obs': len(x[var]), 'mean': x[var].mean(),
# 'stddev': x[var].std(), 'range': '('+str(x[var].min())+', '\
# +str(x[var].max())+')'+os.linesep}
# else:
for var in range(x.shape[1]):
desc += "%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g \
%(range)20s" % {'name': var, 'obs': len(x[:,var]), 'mean': x[:,var].mean(),
'stddev': x[:,var].std(), 'range': '('+str(x[:,var].min())+', '+\
str(x[:,var].max())+')'+os.linesep}
else:
raise ValueError("data not understood")
return desc
#if __name__=='__main__':
# test descstats
# import os
# loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
# relpath=(load_dataset(loc))
# dta=np.recfromcsv(relpath)
# descstats(dta,['stpop'])
# raw_input('Hit enter for multivariate test')
# descstats(dta,['stpop','avginc','vio'])
# with plain arrays
# import string2dummy as s2d
# dts=s2d.string2dummy(dta)
# ndts=np.vstack(dts[col] for col in dts.dtype.names)
# observations in columns and data in rows
# is easier for the call to stats
# what to make of
# ndts=np.column_stack(dts[col] for col in dts.dtype.names)
# ntda=ntds.swapaxis(1,0)
# ntda is ntds returns false?
# or now we just have detailed information about the different strings
# would this approach ever be inappropriate for a string typed variable
# other than dates?
# descstats(ndts, [1])
# raw_input("Enter to try second part")
# descstats(ndts, [1,20,3])
if __name__ == '__main__':
import statsmodels.api as sm
import os
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=False)
sum1 = descstats(data.exog)
sum1a = descstats(data.exog[:,:1])
# loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
# dta=np.recfromcsv(loc)
# summary2 = descstats(dta,['stpop'])
# summary3 = descstats(dta,['stpop','avginc','vio'])
#TODO: needs a by argument
# summary4 = descstats(dta) this fails
# this is a bug
# p = dta[['stpop']]
# p.view(dtype = np.float, type = np.ndarray)
# this works
# p.view(dtype = np.int, type = np.ndarray)
### This is *really* slow ###
if os.path.isfile('./Econ724_PS_I_Data.csv'):
data2 = np.recfromcsv('./Econ724_PS_I_Data.csv')
sum2 = descstats(data2.ahe)
sum3 = descstats(np.column_stack((data2.ahe,data2.yrseduc)))
sum4 = descstats(np.column_stack(([data2[_] for \
_ in data2.dtype.names])))
| bsd-3-clause |
behzadnouri/scipy | scipy/stats/mstats_basic.py | 30 | 84684 | """
An extension of scipy.stats.stats to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ?
# TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__all__ = ['argstoarray',
'betai',
'count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
from scipy._lib.six import iteritems
import itertools
import warnings
from collections import namedtuple
from . import distributions
import scipy.special as special
from ._stats_mstats_common import (
_find_repeats,
linregress as stats_linregress,
theilslopes as stats_theilslopes
)
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" (%s <> %s)" % (na, nb))
return (a, b, na)
def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
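# Illustrative sketch (not part of the original module): shorter sequences are
# padded with masked values; filling the mask with -1 makes that visible.
#
#   >>> argstoarray([1, 2], [3, 4, 5]).filled(-1).tolist()
#   [[1.0, 2.0, -1.0], [3.0, 4.0, 5.0]]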
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
The input is cast to float64. Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
"""
# Make sure we get a copy. ma.compressed promises a "new array", but can
# actually return a reference.
compr = np.asarray(ma.compressed(arr), dtype=np.float64)
if compr is arr or compr.base is arr:
compr = compr.copy()
return _find_repeats(compr)
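# Illustrative sketch (not part of the original module): the repeated values
# and how many times each occurs.
#
#   >>> rep, cnt = find_repeats([1, 2, 2, 3, 3, 3])
#   >>> rep.tolist(), cnt.tolist()
#   ([2.0, 3.0], [2, 3])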
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values.
Parameters
----------
x : sequence
        Sequence of data on which to count the ties.
use_missing : bool, optional
Whether to consider missing values as tied.
Returns
-------
count_tied_groups : dict
Returns a dictionary (nb of ties: nb of groups).
Examples
--------
>>> from scipy.stats import mstats
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
>>> mstats.count_tied_groups(z)
{2: 1, 3: 2}
In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
>>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
>>> mstats.count_tied_groups(z)
{2: 2, 3: 1}
>>> z[[1,-1]] = np.ma.masked
>>> mstats.count_tied_groups(z, use_missing=True)
{2: 2, 3: 1}
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : bool, optional
Whether the masked values have a rank of 0 (False) or equal to the
average rank of the unmasked values (True).
"""
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data == r).filled(False)
rk[condition] = rk[condition].mean()
return rk
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
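# Illustrative sketch (not part of the original module): tied values share the
# average of their ranks.
#
#   >>> rankdata([40, 10, 20, 20]).tolist()
#   [4.0, 1.0, 2.5, 2.5]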
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0):
"""
Returns an array of the modal (most common) value in the passed array.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Notes
-----
For more details, see `stats.mode`.
"""
a, axis = _chk_asarray(a, axis)
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
else:
not_masked_indices = ma.flatnotmasked_edges(a)
first_not_masked_index = not_masked_indices[0]
return (a[first_not_masked_index], 1)
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
return ModeResult(*output)
@np.deprecate(message="mstats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead.")
def betai(a, b, x):
"""
betai() is deprecated in scipy 0.17.0.
For details about this function, see `stats.betai`.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as `x` increases, so does
`y`. Negative correlations imply that as `x` increases, `y` decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1-D array_like
Input
y : 1-D array_like
Input
Returns
-------
pearsonr : float
Pearson's correlation coefficient, 2-tailed p-value.
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
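Examples
--------
A minimal hand-checked sketch: a perfectly linear relation gives
``r = 1.0`` and, by the convention above, a p-value of 0:
>>> from scipy.stats import mstats
>>> mstats.pearsonr([1, 2, 3, 4], [2, 4, 6, 8])
(1.0, 0.0)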
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
(mx, my) = (x.mean(), y.mean())
(xm, ym) = (x-mx, y-my)
r_num = ma.add.reduce(xm*ym)
r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
r = r_num / r_den
# Presumably, if r > 1, then it is only some small artifact of floating
# point arithmetic.
r = min(r, 1.0)
r = max(r, -1.0)
df = n - 2
if r is masked or abs(r) == 1.0:
prob = 0.
else:
t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
prob = _betai(0.5*df, 0.5, df/(df + t_squared))
return r, prob
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(x, y, use_ties=True):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the linear
relationship between two datasets. Unlike the Pearson correlation, the
Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact linear relationship. Positive correlations imply that
as `x` increases, so does `y`. Negative correlations imply that as `x`
increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the
corresponding value in `y` is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : array_like
The length of `x` must be > 2.
y : array_like
The length of `y` must be > 2.
use_ties : bool, optional
Whether the correction for ties should be computed.
Returns
-------
correlation : float
Spearman correlation coefficient
pvalue : float
2-tailed p-value.
References
----------
[CRCProbStat2000] section 14.7
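Examples
--------
A minimal hand-checked sketch: strictly increasing sequences have
rho = 1, and the p-value degenerates to 0:
>>> from scipy.stats import mstats
>>> mstats.spearmanr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
SpearmanrResult(correlation=1.0, pvalue=0.0)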
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
df = n-2
if df < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
rankx = rankdata(x)
ranky = rankdata(y)
dsq = np.add.reduce((rankx-ranky)**2)
# Tie correction
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k**2-1) for (k,v) in iteritems(xties)])/12.
corr_y = np.sum([v*k*(k**2-1) for (k,v) in iteritems(yties)])/12.
else:
corr_x = corr_y = 0
denom = n*(n**2 - 1)/6.
if corr_x != 0 or corr_y != 0:
rho = denom - dsq - corr_x - corr_y
rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
else:
rho = 1. - dsq/denom
t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
if t is masked:
prob = 0.
else:
prob = _betai(0.5*df, 0.5, df/(df + t * t))
return SpearmanrResult(rho, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, use_ties=True, use_missing=False):
"""
Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
x : sequence
First data list (for example, time).
y : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
Returns
-------
correlation : float
Kendall tau
pvalue : float
Approximate 2-sided p-value.
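Examples
--------
Identical orderings give tau = 1; the p-value below was worked out by
hand from the normal approximation used in this function:
>>> from scipy.stats import mstats
>>> tau, p = mstats.kendalltau([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
>>> tau
1.0
>>> round(p, 4)
0.0143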
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
n -= m.sum()
if n < 2:
return KendalltauResult(np.nan, np.nan)
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
var_s = n*(n-1)*(2*n+5)
if use_ties:
var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)])
var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)])
v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
prob = special.erfc(abs(z)/np.sqrt(2))
return KendalltauResult(tau, prob)
def kendalltau_seasonal(x):
"""
Computes a multivariate Kendall's rank correlation tau, for seasonal data.
Parameters
----------
x : 2-D ndarray
Array of seasonal data, with seasons in columns.
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
S_szn = np.sum([msign(x[i:]-x[i]).sum(0) for i in range(n)], axis=0)
S_tot = S_szn.sum()
n_tot = x.count()
ties = count_tied_groups(x.compressed())
corr_ties = np.sum([v*k*(k-1) for (k,v) in iteritems(ties)])
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
corr_j = np.sum([v*k*(k-1) for (k,v) in iteritems(ties_j)])
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
K[j,k] = np.sum([msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
for i in range(n)])
covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
def pointbiserialr(x, y):
"""Calculates a point biserial correlation coefficient and its p-value.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
For more details on `pointbiserialr`, see `stats.pointbiserialr`.
"""
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = _betai(0.5*df, 0.5, df/(df+t*t))
return PointbiserialrResult(rpb, prob)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if y is None:
x = ma.array(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = ma.array(x)
y = ma.array(y)
x = x.flatten()
y = y.flatten()
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
slope, intercept, r, prob, sterrest = stats_linregress(x.data[~m],
y.data[~m])
else:
# All data is masked
return None, None, None, None, None
else:
slope, intercept, r, prob, sterrest = stats_linregress(x.data, y.data)
return LinregressResult(slope, intercept, r, prob, sterrest)
if stats_linregress.__doc__:
linregress.__doc__ = stats_linregress.__doc__ + genmissingvaldoc
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
For more details on `theilslopes`, see `stats.theilslopes`.
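Examples
--------
A minimal hand-checked sketch: for collinear points every pairwise
slope is 1, so the median slope is 1 and the intercept is
``median(y) - 1*median(x) = 2.5 - 1.5 = 1.0``:
>>> from scipy.stats import mstats
>>> medslope, medintercept = mstats.theilslopes([1., 2., 3., 4.])[:2]
>>> medslope, medintercept
(1.0, 1.0)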
"""
y = ma.asarray(y).flatten()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `stats.theilslopes`
return stats_theilslopes(y, x, alpha=alpha)
def sen_seasonal_slopes(x):
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return szn_medslopes, medslope
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0):
"""
Calculates the T-test for the mean of ONE group of scores.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis, if array_like than it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_1samp`, see `stats.ttest_1samp`.
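Examples
--------
A hand-checked degenerate case: when the sample mean equals `popmean`,
the statistic is 0 and the two-tailed p-value is 1:
>>> from scipy.stats import mstats
>>> t, p = mstats.ttest_1samp([1., 2., 3., 4., 5.], popmean=3.0)
>>> float(t), float(p)
(0.0, 1.0)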
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return (np.nan, np.nan)
x = a.mean(axis=axis)
v = a.var(axis=axis, ddof=1)
n = a.count(axis=axis)
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n - 1.0)
svar = ((n - 1.0) * v) / df
with np.errstate(divide='ignore', invalid='ignore'):
t = (x - popmean) / ma.sqrt(svar / n)
prob = special.betainc(0.5*df, 0.5, df/(df + t*t))
return Ttest_1sampResult(t, prob)
ttest_onesamp = ttest_1samp
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind(a, b, axis=0, equal_var=True):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True, perform a standard independent 2 sample test that assumes equal
population variances.
If False, perform Welch's t-test, which does not assume equal population
variance.
.. versionadded:: 0.17.0
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
For more details on `ttest_ind`, see `stats.ttest_ind`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
if equal_var:
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n1 + n2 - 2.0)
svar = ((n1-1)*v1+(n2-1)*v2) / df
denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
else:
vn1 = v1/n1
vn2 = v2/n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero.
# It doesn't matter what df is as long as it is not NaN.
df = np.where(np.isnan(df), 1, df)
denom = ma.sqrt(vn1 + vn2)
with np.errstate(divide='ignore', invalid='ignore'):
t = (x1-x2) / denom
probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape)
return Ttest_indResult(t, probs.squeeze())
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_rel`, see `stats.ttest_rel`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
if len(a) != len(b):
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.count(axis)
df = ma.asanyarray(n-1.0)
d = (a-b).astype('d')
dm = d.mean(axis)
v = d.var(axis=axis, ddof=1)
denom = ma.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = dm / denom
probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze()
return Ttest_relResult(t, probs)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The Mann-Whitney statistics
pvalue : float
Approximate p-value assuming a normal distribution.
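Examples
--------
A hand-checked sketch with non-overlapping samples: the statistic is
the smaller U (here 0), and the p-value follows from the normal
approximation with continuity correction:
>>> from scipy.stats import mstats
>>> stat, p = mstats.mannwhitneyu([1, 2, 3], [4, 5, 6])
>>> stat
0.0
>>> round(p, 3)
0.081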
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= np.sum([v*(k**3-k) for (k,v) in iteritems(ties)])/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return MannwhitneyuResult(u, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args):
"""
Compute the Kruskal-Wallis H-test for independent samples
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
Notes
-----
For more details on `kruskal`, see `stats.kruskal`.
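Examples
--------
Three fully separated groups, worked out by hand: H = 7.2 with 2
degrees of freedom, so p = exp(-3.6), approximately 0.0273:
>>> from scipy.stats import mstats
>>> result = mstats.kruskal([1, 2, 3], [4, 5, 6], [7, 8, 9])
>>> round(result.statistic, 1), round(result.pvalue, 4)
(7.2, 0.0273)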
"""
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
T = 1. - np.sum([v*(k**3-k) for (k,v) in iteritems(ties)])/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
df = len(output) - 1
prob = distributions.chi2.sf(H, df)
return KruskalResult(H, prob)
kruskalwallis = kruskal
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
@np.deprecate(message="mstats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : ndarray
Input data
threshmin : {None, float}, optional
Lower threshold. If None, set to the minimum value.
threshmax : {None, float}, optional
Upper threshold. If None, set to the maximum value.
newval : {0, float}, optional
Value outside the thresholds.
Returns
-------
threshold : ndarray
Returns `a`, with values less than `threshmin` and values greater than
`threshmax` replaced with `newval`.
"""
a = ma.array(a, copy=True)
mask = np.zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin).filled(False)
if threshmax is not None:
mask |= (a > threshmax).filled(False)
a[mask] = newval
return a
def trima(a, limits=None, inclusive=(True,True)):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : array_like
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
will be masked. A limit set to None indicates an open interval.
inclusive : (bool, bool) tuple, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
"""
a = ma.asarray(a)
a.unshare_mask()
if (limits is None) or (limits == (None, None)):
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""
Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming is
n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on
the left (right) end should be truncated (True) or rounded (False) to
integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = int(np.round(low_limit*n))
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - int(np.round(n*up_limit))
a[idx[upidx:]] = masked
return a
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(bool, bool) tuple}, optional
If `relative` is False, tuple indicating whether values exactly equal
to the absolute limits are allowed.
If `relative` is True, tuple indicating whether the number of data
being masked on each side should be rounded (True) or truncated
(False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
>>> from scipy.stats.mstats import trim
>>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
>>> print(trim(z,(3,8)))
[-- -- 3 4 5 6 7 8 -- --]
>>> print(trim(z,(0.1,0.2),relative=True))
[-- 2 3 4 5 6 7 8 -- --]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__ is not None:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""
Trims the smallest and largest data values.
Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
``int(proportiontocut * n)`` largest values of data along the given axis,
where n is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : float, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is ``(1 - 2*proportiontocut) * n``.
Default is 0.2.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""
Trims the data by masking values from one tail.
Parameters
----------
data : array_like
Data to trim.
proportiontocut : float, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
``(1 - proportiontocut) * n``. Default is 0.2.
tail : {'left','right'}, optional
If 'left' the `proportiontocut` lowest values will be masked.
If 'right' the `proportiontocut` highest values will be masked.
Default is 'left'.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False). Default is
(True, True).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened. Default is None.
Returns
-------
trimtail : ndarray
Returned array of same shape as `data` with masked tail values.
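Examples
--------
A hand-checked sketch: with the default left tail, the two lowest of
ten values are masked:
>>> from scipy.stats.mstats import trimtail
>>> print(trimtail([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.2))
[-- -- 3 4 5 6 7 8 9 10]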
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""
Returns the standard error of the trimmed mean along the given axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
If n is the number of unmasked data before trimming, the values
smaller than ``n * limits[0]`` and the values larger than
``n * limits[1]`` are masked, and the total number of unmasked
data after trimming is ``n * (1.-sum(limits))``. In each case,
the value of one limit can be set to None to indicate an open interval.
If `limits` is None, no trimming is performed.
inclusive : {(bool, bool) tuple} optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
axis : int, optional
Axis along which to trim.
Returns
-------
trimmed_stde : scalar or ndarray
"""
def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
"Returns the standard error of the trimmed mean for a 1D input data."
n = a.count()
idx = a.argsort()
# Default to "no trimming" so that the winsorizing step below is always
# well-defined, even when one of the limits is zero or None.
lowidx, upidx = 0, n
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = int(np.round(low_limit*n))
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - int(np.round(n*up_limit))
a[idx[upidx:]] = masked
# Winsorize the trimmed tails before computing the standard deviation.
a[idx[:lowidx]] = a[idx[lowidx]]
a[idx[upidx:]] = a[idx[upidx-1]]
winstd = a.std(ddof=1)
return winstd / ((1-(low_limit or 0.)-(up_limit or 0.))*np.sqrt(len(a)))
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if (axis is None):
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is None.
Returns
-------
tmean : float
Notes
-----
For more details on `tmean`, see `stats.tmean`.
"""
return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
For more details on `tvar`, see `stats.tvar`.
"""
a = ma.asarray(a).astype(float).ravel()
if limits is None:
# a.count() gives the number of unmasked values and works even when
# the input carries no explicit mask.
n = a.count()
return np.ma.var(a) * n/(n-1.)
am = _mask_to_limits(a, limits=limits, inclusive=inclusive)
return np.ma.var(am, axis=axis, ddof=ddof)
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed minimum
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
Returns
-------
tmin : float, int or ndarray
Notes
-----
For more details on `tmin`, see `stats.tmin`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
Returns
-------
tmax : float, int or ndarray
Notes
-----
For more details on `tmax`, see `stats.tmax`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
For more details on `tsem`, see `stats.tsem`.
"""
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std(axis=axis, ddof=ddof)/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var(axis=axis, ddof=ddof))
return sd / np.sqrt(am.count())
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
axis=None):
"""Returns a Winsorized version of the input array.
The (limits[0])th lowest values are set to the (limits[0])th percentile,
and the (limits[1])th highest values are set to the (1 - limits[1])th
percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming
is n*(1.-sum(limits)) The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
Notes
-----
This function is applied to reduce the effect of possibly spurious outliers
by limiting the extreme values.
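Examples
--------
A hand-checked sketch with 10% clipped on each side: the minimum is
replaced by the second-smallest value and the maximum by the
second-largest:
>>> from scipy.stats.mstats import winsorize
>>> print(winsorize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], (0.1, 0.1)))
[2 2 3 4 5 6 7 8 9 9]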
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
n = a.count()
idx = a.argsort()
if low_limit:
if low_include:
lowidx = int(low_limit * n)
else:
lowidx = np.round(low_limit * n)
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n * up_limit)
else:
upidx = n - np.round(n * up_limit)
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
# We are going to modify a: better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
if limits is None:
return a
if (not isinstance(limits, tuple)) and isinstance(limits, float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
upinc)
def moment(a, moment=1, axis=0):
"""
Calculates the nth moment about the mean for a sample.
Parameters
----------
a : array_like
data
moment : int, optional
order of central moment that is returned
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
Notes
-----
For more details about `moment`, see `stats.moment`.
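Examples
--------
A hand-checked sketch: for [1, 2, 3, 4] the mean is 2.5 and the
average squared deviation is 1.25:
>>> from scipy.stats import mstats
>>> mstats.moment([1, 2, 3, 4], moment=2)
1.25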
"""
a, axis = _chk_asarray(a, axis)
if moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - ma.expand_dims(a.mean(axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return s.mean(axis)
def variation(a, axis=0):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `stats.variation`.
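Examples
--------
A hand-checked sketch: the data below have mean 5 and biased standard
deviation 2, giving a coefficient of variation of 0.4:
>>> from scipy.stats import mstats
>>> float(mstats.variation([2, 4, 4, 4, 5, 5, 7, 9]))
0.4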
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
def skew(a, axis=0, bias=True):
"""
Computes the skewness of a data set.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
For more details about `skew`, see `stats.skew`.
"""
a, axis = _chk_asarray(a,axis)
n = a.count(axis)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
Notes
-----
For more details about `kurtosis`, see `stats.kurtosis`.
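Examples
--------
A hand-checked sketch: for [1, 2, 3, 4], m2 = 1.25 and m4 = 2.5625,
so the Fisher kurtosis is 2.5625/1.25**2 - 3 = -1.36:
>>> from scipy.stats import mstats
>>> round(float(mstats.kurtosis([1, 2, 3, 4])), 2)
-1.36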
"""
a, axis = _chk_asarray(a, axis)
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
n = a.count(axis)
can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=0, bias=True):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Data array
axis : int or None, optional
Axis along which to calculate statistics. Default 0. If None,
compute over the whole array `a`.
ddof : int, optional
degree of freedom (default 0); note that default ddof is different
from the same routine in stats.describe
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
Returns
-------
nobs : int
Size of the data (discarding missing values).
minmax : (int, int)
min, max
mean : float
arithmetic mean
variance : float
unbiased variance
skewness : float
biased skewness
kurtosis : float
biased kurtosis
Examples
--------
>>> from scipy.stats.mstats import describe
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
DescribeResult(nobs=array(3), minmax=(masked_array(data = 0,
mask = False,
fill_value = 999999)
, masked_array(data = 2,
mask = False,
fill_value = 999999)
), mean=1.0, variance=0.66666666666666663, skewness=masked_array(data = 0.0,
mask = False,
fill_value = 1e+20)
, kurtosis=-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
mm = (ma.minimum.reduce(a), ma.maximum.reduce(a))
m = a.mean(axis)
v = a.var(axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
median along the given axis. masked values are discarded.
Parameters
----------
data : ndarray
Data to trim.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0):
"""
Tests whether the skew is different from the normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
For more details about `skewtest`, see `stats.skewtest`.
"""
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y == 0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
def kurtosistest(a, axis=0):
"""
Tests whether a dataset has normal kurtosis
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
For more details about `kurtosistest`, see `stats.kurtosistest`.
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
if np.min(n) < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % np.min(n))
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
if np.ma.isMaskedArray(denom):
# For multi-dimensional array input
denom[denom < 0] = masked
elif denom < 0:
denom = masked
term2 = ma.power((1-2.0/A)/denom,1/3.0)
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0):
"""
Tests whether a sample differs from a normal distribution.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
Notes
-----
For more details about `normaltest`, see `stats.normaltest`.
"""
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
where ``x[j]`` is the j-th order statistic, and gamma is a function of
``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
``g = n*p + m - j``.
Reinterpreting the above equations to compare to **R** leads to the
equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
Typical values of (alphap,betap) are:
- (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
(**R** type 4)
- (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
(**R** type 5)
- (0,0) : ``p(k) = k/(n+1)`` :
(**R** type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
(**R** type 7, **R** default)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x.
(**R** type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed
(**R** type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
alphap : float, optional
Plotting positions parameter, default is 0.4.
betap : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple, optional
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Notes
-----
This formulation is very similar to **R** except the calculation of
``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
with each type.
References
----------
.. [1] *R* statistical software: http://www.r-project.org/
.. [2] *R* ``quantile`` function:
http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
... [ 47., 15., 2.],
... [ 49., 36., 3.],
... [ 15., 39., 4.],
... [ 42., 40., -999.],
... [ 41., 41., -999.],
... [ 7., -999., -999.],
... [ 39., -999., -999.],
... [ 43., -999., -999.],
... [ 40., -999., -999.],
... [ 36., -999., -999.]])
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[ 19.2 14.6 1.45]
[ 40. 37.5 2.5 ]
[ 42.8 40.05 3.55]]
>>> data[:, 2] = -999.
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[19.200000000000003 14.6 --]
[40.0 37.5 --]
[42.800000000000004 40.05 --]]
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = masked
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- `alpha` and `beta` are two parameters.
Typical values for `alpha` and `beta` are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
(n + 1.0 - alpha - beta))
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
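# Illustrative usage of `plotting_positions` (not part of the original
# module): with the default alpha=beta=0.4 (Cunnane), rank k out of n=5
# maps to (k - 0.4)/(5 + 0.2). Doctest-style comment only.
#
# >>> plotting_positions([1., 2., 3., 4., 5.])
# masked_array of [0.1154, 0.3077, 0.5000, 0.6923, 0.8846] (approx.)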
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in ``*args`` is one level of a factor. If an `f_oneway()` run on
the transformed data and found significant, variances are unequal. From
Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
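# Illustrative workflow (not part of the original module): transform two
# groups, then feed the transformed columns to `f_oneway` (defined later
# in this module). A significant F on the transformed data indicates
# unequal variances. Doctest-style comment only.
#
# >>> g1 = np.random.normal(0., 1., 50)
# >>> g2 = np.random.normal(0., 3., 50)     # much larger spread
# >>> tx = obrientransform(g1, g2)
# >>> f_oneway(tx[:, 0].compressed(), tx[:, 1].compressed()).pvalue < 0.05
# True                        # expected for spreads this different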
@np.deprecate(message="mstats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(data, axis=0):
"""Calculates the signal-to-noise ratio, as the ratio of the mean over
standard deviation along the given axis.
Parameters
----------
data : sequence
Input data
axis : {0, int}, optional
Axis along which to compute. If None, the computation is performed
on a flat version of the array.
"""
data = ma.array(data, copy=False)
m = data.mean(axis)
sd = data.std(axis, ddof=0)
return m/sd
def sem(a, axis=0, ddof=1):
"""
Calculates the standard error of the mean of the input array.
Also sometimes called standard error of measurement.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
If axis is None, ravel `a` first. If axis is an integer, this will be
the axis over which to operate. Defaults to 0.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` changed in scipy 0.15.0 to be consistent with
`stats.sem` as well as with the most common definition used (like in the R
documentation).
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> print(stats.mstats.sem(a))
[2.8284271247461903 2.8284271247461903 2.8284271247461903
2.8284271247461903]
Find standard error across the whole array, using n degrees of freedom:
>>> print(stats.mstats.sem(a, axis=None, ddof=0))
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
return s
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
one per treatment group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
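# Illustrative usage (not part of the original module): three treatment
# groups with clearly different means. Doctest-style comment only.
#
# >>> a = [6, 8, 4, 5, 3, 4]
# >>> b = [8, 12, 9, 11, 6, 8]
# >>> c = [13, 9, 11, 8, 7, 12]
# >>> f, p = f_oneway(a, b, c)
# >>> round(f, 2), p < 0.05
# (9.26, True)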
@np.deprecate(message="mstats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
ER = ma.array(ER, copy=False, ndmin=2)
EF = ma.array(EF, copy=False, ndmin=2)
if ma.getmask(ER).any() or ma.getmask(EF).any():
raise NotImplementedError("Not implemented when the inputs "
"have missing data")
lmbda = np.linalg.det(EF) / np.linalg.det(ER)
q = ma.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
q = ma.filled(q, 1)
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
Each input is considered a given group. Ideally, the number of treatments
among each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns
-------
statistic : float
the test statistic.
pvalue : float
the associated p-value.
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
return FriedmanchisquareResult(chisq,
distributions.chi2.sf(chisq, k-1))
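# Illustrative usage (not part of the original module): three repeated
# measures where the ordering is identical for every subject, so the
# within-subject ranks are always (1, 2, 3). Rank sums are 5, 10, 15 and
# chisq = 12/(5*3*4) * ((5-10)**2 + (10-10)**2 + (15-10)**2) = 10.
# Doctest-style comment only.
#
# >>> x = [1, 2, 3, 4, 5]
# >>> y = [2, 3, 4, 5, 6]
# >>> z = [3, 4, 5, 6, 7]
# >>> friedmanchisquare(x, y, z).statistic
# 10.0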
| bsd-3-clause |
wavelets/lifelines | tests/test_statistics.py | 3 | 7796 | from __future__ import print_function
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
from lifelines import statistics as stats
from lifelines.datasets import load_waltons, load_g3
def test_sample_size_necessary_under_cph():
assert stats.sample_size_necessary_under_cph(0.8, 1, 0.8, 0.2, 0.139) == (14, 14)
assert stats.sample_size_necessary_under_cph(0.8, 1, 0.5, 0.5, 1.2) == (950, 950)
assert stats.sample_size_necessary_under_cph(0.8, 1.5, 0.5, 0.5, 1.2) == (1231, 821)
assert stats.sample_size_necessary_under_cph(0.8, 1.5, 0.5, 0.5, 1.2, alpha=0.01) == (1832, 1221)
def test_power_under_cph():
assert abs(stats.power_under_cph(12,12, 0.8, 0.2, 0.139) - 0.744937) < 10e-6
assert abs(stats.power_under_cph(12,20, 0.8, 0.2, 1.2) - 0.05178317) < 10e-6
def test_unequal_intensity_with_random_data():
data1 = np.random.exponential(5, size=(2000, 1))
data2 = np.random.exponential(1, size=(2000, 1))
test_result = stats.logrank_test(data1, data2)
assert test_result.is_significant
def test_logrank_test_output_against_R_1():
df = load_g3()
ix = (df['group'] == 'RIT')
d1, e1 = df.ix[ix]['time'], df.ix[ix]['event']
d2, e2 = df.ix[~ix]['time'], df.ix[~ix]['event']
expected = 0.0138
result = stats.logrank_test(d1, d2, event_observed_A=e1, event_observed_B=e2)
assert abs(result.p_value - expected) < 0.0001
def test_logrank_test_output_against_R_2():
# from https://stat.ethz.ch/education/semesters/ss2011/seminar/contents/presentation_2.pdf
control_T = [1, 1, 2, 2, 3, 4, 4, 5, 5, 8, 8, 8, 8, 11, 11, 12, 12, 15, 17, 22, 23]
control_E = np.ones_like(control_T)
treatment_T = [6, 6, 6, 7, 10, 13, 16, 22, 23, 6, 9, 10, 11, 17, 19, 20, 25, 32, 32, 34, 25]
treatment_E = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
result = stats.logrank_test(control_T, treatment_T, event_observed_A=control_E, event_observed_B=treatment_E)
expected_p_value = 4.17e-05
assert abs(result.p_value - expected_p_value) < 0.0001
assert abs(result.test_statistic - 16.8) < 0.1
def test_rank_test_output_against_R_no_censorship():
"""
> time <- c(10,20,30,10,20,50)
> status <- c(1,1,1,1,1,1)
> treatment <- c(1,1,1,0,0,0)
> survdiff(Surv(time, status) ~ treatment)
"""
result = stats.multivariate_logrank_test([10, 20, 30, 10, 20, 50], [1, 1, 1, 0, 0, 0])
r_p_value = 0.614107
r_stat = 0.254237
assert abs(result.p_value - r_p_value) < 10e-6
assert abs(result.test_statistic - r_stat) < 10e-6
def test_rank_test_output_against_R_with_censorship():
"""
> time <- c(10,20,30,10,20,50)
> status <- c(1,0,1,1,0,1)
> treatment <- c(1,1,1,0,0,0)
> survdiff(Surv(time, status) ~ treatment)
"""
result = stats.multivariate_logrank_test([10, 20, 30, 10, 20, 50], [1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 0, 1])
r_p_value = 0.535143
r_stat = 0.384615
assert abs(result.p_value - r_p_value) < 10e-6
assert abs(result.test_statistic - r_stat) < 10e-6
def test_unequal_intensity_event_observed():
data1 = np.random.exponential(5, size=(2000, 1))
data2 = np.random.exponential(1, size=(2000, 1))
eventA = np.random.binomial(1, 0.5, size=(2000, 1))
eventB = np.random.binomial(1, 0.5, size=(2000, 1))
result = stats.logrank_test(data1, data2, event_observed_A=eventA, event_observed_B=eventB)
assert result.is_significant
def test_integer_times_logrank_test():
data1 = np.random.exponential(5, size=(2000, 1)).astype(int)
data2 = np.random.exponential(1, size=(2000, 1)).astype(int)
result = stats.logrank_test(data1, data2)
assert result.is_significant
def test_equal_intensity_with_negative_data():
data1 = np.random.normal(0, size=(2000, 1))
data1 -= data1.mean()
data1 /= data1.std()
data2 = np.random.normal(0, size=(2000, 1))
data2 -= data2.mean()
data2 /= data2.std()
result = stats.logrank_test(data1, data2)
assert not result.is_significant
def test_unequal_intensity_with_negative_data():
data1 = np.random.normal(-5, size=(2000, 1))
data2 = np.random.normal(5, size=(2000, 1))
result = stats.logrank_test(data1, data2)
assert result.is_significant
def test_waltons_dataset():
df = load_waltons()
ix = df['group'] == 'miR-137'
waltonT1 = df.ix[ix]['T']
waltonT2 = df.ix[~ix]['T']
result = stats.logrank_test(waltonT1, waltonT2)
assert result.is_significant
def test_logrank_test_is_symmetric():
data1 = np.random.exponential(5, size=(2000, 1)).astype(int)
data2 = np.random.exponential(1, size=(2000, 1)).astype(int)
result1 = stats.logrank_test(data1, data2)
result2 = stats.logrank_test(data2, data1)
assert abs(result1.p_value - result2.p_value) < 10e-8
assert result2.is_significant == result1.is_significant
def test_multivariate_unequal_intensities():
T = np.random.exponential(10, size=300)
g = np.random.binomial(2, 0.5, size=300)
T[g == 1] = np.random.exponential(1, size=(g == 1).sum())
result = stats.multivariate_logrank_test(T, g)
assert result.is_significant
def test_pairwise_waltons_dataset_is_significantly_different():
waltons_dataset = load_waltons()
R = stats.pairwise_logrank_test(waltons_dataset['T'], waltons_dataset['group'])
assert R.values[0, 1].is_significant
def test_pairwise_logrank_test_with_identical_data_returns_inconclusive():
t = np.random.exponential(10, size=100)
T = np.tile(t, 3)
g = np.array([1, 2, 3]).repeat(100)
R = stats.pairwise_logrank_test(T, g, alpha=0.99).applymap(lambda r: r.is_significant if r is not None else None)
V = np.array([[None, False, False], [False, None, False], [False, False, None]])
npt.assert_array_equal(R.values, V)
def test_pairwise_allows_dataframes():
N = 100
df = pd.DataFrame(np.empty((N, 3)), columns=["T", "C", "group"])
df["T"] = np.random.exponential(1, size=N)
df["C"] = np.random.binomial(1, 0.6, size=N)
df["group"] = np.random.binomial(2, 0.5, size=N)
stats.pairwise_logrank_test(df['T'], df["group"], event_observed=df["C"])
def test_log_rank_returns_None_if_equal_arrays():
T = np.random.exponential(5, size=200)
result = stats.logrank_test(T, T, alpha=0.95)
assert not result.is_significant
C = np.random.binomial(2, 0.8, size=200)
result = stats.logrank_test(T, T, C, C, alpha=0.95)
assert not result.is_significant
def test_multivariate_log_rank_is_identical_to_log_rank_for_n_equals_2():
N = 200
T1 = np.random.exponential(5, size=N)
T2 = np.random.exponential(5, size=N)
C1 = np.random.binomial(2, 0.9, size=N)
C2 = np.random.binomial(2, 0.9, size=N)
result = stats.logrank_test(T1, T2, C1, C2, alpha=0.95)
T = np.r_[T1, T2]
C = np.r_[C1, C2]
G = np.array([1] * 200 + [2] * 200)
result_m = stats.multivariate_logrank_test(T, G, C, alpha=0.95)
assert result.is_significant == result_m.is_significant
assert result.p_value == result_m.p_value
def test_StatisticalResult_class():
sr = stats.StatisticalResult(True, 0.04, 5.0)
assert sr.is_significant
assert sr.test_result == "Reject Null"
sr = stats.StatisticalResult(None, 0.04, 5.0)
assert not sr.is_significant
assert sr.test_result == "Cannot Reject Null"
sr = stats.StatisticalResult(True, 0.05, 5.0, kw='some_value')
assert hasattr(sr, 'kw')
assert getattr(sr, 'kw') == 'some_value'
assert 'some_value' in sr.__unicode__()
def test_valueerror_is_raised_if_alpha_out_of_bounds():
data1 = np.random.exponential(5, size=(20, 1))
data2 = np.random.exponential(1, size=(20, 1))
with pytest.raises(ValueError):
stats.logrank_test(data1, data2, alpha=95)
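def _report(result):
    # Illustrative, hypothetical helper (not part of the original suite)
    # showing how a StatisticalResult from logrank_test is typically
    # consumed; the attribute names follow the assertions above. The
    # leading underscore keeps pytest from collecting it as a test.
    print("test statistic: %.4f" % result.test_statistic)
    print("p-value: %.6f" % result.p_value)
    print("significant: %s" % result.is_significant)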
| mit |
google/learned_optimization | learned_optimization/tasks/parametric/parametric_utils_test.py | 1 | 2315 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_utils."""
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization.tasks.parametric import parametric_utils
import numpy as onp
from numpy import testing
class ParametricUtilsTest(absltest.TestCase):
def test_SampleImageDataset(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleImageDataset.sample(key)
datasets = parametric_utils.SampleImageDataset.get_dataset(cfg, 8, (8, 8))
_ = datasets.train
def test_SampleActivation(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleActivation.sample(key)
act_fn = parametric_utils.SampleActivation.get_dynamic(cfg)
value = jax.jit(act_fn)(12.)
self.assertEqual(value.shape, ())
act_fn = parametric_utils.SampleActivation.get_static(cfg)
value2 = act_fn(12.)
self.assertEqual(value, value2)
def test_SampleInitializer(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleInitializer.sample(key)
def forward(cfg):
init = parametric_utils.SampleInitializer.get_dynamic(cfg)
param = hk.get_parameter('asdf', [2, 2], dtype=jnp.float32, init=init)
return param
init_fn, _ = hk.transform(forward)
val = jax.jit(init_fn)(key, cfg)
self.assertEqual(jax.tree_util.tree_leaves(val)[0].shape, (2, 2))
def test_orth_init(self):
key = jax.random.PRNGKey(0)
init = parametric_utils.orth_init([16, 16], jnp.float32, key)
# Check that the initializer is orthogonal by checking if all the eval's
evals, unused_evecs = onp.linalg.eig(init)
testing.assert_allclose(onp.abs(evals), jnp.ones([16]), rtol=1e-6)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
quheng/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 205 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
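# A quick numeric companion to the plot (illustrative addition, not part
# of the original example): training MSE shrinks as max_depth grows,
# which is exactly the overfitting behaviour described in the docstring.
for name, regr in [("max_depth=2", regr_1),
                   ("max_depth=5", regr_2),
                   ("max_depth=8", regr_3)]:
    mse = np.mean((regr.predict(X) - y) ** 2)
    print("%s: training MSE = %.4f" % (name, mse))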
| bsd-3-clause |
quheng/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 267 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
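# Illustrative follow-up (not part of the original example): since the
# anisotropic blobs were produced by a known linear map, inverting that
# map restores roughly spherical blobs that satisfy k-means' implicit
# isotropy assumption.
X_recovered = np.dot(X_aniso, np.linalg.inv(transformation))
y_recovered = KMeans(n_clusters=3,
                     random_state=random_state).fit_predict(X_recovered)
print("Cluster sizes after undoing the transform:", np.bincount(y_recovered))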
| bsd-3-clause |
rbharath/deepchem | examples/pdbbind/pdbbind_rf.py | 3 | 1332 | """
Script that trains Sklearn RF models on PDBbind dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import os
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from pdbbind_datasets import load_pdbbind_grid
# For stable runs
np.random.seed(123)
split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "%s_%s_RF" % (split, subset))
sklearn_model = RandomForestRegressor(n_estimators=500)
model = dc.models.SklearnModel(sklearn_model, model_dir=model_dir)
# Fit trained model
print("Fitting model on train dataset")
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
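# Illustrative addition (not part of the original script): the wrapped
# sklearn estimator is fitted in place by dc.models.SklearnModel, so its
# per-feature importances are available afterwards and hint at which
# grid features drive the predicted binding affinities.
importances = sklearn_model.feature_importances_
top = np.argsort(importances)[::-1][:10]
print("Top-10 feature indices by importance:")
print(top)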
| mit |
Lawrence-Liu/scikit-learn | sklearn/utils/class_weight.py | 139 | 7206 | # Authors: Andreas Mueller
# Manoj Kumar
# License: BSD 3 clause
import warnings
import numpy as np
from ..externals import six
from ..utils.fixes import in1d
from .fixes import bincount
def compute_class_weight(class_weight, classes, y):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'balanced' or None
If 'balanced', class weights will be given by
``n_samples / (n_classes * np.bincount(y))``.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y : array-like, shape (n_samples,)
        Array of original class labels per sample.
Returns
-------
class_weight_vect : ndarray, shape (n_classes,)
Array with class_weight_vect[i] the weight for i-th class
References
----------
The "balanced" heuristic is inspired by
Logistic Regression in Rare Events Data, King, Zen, 2001.
"""
    # Imported here to avoid an error caused by circular imports.
from ..preprocessing import LabelEncoder
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight in ['auto', 'balanced']:
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.in1d(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
# inversely proportional to the number of samples in the class
if class_weight == 'auto':
recip_freq = 1. / bincount(y_ind)
weight = recip_freq[le.transform(classes)] / np.mean(recip_freq)
warnings.warn("The class_weight='auto' heuristic is deprecated in"
" favor of a new heuristic class_weight='balanced'."
" 'auto' will be removed in 0.18", DeprecationWarning)
else:
recip_freq = len(y) / (len(le.classes_) *
bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
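# Illustrative usage (not part of the original module): for a 3:1
# imbalanced binary problem, 'balanced' yields
# n_samples / (n_classes * np.bincount(y)). Doctest-style comment only.
#
# >>> y = np.asarray([0, 0, 0, 1])
# >>> compute_class_weight('balanced', np.array([0, 1]), y)
# array([ 0.66666667,  2.        ])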
def compute_sample_weight(class_weight, y, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Array of original class labels per sample.
indices : array-like, shape (n_subsample,), or None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "auto" is
supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray, shape (n_samples,)
Array with sample weights as applied to the original y
"""
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, six.string_types):
if class_weight not in ['balanced', 'auto']:
raise ValueError('The only valid preset for class_weight is '
'"balanced". Given "%s".' % class_weight)
elif (indices is not None and
not isinstance(class_weight, six.string_types)):
raise ValueError('The only valid class_weight for subsampling is '
'"balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if (not hasattr(class_weight, "__iter__") or
isinstance(class_weight, dict)):
raise ValueError("For multi-output, class_weight should be a "
"list of dicts, or a valid string.")
if len(class_weight) != n_outputs:
raise ValueError("For multi-output, number of elements in "
"class_weight should match number of outputs.")
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight in ['balanced', 'auto'] or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y[indices, k]
classes_subsample = np.unique(y_subsample)
weight_k = np.choose(np.searchsorted(classes_subsample,
classes_full),
compute_class_weight(class_weight_k,
classes_subsample,
y_subsample),
mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(class_weight_k,
classes_full,
y_full)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[in1d(y_full, list(classes_missing))] = 0.
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight,
axis=0,
dtype=np.float64)
return expanded_class_weight
| bsd-3-clause |
halfak/pyenchant | enchant/tokenize/__init__.py | 2 | 19440 | # pyenchant
#
# Copyright (C) 2004-2009, Ryan Kelly
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# In addition, as a special exception, you are
# given permission to link the code of this program with
# non-LGPL Spelling Provider libraries (eg: a MSFT Office
# spell checker backend) and distribute linked combinations including
# the two. You must obey the GNU Lesser General Public License in all
# respects for all of the code used other than said providers. If you modify
# this file, you may extend this exception to your version of the
# file, but you are not obligated to do so. If you do not wish to
# do so, delete this exception statement from your version.
#
"""
enchant.tokenize: String tokenization functions for PyEnchant
================================================================
An important task in spellchecking is breaking up large bodies of
text into their constituent words, each of which is then checked
for correctness. This package provides Python functions to split
strings into words according to the rules of a particular language.
Each tokenization function accepts a string as its only positional
argument, and returns an iterator that yields tuples of the following
form, one for each word found::
(<word>,<pos>)
The meanings of these fields should be clear: <word> is the word
that was found and <pos> is the position within the text at which
the word began (zero indexed, of course). The function will work
on any string-like object that supports array-slicing; in particular
character-array objects from the 'array' module may be used.
The iterator also provides the attribute 'offset' which gives the current
position of the tokenizer inside the string being split, and the method
'set_offset' for manually adjusting this position. This can be used for
example if the string's contents have changed during the tokenization
process.
To obtain an appropriate tokenization function for the language
identified by <tag>, use the function 'get_tokenizer(tag)'::
tknzr = get_tokenizer("en_US")
for (word,pos) in tknzr("text to be tokenized goes here")
do_something(word)
This library is designed to be easily extendible by third-party
authors. To register a tokenization function for the language
<tag>, implement it as the function 'tokenize' within the
module enchant.tokenize.<tag>. The 'get_tokenizer' function
will automatically detect it. Note that the underscore must be
used as the tag component separator in this case, in order to
form a valid python module name. (e.g. "en_US" rather than "en-US")
Currently, a tokenizer has only been implemented for the English
language. Based on the author's limited experience, this should
be at least partially suitable for other languages.
This module also provides various implementations of "Chunkers" and
"Filters". These classes are designed to make it easy to work with
text in a variety of common formats, by detecting and excluding parts
of the text that don't need to be checked.
A Chunker is a class designed to break a body of text into large chunks
of checkable content; for example the HTMLChunker class extracts the
text content from all HTML tags but excludes the tags themselves.
A Filter is a class designed to skip individual words during the checking
process; for example the URLFilter class skips over any words that
have the format of a URL.
For example, to spellcheck an HTML document it is necessary to split the
text into chunks based on HTML tags, and to filter out common word forms
such as URLs and WikiWords. This would look something like the following::
tknzr = get_tokenizer("en_US",(HTMLChunker,),(URLFilter,WikiWordFilter)))
text = "<html><body>the url is http://example.com</body></html>"
for (word,pos) in tknzer(text):
...check each word and react accordingly...
"""
_DOC_ERRORS = ["pos","pos","tknzr","URLFilter","WikiWordFilter",
"tkns","tknzr","pos","tkns"]
import re
import warnings
import array
import enchant
from enchant.utils import next, xrange
from enchant.errors import *
# For backwards-compatability. This will eventually be removed, but how
# does one mark a module-level constant as deprecated?
Error = TokenizerNotFoundError
class tokenize:
"""Base class for all tokenizer objects.
Each tokenizer must be an iterator and provide the 'offset'
attribute as described in the documentation for this module.
While tokenizers are in fact classes, they should be treated
like functions, and so are named using lower_case rather than
    the CamelCase more traditional for class names.
"""
_DOC_ERRORS = ["CamelCase"]
def __init__(self,text):
self._text = text
self._offset = 0
def __next__(self):
return self.next()
def next(self):
raise NotImplementedError()
def __iter__(self):
return self
def set_offset(self,offset,replaced=False):
self._offset = offset
def _get_offset(self):
return self._offset
def _set_offset(self,offset):
msg = "changing a tokenizers 'offset' attribute is deprecated;"\
" use the 'set_offset' method"
warnings.warn(msg,category=DeprecationWarning,stacklevel=2)
self.set_offset(offset)
offset = property(_get_offset,_set_offset)
def get_tokenizer(tag=None,chunkers=None,filters=None):
"""Locate an appropriate tokenizer by language tag.
This requires importing the function 'tokenize' from an appropriate
module. Modules tried are named after the language tag, tried in the
following order:
* the entire tag (e.g. "en_AU.py")
* the base country code of the tag (e.g. "en.py")
If the language tag is None, a default tokenizer (actually the English
one) is returned. It's unicode aware and should work OK for most
latin-derived languages.
If a suitable function cannot be found, raises TokenizerNotFoundError.
If given and not None, 'chunkers' and 'filters' must be lists of chunker
classes and filter classes respectively. These will be applied to the
tokenizer during creation.
"""
if tag is None:
tag = "en"
# "filters" used to be the second argument. Try to catch cases
# where it is given positionally and issue a DeprecationWarning.
if chunkers is not None and filters is None:
chunkers = list(chunkers)
if chunkers:
try:
chunkers_are_filters = issubclass(chunkers[0],Filter)
except TypeError:
pass
else:
if chunkers_are_filters:
msg = "passing 'filters' as a non-keyword argument "\
"to get_tokenizer() is deprecated"
warnings.warn(msg,category=DeprecationWarning,stacklevel=2)
filters = chunkers
chunkers = None
# Ensure only '_' used as separator
tag = tag.replace("-","_")
# First try the whole tag
tkFunc = _try_tokenizer(tag)
if tkFunc is None:
# Try just the base
base = tag.split("_")[0]
tkFunc = _try_tokenizer(base)
if tkFunc is None:
msg = "No tokenizer found for language '%s'" % (tag,)
raise TokenizerNotFoundError(msg)
# Given the language-specific tokenizer, we now build up the
# end result as follows:
# * chunk the text using any given chunkers in turn
# * begin with basic whitespace tokenization
# * apply each of the given filters in turn
# * apply language-specific rules
tokenizer = basic_tokenize
if chunkers is not None:
chunkers = list(chunkers)
for i in xrange(len(chunkers)-1,-1,-1):
tokenizer = wrap_tokenizer(chunkers[i],tokenizer)
if filters is not None:
for f in filters:
tokenizer = f(tokenizer)
tokenizer = wrap_tokenizer(tokenizer,tkFunc)
return tokenizer
get_tokenizer._DOC_ERRORS = ["py","py"]
class empty_tokenize(tokenize):
"""Tokenizer class that yields no elements."""
_DOC_ERRORS = []
def __init__(self):
tokenize.__init__(self,"")
def next(self):
raise StopIteration()
class unit_tokenize(tokenize):
"""Tokenizer class that yields the text as a single token."""
_DOC_ERRORS = []
def __init__(self,text):
tokenize.__init__(self,text)
self._done = False
def next(self):
if self._done:
raise StopIteration()
self._done = True
return (self._text,0)
class basic_tokenize(tokenize):
"""Tokenizer class that performs very basic word-finding.
This tokenizer does the most basic thing that could work - it splits
text into words based on whitespace boundaries, and removes basic
punctuation symbols from the start and end of each word.
"""
_DOC_ERRORS = []
# Chars to remove from start/end of words
strip_from_start = '"' + "'`(["
strip_from_end = '"' + "'`]).!,?;:"
def next(self):
text = self._text
offset = self._offset
while True:
if offset >= len(text):
break
# Find start of next word
while offset < len(text) and text[offset].isspace():
offset += 1
sPos = offset
# Find end of word
while offset < len(text) and not text[offset].isspace():
offset += 1
ePos = offset
self._offset = offset
            # Strip chars from front/end of word
while sPos < len(text) and text[sPos] in self.strip_from_start:
sPos += 1
while 0 < ePos and text[ePos-1] in self.strip_from_end:
ePos -= 1
            # Return if word isn't empty
if(sPos < ePos):
return (text[sPos:ePos],sPos)
raise StopIteration()
def _try_tokenizer(modName):
"""Look for a tokenizer in the named module.
Returns the function if found, None otherwise.
"""
modBase = "enchant.tokenize."
funcName = "tokenize"
modName = modBase + modName
try:
mod = __import__(modName,globals(),{},funcName)
return getattr(mod,funcName)
except ImportError:
return None
def wrap_tokenizer(tk1,tk2):
"""Wrap one tokenizer inside another.
This function takes two tokenizer functions 'tk1' and 'tk2',
and returns a new tokenizer function that passes the output
of tk1 through tk2 before yielding it to the calling code.
"""
# This logic is already implemented in the Filter class.
# We simply use tk2 as the _split() method for a filter
# around tk1.
tkW = Filter(tk1)
tkW._split = tk2
return tkW
wrap_tokenizer._DOC_ERRORS = ["tk","tk","tk","tk"]
class Chunker(tokenize):
"""Base class for text chunking functions.
A chunker is designed to chunk text into large blocks of tokens. It
has the same interface as a tokenizer but is for a different purpose.
"""
pass
class Filter(object):
"""Base class for token filtering functions.
A filter is designed to wrap a tokenizer (or another filter) and do
two things:
* skip over tokens
* split tokens into sub-tokens
Subclasses have two basic options for customising their behaviour. The
method _skip(word) may be overridden to return True for words that
should be skipped, and false otherwise. The method _split(word) may
be overridden as tokenization function that will be applied to further
tokenize any words that aren't skipped.
"""
def __init__(self,tokenizer):
"""Filter class constructor."""
self._tokenizer = tokenizer
def __call__(self,*args,**kwds):
tkn = self._tokenizer(*args,**kwds)
return self._TokenFilter(tkn,self._skip,self._split)
def _skip(self,word):
"""Filter method for identifying skippable tokens.
If this method returns true, the given word will be skipped by
the filter. This should be overridden in subclasses to produce the
desired functionality. The default behaviour is not to skip any words.
"""
return False
def _split(self,word):
"""Filter method for sub-tokenization of tokens.
This method must be a tokenization function that will split the
given word into sub-tokens according to the needs of the filter.
The default behaviour is not to split any words.
"""
return unit_tokenize(word)
class _TokenFilter(object):
"""Private inner class implementing the tokenizer-wrapping logic.
This might seem convoluted, but we're trying to create something
akin to a meta-class - when Filter(tknzr) is called it must return
a *callable* that can then be applied to a particular string to
perform the tokenization. Since we need to manage a lot of state
during tokenization, returning a class is the best option.
"""
_DOC_ERRORS = ["tknzr"]
def __init__(self,tokenizer,skip,split):
self._skip = skip
self._split = split
self._tokenizer = tokenizer
# for managing state of sub-tokenization
self._curtok = empty_tokenize()
self._curword = ""
self._curpos = 0
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
# Try to get the next sub-token from word currently being split.
# If unavailable, move on to the next word and try again.
try:
(word,pos) = next(self._curtok)
return (word,pos + self._curpos)
except StopIteration:
(word,pos) = next(self._tokenizer)
while self._skip(self._to_string(word)):
(word,pos) = next(self._tokenizer)
self._curword = word
self._curpos = pos
self._curtok = self._split(word)
return self.next()
def _to_string(self, word):
if type(word) is array.array:
if word.typecode == 'u':
return word.tounicode()
elif word.typecode == 'c':
return word.tostring()
return word
# Pass on access to 'offset' to the underlying tokenizer.
def _get_offset(self):
return self._tokenizer.offset
def _set_offset(self,offset):
msg = "changing a tokenizers 'offset' attribute is deprecated;"\
" use the 'set_offset' method"
warnings.warn(msg,category=DeprecationWarning,stacklevel=2)
self.set_offset(offset)
offset = property(_get_offset,_set_offset)
def set_offset(self,val,replaced=False):
old_offset = self._tokenizer.offset
self._tokenizer.set_offset(val,replaced=replaced)
# If we move forward within the current word, also set on _curtok.
# Otherwise, throw away _curtok and set to empty iterator.
keep_curtok = True
curtok_offset = val - self._curpos
if old_offset > val:
keep_curtok = False
if curtok_offset < 0:
keep_curtok = False
if curtok_offset >= len(self._curword):
keep_curtok = False
if keep_curtok and not replaced:
self._curtok.set_offset(curtok_offset)
else:
self._curtok = empty_tokenize()
self._curword = ""
self._curpos = 0
# Pre-defined chunkers and filters start here
class URLFilter(Filter):
"""Filter skipping over URLs.
This filter skips any words matching the following regular expression:
        ^[a-zA-Z]+:\/\/[^\s].*
That is, any words that are URLs.
"""
_DOC_ERRORS = ["zA"]
_pattern = re.compile(r"^[a-zA-z]+:\/\/[^\s].*")
def _skip(self,word):
if self._pattern.match(word):
return True
return False
class WikiWordFilter(Filter):
"""Filter skipping over WikiWords.
This filter skips any words matching the following regular expression:
^([A-Z]\w+[A-Z]+\w+)
That is, any words that are WikiWords.
"""
_pattern = re.compile(r"^([A-Z]\w+[A-Z]+\w+)")
def _skip(self,word):
if self._pattern.match(word):
return True
return False
class EmailFilter(Filter):
"""Filter skipping over email addresses.
This filter skips any words matching the following regular expression:
^.+@[^\.].*\.[a-z]{2,}$
That is, any words that resemble email addresses.
"""
_pattern = re.compile(r"^.+@[^\.].*\.[a-z]{2,}$")
def _skip(self,word):
if self._pattern.match(word):
return True
return False
class HTMLChunker(Chunker):
"""Chunker for breaking up HTML documents into chunks of checkable text.
The operation of this chunker is very simple - anything between a "<"
and a ">" will be ignored. Later versions may improve the algorithm
slightly.
"""
def next(self):
text = self._text
offset = self.offset
while True:
if offset >= len(text):
break
# Skip to the end of the current tag, if any.
if text[offset] == "<":
maybeTag = offset
if self._is_tag(text,offset):
while text[offset] != ">":
offset += 1
if offset == len(text):
offset = maybeTag+1
break
else:
offset += 1
else:
offset = maybeTag+1
sPos = offset
# Find the start of the next tag.
while offset < len(text) and text[offset] != "<":
offset += 1
ePos = offset
self._offset = offset
            # Return if chunk isn't empty
if(sPos < offset):
return (text[sPos:offset],sPos)
raise StopIteration()
def _is_tag(self,text,offset):
if offset+1 < len(text):
if text[offset+1].isalpha():
return True
if text[offset+1] == "/":
return True
return False
#TODO: LaTeXChunker
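# Example of extending the Filter API (illustrative, not part of the
# original module): skipping hash-tags takes nothing more than
# overriding _skip(), mirroring URLFilter above. The class name and
# pattern here are hypothetical.
class HashTagFilter(Filter):
    """Filter skipping over words that look like #hashtags."""
    _pattern = re.compile(r"^#\w+$")
    def _skip(self,word):
        return self._pattern.match(word) is not None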
| lgpl-2.1 |
ningchi/scikit-learn | examples/svm/plot_svm_anova.py | 249 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
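# Illustrative follow-up (not part of the original example): report the
# percentile whose mean cross-validation score was highest.
best = percentiles[np.argmax(score_means)]
print("Best percentile of features: %d (mean score %.3f)"
      % (best, max(score_means)))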
| bsd-3-clause |
msimacek/samba | third_party/dnspython/dns/node.py | 56 | 5869 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
"""A DNS node.
A node is a set of rdatasets
@ivar rdatasets: the node's rdatasets
@type rdatasets: list of dns.rdataset.Rdataset objects"""
__slots__ = ['rdatasets']
def __init__(self):
"""Initialize a DNS node.
"""
        self.rdatasets = []
def to_text(self, name, **kw):
"""Convert a node to text format.
Each rdataset at the node is printed. Any keyword arguments
to this method are passed on to the rdataset's to_text() method.
@param name: the owner name of the rdatasets
@type name: dns.name.Name object
@rtype: string
"""
s = StringIO.StringIO()
for rds in self.rdatasets:
print >> s, rds.to_text(name, **kw)
return s.getvalue()[:-1]
def __repr__(self):
return '<DNS node ' + str(id(self)) + '>'
def __eq__(self, other):
"""Two nodes are equal if they have the same rdatasets.
@rtype: bool
"""
#
# This is inefficient. Good thing we don't need to do it much.
#
for rd in self.rdatasets:
if rd not in other.rdatasets:
return False
for rd in other.rdatasets:
if rd not in self.rdatasets:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.rdatasets)
def __iter__(self):
return iter(self.rdatasets)
def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Find an rdataset matching the specified properties in the
current node.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@raises KeyError: An rdataset of the desired type and class does
not exist and I{create} is not True.
@rtype: dns.rdataset.Rdataset object
"""
for rds in self.rdatasets:
if rds.match(rdclass, rdtype, covers):
return rds
if not create:
raise KeyError
rds = dns.rdataset.Rdataset(rdclass, rdtype)
self.rdatasets.append(rds)
return rds
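    # Illustrative call (sketch; assumes dns.rdataclass is available):
    #   node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.RRSIG,
    #                      covers=dns.rdatatype.A, create=True)
    # matches only the RRSIG(A) rdataset, never RRSIG(NS) or RRSIG(SOA).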
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds
def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching the specified properties in the
current node.
If a matching rdataset does not exist, it is not an error.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
"""
rds = self.get_rdataset(rdclass, rdtype, covers)
        if rds is not None:
self.rdatasets.remove(rds)
def replace_rdataset(self, replacement):
"""Replace an rdataset.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the node;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
"""
self.delete_rdataset(replacement.rdclass, replacement.rdtype,
replacement.covers)
self.rdatasets.append(replacement)
| gpl-3.0 |
RecipeML/Recipe | recipe/classifiers/sgd.py | 1 | 2887 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.linear_model import SGDClassifier
def sgd(args):
"""Uses scikit-learn's SGDClassifier, this estimator implements regularized linear models with stochastic gradient descent (SGD) learning:
    the gradient of the loss is estimated one sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate)
Parameters
----------
loss : str, ‘hinge’, ‘log’, ‘modified_huber’, ‘squared_hinge’, ‘perceptron’, or a
regression loss: ‘squared_loss’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’
The loss function to be used.
l1_ratio : float
The Elastic Net mixing parameter,
learning_rate : string
The learning rate schedule:
average : bool or int
When set to True, computes the averaged SGD weights and stores the result in the coef_ attribute.
If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average.
penalty : str, ‘none’, ‘l2’, ‘l1’, or ‘elasticnet’
The penalty (aka regularization term) to be used.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.
n_iter : int
The number of passes over the training data (aka epochs).
shuffle : bool
Whether or not the training data should be shuffled after each epoch. Defaults to True.
eta0 : double
The initial learning rate for the ‘constant’ or ‘invscaling’ schedules.
"""
loss_sgd = args[1]
l1 = float(args[2])
lr_sgd = args[3]
if(args[4].find("True")!=-1):
av = True
elif(args[4].find("False")!=-1):
av = False
else:
av = int(args[4])
penal = args[5]
fi = False
if(args[6].find("True")!=-1):
fi = True
it = int(args[7])
sffle = False
if(args[8].find("True")!=-1):
sffle = True
eta = float(args[9])
return SGDClassifier(loss=loss_sgd, penalty=penal, l1_ratio=l1,
fit_intercept=fi, n_iter=it, shuffle=sffle, verbose=0, n_jobs=1,
random_state=42, learning_rate=lr_sgd, eta0=eta, average=av)
| gpl-3.0 |
Akshay0724/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 56 | 7229 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
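# The objective evaluated here is
#     mean_i log(1 + exp(-y_i * (x_i . w + intercept))) + ||w||^2 / (2 * C * n),
# i.e. the sample-averaged regularized logistic loss that every solver in
# this benchmark minimizes (up to equivalent reparametrizations).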
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
    except ValueError:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
    except AttributeError:
C = clf.C
try:
intercept = clf.intercept_
    except AttributeError:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
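# This bound on max_i ||x_i||^2 is what get_auto_step_size() needs to pick a
# safe constant step size for the SAG/SVRG solvers benchmarked below.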
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
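# Each solver gets its own iteration grid so that the resulting curves cover
# a roughly comparable wall-clock range (an SGD epoch or lbfgs step is much
# cheaper than a Newton step).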
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Swap the train and test subsets relative to the
# LYRL2004 split, to obtain a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
Fireblend/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 295 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
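# Time a pairwise computation with a single core and with all cores for
# growing sample sizes; the gap between the two curves shows the payoff of
# joblib-based parallelism in sklearn.metrics.pairwise.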
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/manifold/plot_mds.py | 85 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
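# (adding the transpose makes the noise symmetric and the diagonal is zeroed,
# so the perturbed matrix is still a valid symmetric dissimilarity matrix)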
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
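# The rescaling above matches each embedding's total dispersion to the ground
# truth; rotating every point cloud onto its own principal axes then gives
# the three solutions a comparable orientation for the overlay.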
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | sklearn/neighbors/tests/test_kde.py | 13 | 5622 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
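# Brute-force O(n^2) reference implementation: each branch is the textbook
# kernel form times the normalization from kernel_norm, giving exact
# densities to compare KernelDensity's tree-based estimates against.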
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
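# (nose-style generator test: each yielded tuple is collected and run as a
# separate test case, one per kernel/bandwidth/tolerance combination)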
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(samp, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
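    # A plausible completion (sketch only, not the upstream test): score()
    # returns the total log-likelihood, i.e. the sum of score_samples():
    #   kde = KernelDensity(kernel='gaussian').fit(X)
    #   assert_allclose(kde.score(Y), np.sum(kde.score_samples(Y)))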
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |