# Source repository: BigDataforYou/movie_recommendation_workshop_1
# File path: big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/parsers.py
# Copies: 1 | Size: 92704 bytes
""" Module contains tools for processing files into DataFrames or other objects """ from __future__ import print_function from pandas.compat import range, lrange, StringIO, lzip, zip, string_types, map from pandas import compat from collections import defaultdict import re import csv import warnings import numpy as np from pandas.core.index import Index, MultiIndex from pandas.core.frame import DataFrame import datetime import pandas.core.common as com from pandas.core.common import AbstractMethodError from pandas.core.config import get_option from pandas.io.date_converters import generic_parser from pandas.io.common import (get_filepath_or_buffer, _validate_header_arg, _get_handle, UnicodeReader, UTF8Recoder, BaseIterator, CParserError, EmptyDataError, ParserWarning) from pandas.tseries import tools from pandas.util.decorators import Appender import pandas.lib as lib import pandas.parser as _parser # common NA values # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', _NA_VALUES = set([ '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan', '' ]) _parser_params = """Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <http://pandas.pydata.org/pandas-docs/stable/io.html>`_. Parameters ---------- filepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \ object with a read() method (such as a file handle or StringIO) The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. For instance, a local file could be file ://localhost/path/to/table.csv %s delimiter : str, default ``None`` Alternative argument name for sep. delim_whitespace : boolean, default False Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be used as the sep. Equivalent to setting ``sep='\+s'``. If this option is set to True, nothing should be passed in for the ``delimiter`` parameter. .. versionadded:: 0.18.1 support for the Python parser. header : int or list of ints, default 'infer' Row number(s) to use as the column names, and the start of the data. Default behavior is as if set to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so header=0 denotes the first line of data rather than the first line of the file. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None index_col : int or sequence or False, default None Column to use as the row labels of the DataFrame. If a sequence is given, a MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider index_col=False to force pandas to _not_ use the first column as the index (row names) usecols : array-like, default None Return a subset of the columns. All elements in this array must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). 
For example, a valid `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Using this parameter results in much faster parsing time and lower memory usage. squeeze : boolean, default False If the parsed data only contains one column then return a Series prefix : str, default None Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : boolean, default True Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X' dtype : Type name or dict of column -> type, default None Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} (Unsupported with engine='python'). Use `str` or `object` to preserve and not interpret dtype. %s converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels true_values : list, default None Values to consider as True false_values : list, default None Values to consider as False skipinitialspace : boolean, default False Skip spaces after delimiter. skiprows : list-like or integer, default None Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c') nrows : int, default None Number of rows of file to read. Useful for reading pieces of large files na_values : str or list-like or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: `'""" + "'`, `'".join(sorted(_NA_VALUES)) + """'`. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. na_filter : boolean, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file verbose : boolean, default False Indicate number of NA values placed in non-numeric columns skip_blank_lines : boolean, default True If True, skip over blank lines rather than interpreting as NaN values parse_dates : boolean or list of ints or names or list of lists or dict, \ default False * boolean. If True -> try parsing the index. * list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo' Note: A fast-path exists for iso8601-formatted dates. infer_datetime_format : boolean, default False If True and parse_dates is enabled, pandas will attempt to infer the format of the datetime strings in the columns, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. keep_date_col : boolean, default False If True and parse_dates specifies combining multiple columns then keep the original columns. date_parser : function, default None Function to use for converting a sequence of string columns to an array of datetime instances. The default uses ``dateutil.parser.parser`` to do the conversion. 
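# Illustrative sketch (not part of the original module): per-column na_values and
# parse_dates as documented above. Data is invented.
from pandas.compat import StringIO
import pandas as pd

_demo = StringIO("day,price,qty\n2016-01-01,n/a,3\n2016-01-02,1.5,MISSING\n")
df = pd.read_csv(
    _demo,
    parse_dates=['day'],                               # -> datetime64 column
    na_values={'price': ['n/a'], 'qty': ['MISSING']},  # extra per-column NA markers
)
# df['price'] -> [NaN, 1.5]; df['qty'] -> [3.0, NaN]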
Pandas will try to call date_parser in three different ways, advancing to the next if an exception occurs: 1) Pass one or more arrays (as defined by parse_dates) as arguments; 2) concatenate (row-wise) the string values from the columns defined by parse_dates into a single array and pass that; and 3) call date_parser once for each row using one or more strings (corresponding to the columns defined by parse_dates) as arguments. dayfirst : boolean, default False DD/MM format dates, international and European format iterator : boolean, default False Return TextFileReader object for iteration or getting chunks with ``get_chunk()``. chunksize : int, default None Return TextFileReader object for iteration. `See IO Tools docs for more information <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ on ``iterator`` and ``chunksize``. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if filepath_or_buffer is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression. thousands : str, default None Thousands separator decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). lineterminator : str (length 1), default None Character to break file into lines. Only valid with C parser. quotechar : str (length 1), optional The character used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored. quoting : int or csv.QUOTE_* instance, default None Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3). Default (None) results in QUOTE_MINIMAL behavior. escapechar : str (length 1), default None One-character string used to escape delimiter when quoting is QUOTE_NONE. comment : str, default None Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. Like empty lines (as long as ``skip_blank_lines=True``), fully commented lines are ignored by the parameter `header` but not by `skiprows`. For example, if comment='#', parsing '#empty\\na,b,c\\n1,2,3' with `header=0` will result in 'a,b,c' being treated as the header. encoding : str, default None Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ dialect : str or csv.Dialect instance, default None If None defaults to Excel dialect. Ignored if sep longer than 1 char See csv.Dialect documentation for more details tupleize_cols : boolean, default False Leave a list of tuples on columns as is (default is to convert to a Multi Index on the columns) error_bad_lines : boolean, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will dropped from the DataFrame that is returned. (Only valid with C parser) warn_bad_lines : boolean, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. (Only valid with C parser). 
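# Illustrative sketch (not part of the original module): chunked reading combined
# with the comment, thousands and decimal options documented above. Data is
# invented and uses European number formatting.
from pandas.compat import StringIO
import pandas as pd

_demo = StringIO(
    "# exported by some tool\n"
    "city;population;area\n"
    "Berlin;3.520.031;891,7\n"
    "Hamburg;1.787.408;755,2\n"
)
reader = pd.read_csv(_demo, sep=';', comment='#',
                     thousands='.', decimal=',', chunksize=1)
for chunk in reader:        # each chunk is a one-row DataFrame
    print(chunk)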
Returns ------- result : DataFrame or TextParser """ # engine is not used in read_fwf() so is factored out of the shared docstring _engine_doc = """engine : {'c', 'python'}, optional Parser engine to use. The C engine is faster while the python engine is currently more feature-complete.""" _sep_doc = """sep : str, default {default} Delimiter to use. If sep is None, will try to automatically determine this. Separators longer than 1 character and different from '\s+' will be interpreted as regular expressions, will force use of the python parsing engine and will ignore quotes in the data. Regex example: '\\r\\t'""" _read_csv_doc = """ Read CSV (comma-separated) file into DataFrame %s """ % (_parser_params % (_sep_doc.format(default="','"), _engine_doc)) _read_table_doc = """ Read general delimited file into DataFrame %s """ % (_parser_params % (_sep_doc.format(default="\\t (tab-stop)"), _engine_doc)) _fwf_widths = """\ colspecs : list of pairs (int, int) or 'infer'. optional A list of pairs (tuples) giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data (default='infer'). widths : list of ints. optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. """ _read_fwf_doc = """ Read a table of fixed-width formatted lines into DataFrame %s Also, 'delimiter' is used to specify the filler character of the fields if it is not spaces (e.g., '~'). """ % (_parser_params % (_fwf_widths, '')) def _read(filepath_or_buffer, kwds): "Generic reader of line files." encoding = kwds.get('encoding', None) skipfooter = kwds.pop('skipfooter', None) if skipfooter is not None: kwds['skip_footer'] = skipfooter # If the input could be a filename, check for a recognizable compression # extension. If we're reading from a URL, the `get_filepath_or_buffer` # will use header info to determine compression, so use what it finds in # that case. inferred_compression = kwds.get('compression') if inferred_compression == 'infer': if isinstance(filepath_or_buffer, compat.string_types): if filepath_or_buffer.endswith('.gz'): inferred_compression = 'gzip' elif filepath_or_buffer.endswith('.bz2'): inferred_compression = 'bz2' elif filepath_or_buffer.endswith('.zip'): inferred_compression = 'zip' elif filepath_or_buffer.endswith('.xz'): inferred_compression = 'xz' else: inferred_compression = None else: inferred_compression = None filepath_or_buffer, _, compression = get_filepath_or_buffer( filepath_or_buffer, encoding, compression=kwds.get('compression', None)) kwds['compression'] = (inferred_compression if compression == 'infer' else compression) if kwds.get('date_parser', None) is not None: if isinstance(kwds['parse_dates'], bool): kwds['parse_dates'] = True # Extract some of the arguments (pass chunksize on). iterator = kwds.get('iterator', False) nrows = kwds.pop('nrows', None) chunksize = kwds.get('chunksize', None) # Create the parser. 
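# Illustrative sketch (not part of the original module): the extension-based
# compression inference performed in _read() above, restated as a tiny standalone
# helper. The name _infer_compression_from_path is a stand-in, not part of the
# module's own API.
def _infer_compression_from_path(filepath_or_buffer):
    """Return 'gzip', 'bz2', 'zip', 'xz' or None based on the filename suffix."""
    if not isinstance(filepath_or_buffer, str):
        return None
    for suffix, name in (('.gz', 'gzip'), ('.bz2', 'bz2'),
                         ('.zip', 'zip'), ('.xz', 'xz')):
        if filepath_or_buffer.endswith(suffix):
            return name
    return None

assert _infer_compression_from_path('table.csv.gz') == 'gzip'
assert _infer_compression_from_path('table.csv') is None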
parser = TextFileReader(filepath_or_buffer, **kwds) if (nrows is not None) and (chunksize is not None): raise NotImplementedError("'nrows' and 'chunksize' can not be used" " together yet.") elif nrows is not None: return parser.read(nrows) elif chunksize or iterator: return parser return parser.read() _parser_defaults = { 'delimiter': None, 'doublequote': True, 'escapechar': None, 'quotechar': '"', 'quoting': csv.QUOTE_MINIMAL, 'skipinitialspace': False, 'lineterminator': None, 'header': 'infer', 'index_col': None, 'names': None, 'prefix': None, 'skiprows': None, 'na_values': None, 'true_values': None, 'false_values': None, 'skip_footer': 0, 'converters': None, 'keep_default_na': True, 'thousands': None, 'comment': None, # 'engine': 'c', 'parse_dates': False, 'keep_date_col': False, 'dayfirst': False, 'date_parser': None, 'usecols': None, # 'nrows': None, # 'iterator': False, 'chunksize': None, 'verbose': False, 'encoding': None, 'squeeze': False, 'compression': None, 'mangle_dupe_cols': True, 'tupleize_cols': False, 'infer_datetime_format': False, 'skip_blank_lines': True } _c_parser_defaults = { 'delim_whitespace': False, 'as_recarray': False, 'na_filter': True, 'compact_ints': False, 'use_unsigned': False, 'low_memory': True, 'memory_map': False, 'buffer_lines': None, 'error_bad_lines': True, 'warn_bad_lines': True, 'dtype': None, 'decimal': b'.', 'float_precision': None } _fwf_defaults = { 'colspecs': 'infer', 'widths': None, } _c_unsupported = set(['skip_footer']) _python_unsupported = set([ 'as_recarray', 'na_filter', 'compact_ints', 'use_unsigned', 'low_memory', 'memory_map', 'buffer_lines', 'error_bad_lines', 'warn_bad_lines', 'dtype', 'decimal', 'float_precision', ]) def _make_parser_function(name, sep=','): default_sep = sep def parser_f(filepath_or_buffer, sep=sep, delimiter=None, # Column and Index Locations and Names header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, # General Parsing Configuration dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, skipfooter=None, nrows=None, # NA and Missing Data Handling na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, # Datetime Handling parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, # Iteration iterator=False, chunksize=None, # Quoting, Compression, and File Format compression='infer', thousands=None, decimal=b'.', lineterminator=None, quotechar='"', quoting=csv.QUOTE_MINIMAL, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=False, # Error Handling error_bad_lines=True, warn_bad_lines=True, # Deprecated skip_footer=0, # Internal doublequote=True, delim_whitespace=False, as_recarray=False, compact_ints=False, use_unsigned=False, low_memory=_c_parser_defaults['low_memory'], buffer_lines=None, memory_map=False, float_precision=None): # Alias sep -> delimiter. 
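# Illustrative sketch (not part of the original module): nrows reads just the head
# of the input; per the guard in _read() above, this version raises
# NotImplementedError when nrows and chunksize are combined. Data is invented.
from pandas.compat import StringIO
import pandas as pd

head = pd.read_csv(StringIO("a,b\n1,2\n3,4\n5,6\n"), nrows=2)  # first two rows only

try:
    pd.read_csv(StringIO("a,b\n1,2\n"), nrows=1, chunksize=1)
except NotImplementedError:
    pass    # "'nrows' and 'chunksize' can not be used together yet."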
if delimiter is None: delimiter = sep if delim_whitespace and delimiter is not default_sep: raise ValueError("Specified a delimiter with both sep and" " delim_whitespace=True; you can only" " specify one.") if engine is not None: engine_specified = True else: engine = 'c' engine_specified = False kwds = dict(delimiter=delimiter, engine=engine, dialect=dialect, compression=compression, engine_specified=engine_specified, doublequote=doublequote, escapechar=escapechar, quotechar=quotechar, quoting=quoting, skipinitialspace=skipinitialspace, lineterminator=lineterminator, header=header, index_col=index_col, names=names, prefix=prefix, skiprows=skiprows, na_values=na_values, true_values=true_values, false_values=false_values, keep_default_na=keep_default_na, thousands=thousands, comment=comment, decimal=decimal, parse_dates=parse_dates, keep_date_col=keep_date_col, dayfirst=dayfirst, date_parser=date_parser, nrows=nrows, iterator=iterator, chunksize=chunksize, skipfooter=skipfooter or skip_footer, converters=converters, dtype=dtype, usecols=usecols, verbose=verbose, encoding=encoding, squeeze=squeeze, memory_map=memory_map, float_precision=float_precision, na_filter=na_filter, compact_ints=compact_ints, use_unsigned=use_unsigned, delim_whitespace=delim_whitespace, as_recarray=as_recarray, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, low_memory=low_memory, buffer_lines=buffer_lines, mangle_dupe_cols=mangle_dupe_cols, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format, skip_blank_lines=skip_blank_lines) return _read(filepath_or_buffer, kwds) parser_f.__name__ = name return parser_f read_csv = _make_parser_function('read_csv', sep=',') read_csv = Appender(_read_csv_doc)(read_csv) read_table = _make_parser_function('read_table', sep='\t') read_table = Appender(_read_table_doc)(read_table) @Appender(_read_fwf_doc) def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds): # Check input arguments. if colspecs is None and widths is None: raise ValueError("Must specify either colspecs or widths") elif colspecs not in (None, 'infer') and widths is not None: raise ValueError("You must specify only one of 'widths' and " "'colspecs'") # Compute 'colspecs' from 'widths', if specified. 
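# Illustrative sketch (not part of the original module): read_csv and read_table
# above are the same generated function with different default separators. The
# demo_* names below are simplified stand-ins for the real _make_parser_function
# machinery.
def _make_demo_reader(name, sep=','):
    def reader(text, sep=sep):
        # the real parser_f builds a kwds dict and defers to _read()/TextFileReader
        return [row.split(sep) for row in text.splitlines() if row]
    reader.__name__ = name
    return reader

demo_read_csv = _make_demo_reader('demo_read_csv', sep=',')
demo_read_table = _make_demo_reader('demo_read_table', sep='\t')

assert demo_read_csv('a,b\n1,2') == [['a', 'b'], ['1', '2']]
assert demo_read_table('a\tb\n1\t2') == [['a', 'b'], ['1', '2']]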
if widths is not None: colspecs, col = [], 0 for w in widths: colspecs.append((col, col + w)) col += w kwds['colspecs'] = colspecs kwds['engine'] = 'python-fwf' return _read(filepath_or_buffer, kwds) class TextFileReader(BaseIterator): """ Passed dialect overrides any of the related parser options """ def __init__(self, f, engine=None, **kwds): self.f = f if engine is not None: engine_specified = True else: engine = 'python' engine_specified = False self._engine_specified = kwds.get('engine_specified', engine_specified) if kwds.get('dialect') is not None: dialect = kwds['dialect'] if dialect in csv.list_dialects(): dialect = csv.get_dialect(dialect) kwds['delimiter'] = dialect.delimiter kwds['doublequote'] = dialect.doublequote kwds['escapechar'] = dialect.escapechar kwds['skipinitialspace'] = dialect.skipinitialspace kwds['quotechar'] = dialect.quotechar kwds['quoting'] = dialect.quoting if kwds.get('header', 'infer') == 'infer': kwds['header'] = 0 if kwds.get('names') is None else None self.orig_options = kwds # miscellanea self.engine = engine self._engine = None options = self._get_options_with_defaults(engine) self.chunksize = options.pop('chunksize', None) self.squeeze = options.pop('squeeze', False) # might mutate self.engine self.options, self.engine = self._clean_options(options, engine) if 'has_index_names' in kwds: self.options['has_index_names'] = kwds['has_index_names'] self._make_engine(self.engine) def close(self): try: self._engine._reader.close() except: pass def _get_options_with_defaults(self, engine): kwds = self.orig_options options = {} for argname, default in compat.iteritems(_parser_defaults): options[argname] = kwds.get(argname, default) for argname, default in compat.iteritems(_c_parser_defaults): if argname in kwds: value = kwds[argname] if engine != 'c' and value != default: if ('python' in engine and argname not in _python_unsupported): pass else: raise ValueError( 'The %r option is not supported with the' ' %r engine' % (argname, engine)) else: value = default options[argname] = value if engine == 'python-fwf': for argname, default in compat.iteritems(_fwf_defaults): options[argname] = kwds.get(argname, default) return options def _clean_options(self, options, engine): result = options.copy() engine_specified = self._engine_specified fallback_reason = None sep = options['delimiter'] delim_whitespace = options['delim_whitespace'] # C engine not supported yet if engine == 'c': if options['skip_footer'] > 0: fallback_reason = "the 'c' engine does not support"\ " skip_footer" engine = 'python' if sep is None and not delim_whitespace: if engine == 'c': fallback_reason = "the 'c' engine does not support"\ " sep=None with delim_whitespace=False" engine = 'python' elif sep is not None and len(sep) > 1: if engine == 'c' and sep == '\s+': result['delim_whitespace'] = True del result['delimiter'] elif engine not in ('python', 'python-fwf'): # wait until regex engine integrated fallback_reason = "the 'c' engine does not support"\ " regex separators (separators > 1 char and"\ " different from '\s+' are"\ " interpreted as regex)" engine = 'python' elif delim_whitespace: if 'python' in engine: result['delimiter'] = '\s+' if fallback_reason and engine_specified: raise ValueError(fallback_reason) if engine == 'c': for arg in _c_unsupported: del result[arg] if 'python' in engine: for arg in _python_unsupported: if fallback_reason and result[arg] != _c_parser_defaults[arg]: msg = ("Falling back to the 'python' engine because" " {reason}, but this causes {option!r} to be" " 
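# Illustrative sketch (not part of the original module): read_fwf() with widths,
# which the loop above turns into the half-open colspecs [(0, 6), (6, 16), (16, 22)].
# Data is invented.
from pandas.compat import StringIO
import pandas as pd

_fwf = StringIO(
    "id    name      score\n"
    "001   alice     91.5\n"
    "002   bob       78.0\n"
)
df = pd.read_fwf(_fwf, widths=[6, 10, 6])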
ignored as it is not supported by the 'python'" " engine.").format(reason=fallback_reason, option=arg) if arg == 'dtype': msg += " (Note the 'converters' option provides"\ " similar functionality.)" raise ValueError(msg) del result[arg] if fallback_reason: warnings.warn(("Falling back to the 'python' engine because" " {0}; you can avoid this warning by specifying" " engine='python'.").format(fallback_reason), ParserWarning, stacklevel=5) index_col = options['index_col'] names = options['names'] converters = options['converters'] na_values = options['na_values'] skiprows = options['skiprows'] # really delete this one keep_default_na = result.pop('keep_default_na') _validate_header_arg(options['header']) if index_col is True: raise ValueError("The value of index_col couldn't be 'True'") if _is_index_col(index_col): if not isinstance(index_col, (list, tuple, np.ndarray)): index_col = [index_col] result['index_col'] = index_col names = list(names) if names is not None else names # type conversion-related if converters is not None: if not isinstance(converters, dict): raise TypeError('Type converters must be a dict or' ' subclass, input was ' 'a {0!r}'.format(type(converters).__name__)) else: converters = {} # Converting values to NA na_values, na_fvalues = _clean_na_values(na_values, keep_default_na) # handle skiprows; this is internally handled by the # c-engine, so only need for python parsers if engine != 'c': if com.is_integer(skiprows): skiprows = lrange(skiprows) skiprows = set() if skiprows is None else set(skiprows) # put stuff back result['names'] = names result['converters'] = converters result['na_values'] = na_values result['na_fvalues'] = na_fvalues result['skiprows'] = skiprows return result, engine def __next__(self): return self.get_chunk() def _make_engine(self, engine='c'): if engine == 'c': self._engine = CParserWrapper(self.f, **self.options) else: if engine == 'python': klass = PythonParser elif engine == 'python-fwf': klass = FixedWidthFieldParser self._engine = klass(self.f, **self.options) def _failover_to_python(self): raise AbstractMethodError(self) def read(self, nrows=None): if nrows is not None: if self.options.get('skip_footer'): raise ValueError('skip_footer not supported for iteration') ret = self._engine.read(nrows) if self.options.get('as_recarray'): return ret # May alter columns / col_dict index, columns, col_dict = self._create_index(ret) df = DataFrame(col_dict, columns=columns, index=index) if self.squeeze and len(df.columns) == 1: return df[df.columns[0]].copy() return df def _create_index(self, ret): index, columns, col_dict = ret return index, columns, col_dict def get_chunk(self, size=None): if size is None: size = self.chunksize return self.read(nrows=size) def _is_index_col(col): return col is not None and col is not False def _validate_usecols_arg(usecols): """ Check whether or not the 'usecols' parameter contains all integers (column selection by index) or strings (column by name). Raises a ValueError if that is not the case. """ if usecols is not None: usecols_dtype = lib.infer_dtype(usecols) if usecols_dtype not in ('integer', 'string'): raise ValueError(("The elements of 'usecols' " "must either be all strings " "or all integers")) return usecols def _validate_parse_dates_arg(parse_dates): """ Check whether or not the 'parse_dates' parameter is a non-boolean scalar. Raises a ValueError if that is the case. 
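# Illustrative sketch (not part of the original module): iterator=True hands back
# the TextFileReader defined above, whose get_chunk(size) calls read(nrows=size).
# Data is invented.
from pandas.compat import StringIO
import pandas as pd

reader = pd.read_csv(StringIO("a,b\n1,2\n3,4\n5,6\n"), iterator=True)
first_two = reader.get_chunk(2)   # rows 1 and 2
last_one = reader.get_chunk(2)    # short final chunk with the remaining row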
""" msg = ("Only booleans, lists, and " "dictionaries are accepted " "for the 'parse_dates' parameter") if parse_dates is not None: if lib.isscalar(parse_dates): if not lib.is_bool(parse_dates): raise TypeError(msg) elif not isinstance(parse_dates, (list, dict)): raise TypeError(msg) return parse_dates class ParserBase(object): def __init__(self, kwds): self.names = kwds.get('names') self.orig_names = None self.prefix = kwds.pop('prefix', None) self.index_col = kwds.get('index_col', None) self.index_names = None self.col_names = None self.parse_dates = _validate_parse_dates_arg( kwds.pop('parse_dates', False)) self.date_parser = kwds.pop('date_parser', None) self.dayfirst = kwds.pop('dayfirst', False) self.keep_date_col = kwds.pop('keep_date_col', False) self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.tupleize_cols = kwds.get('tupleize_cols', False) self.infer_datetime_format = kwds.pop('infer_datetime_format', False) self._date_conv = _make_date_converter( date_parser=self.date_parser, dayfirst=self.dayfirst, infer_datetime_format=self.infer_datetime_format ) # validate header options for mi self.header = kwds.get('header') if isinstance(self.header, (list, tuple, np.ndarray)): if kwds.get('as_recarray'): raise ValueError("cannot specify as_recarray when " "specifying a multi-index header") if kwds.get('usecols'): raise ValueError("cannot specify usecols when " "specifying a multi-index header") if kwds.get('names'): raise ValueError("cannot specify names when " "specifying a multi-index header") # validate index_col that only contains integers if self.index_col is not None: is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray)) if not (is_sequence and all(map(com.is_integer, self.index_col)) or com.is_integer(self.index_col)): raise ValueError("index_col must only contain row numbers " "when specifying a multi-index header") self._name_processed = False self._first_chunk = True def close(self): self._reader.close() @property def _has_complex_date_col(self): return (isinstance(self.parse_dates, dict) or (isinstance(self.parse_dates, list) and len(self.parse_dates) > 0 and isinstance(self.parse_dates[0], list))) def _should_parse_dates(self, i): if isinstance(self.parse_dates, bool): return self.parse_dates else: name = self.index_names[i] j = self.index_col[i] if lib.isscalar(self.parse_dates): return (j == self.parse_dates) or (name == self.parse_dates) else: return (j in self.parse_dates) or (name in self.parse_dates) def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """ if len(header) < 2: return header[0], index_names, col_names, passed_names # the names are the tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column # numbers ic = self.index_col if ic is None: ic = [] if not isinstance(ic, (list, tuple, np.ndarray)): ic = [ic] sic = set(ic) # clean the index_names index_names = header.pop(-1) index_names, names, index_col = _clean_index_names(index_names, self.index_col) # extract the columns field_count = len(header[0]) def extract(r): return tuple([r[i] for i in range(field_count) if i not in sic]) columns = lzip(*[extract(r) for r in header]) names = ic + columns def tostr(x): return str(x) if not isinstance(x, compat.string_types) 
else x # if we find 'Unnamed' all of a single level, then our header was too # long for n in range(len(columns[0])): if all(['Unnamed' in tostr(c[n]) for c in columns]): raise CParserError( "Passed header=[%s] are too many rows for this " "multi_index of columns" % ','.join([str(x) for x in self.header]) ) # clean the column names (if we have an index_col) if len(ic): col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None for r in header] else: col_names = [None] * len(header) passed_names = True return names, index_names, col_names, passed_names def _maybe_make_multi_index_columns(self, columns, col_names=None): # possibly create a column mi here if (not self.tupleize_cols and len(columns) and not isinstance(columns, MultiIndex) and all([isinstance(c, tuple) for c in columns])): columns = MultiIndex.from_tuples(columns, names=col_names) return columns def _make_index(self, data, alldata, columns, indexnamerow=False): if not _is_index_col(self.index_col) or not self.index_col: index = None elif not self._has_complex_date_col: index = self._get_simple_index(alldata, columns) index = self._agg_index(index) elif self._has_complex_date_col: if not self._name_processed: (self.index_names, _, self.index_col) = _clean_index_names(list(columns), self.index_col) self._name_processed = True index = self._get_complex_date_index(data, columns) index = self._agg_index(index, try_parse_dates=False) # add names for the index if indexnamerow: coffset = len(indexnamerow) - len(columns) index = index.set_names(indexnamerow[:coffset]) # maybe create a mi on the columns columns = self._maybe_make_multi_index_columns(columns, self.col_names) return index, columns _implicit_index = False def _get_simple_index(self, data, columns): def ix(col): if not isinstance(col, compat.string_types): return col raise ValueError('Index %s invalid' % col) index = None to_remove = [] index = [] for idx in self.index_col: i = ix(idx) to_remove.append(i) index.append(data[i]) # remove index items from content and columns, don't pop in # loop for i in reversed(sorted(to_remove)): data.pop(i) if not self._implicit_index: columns.pop(i) return index def _get_complex_date_index(self, data, col_names): def _get_name(icol): if isinstance(icol, compat.string_types): return icol if col_names is None: raise ValueError(('Must supply column order to use %s as ' 'index') % str(icol)) for i, c in enumerate(col_names): if i == icol: return c index = None to_remove = [] index = [] for idx in self.index_col: name = _get_name(idx) to_remove.append(name) index.append(data[name]) # remove index items from content and columns, don't pop in # loop for c in reversed(sorted(to_remove)): data.pop(c) col_names.remove(c) return index def _agg_index(self, index, try_parse_dates=True): arrays = [] for i, arr in enumerate(index): if (try_parse_dates and self._should_parse_dates(i)): arr = self._date_conv(arr) col_na_values = self.na_values col_na_fvalues = self.na_fvalues if isinstance(self.na_values, dict): col_name = self.index_names[i] if col_name is not None: col_na_values, col_na_fvalues = _get_na_values( col_name, self.na_values, self.na_fvalues) arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues) arrays.append(arr) index = MultiIndex.from_arrays(arrays, names=self.index_names) return index def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, converters=None): result = {} for c, values in compat.iteritems(dct): conv_f = None if converters is None else converters.get(c, None) col_na_values, col_na_fvalues 
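# Illustrative sketch (not part of the original module): a sequence of index_col
# values is aggregated by _agg_index() above into a MultiIndex row index. Data is
# invented.
from pandas.compat import StringIO
import pandas as pd

_demo = StringIO("k1,k2,v\na,x,1\na,y,2\nb,x,3\n")
df = pd.read_csv(_demo, index_col=['k1', 'k2'])
# df.index -> MultiIndex with levels ('a', 'b') and ('x', 'y')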
= _get_na_values(c, na_values, na_fvalues) coerce_type = True if conv_f is not None: try: values = lib.map_infer(values, conv_f) except ValueError: mask = lib.ismember(values, na_values).view(np.uint8) values = lib.map_infer_mask(values, conv_f, mask) coerce_type = False cvals, na_count = self._convert_types( values, set(col_na_values) | col_na_fvalues, coerce_type) result[c] = cvals if verbose and na_count: print('Filled %d NA values in column %s' % (na_count, str(c))) return result def _convert_types(self, values, na_values, try_num_bool=True): na_count = 0 if issubclass(values.dtype.type, (np.number, np.bool_)): mask = lib.ismember(values, na_values) na_count = mask.sum() if na_count > 0: if com.is_integer_dtype(values): values = values.astype(np.float64) np.putmask(values, mask, np.nan) return values, na_count if try_num_bool: try: result = lib.maybe_convert_numeric(values, na_values, False) except Exception: result = values if values.dtype == np.object_: na_count = lib.sanitize_objects(result, na_values, False) else: result = values if values.dtype == np.object_: na_count = lib.sanitize_objects(values, na_values, False) if result.dtype == np.object_ and try_num_bool: result = lib.maybe_convert_bool(values, true_values=self.true_values, false_values=self.false_values) return result, na_count def _do_date_conversions(self, names, data): # returns data, columns if self.parse_dates is not None: data, names = _process_date_conversion( data, self._date_conv, self.parse_dates, self.index_col, self.index_names, names, keep_date_col=self.keep_date_col) return names, data class CParserWrapper(ParserBase): """ """ def __init__(self, src, **kwds): self.kwds = kwds kwds = kwds.copy() self.as_recarray = kwds.get('as_recarray', False) ParserBase.__init__(self, kwds) if 'utf-16' in (kwds.get('encoding') or ''): if isinstance(src, compat.string_types): src = open(src, 'rb') src = UTF8Recoder(src, kwds['encoding']) kwds['encoding'] = 'utf-8' # #2442 kwds['allow_leading_cols'] = self.index_col is not False self._reader = _parser.TextReader(src, **kwds) # XXX self.usecols = _validate_usecols_arg(self._reader.usecols) passed_names = self.names is None if self._reader.header is None: self.names = None else: if len(self._reader.header) > 1: # we have a multi index in the columns self.names, self.index_names, self.col_names, passed_names = ( self._extract_multi_indexer_columns( self._reader.header, self.index_names, self.col_names, passed_names ) ) else: self.names = list(self._reader.header[0]) if self.names is None: if self.prefix: self.names = ['%s%d' % (self.prefix, i) for i in range(self._reader.table_width)] else: self.names = lrange(self._reader.table_width) # gh-9755 # # need to set orig_names here first # so that proper indexing can be done # with _set_noconvert_columns # # once names has been filtered, we will # then set orig_names again to names self.orig_names = self.names[:] if self.usecols: if len(self.names) > len(self.usecols): self.names = [n for i, n in enumerate(self.names) if (i in self.usecols or n in self.usecols)] if len(self.names) < len(self.usecols): raise ValueError("Usecols do not match names.") self._set_noconvert_columns() self.orig_names = self.names if not self._has_complex_date_col: if (self._reader.leading_cols == 0 and _is_index_col(self.index_col)): self._name_processed = True (index_names, self.names, self.index_col) = _clean_index_names(self.names, self.index_col) if self.index_names is None: self.index_names = index_names if self._reader.header is None and not 
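# Illustrative sketch (not part of the original module): as in _convert_types()
# above, an integer column containing a missing value is upcast to float64 so the
# hole can be stored as NaN. Data is invented.
from pandas.compat import StringIO
import pandas as pd

df = pd.read_csv(StringIO("a,b\n1,4\n2,\n3,6\n"))
# df['a'].dtype -> int64; df['b'].dtype -> float64 with NaN in the middle row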
passed_names: self.index_names = [None] * len(self.index_names) self._implicit_index = self._reader.leading_cols > 0 def _set_noconvert_columns(self): names = self.orig_names usecols = self.usecols def _set(x): if usecols and com.is_integer(x): x = list(usecols)[x] if not com.is_integer(x): x = names.index(x) self._reader.set_noconvert(x) if isinstance(self.parse_dates, list): for val in self.parse_dates: if isinstance(val, list): for k in val: _set(k) else: _set(val) elif isinstance(self.parse_dates, dict): for val in self.parse_dates.values(): if isinstance(val, list): for k in val: _set(k) else: _set(val) def set_error_bad_lines(self, status): self._reader.set_error_bad_lines(int(status)) def read(self, nrows=None): try: data = self._reader.read(nrows) except StopIteration: if self._first_chunk: self._first_chunk = False index, columns, col_dict = _get_empty_meta( self.orig_names, self.index_col, self.index_names, dtype=self.kwds.get('dtype')) if self.usecols is not None: columns = self._filter_usecols(columns) col_dict = dict(filter(lambda item: item[0] in columns, col_dict.items())) return index, columns, col_dict else: raise # Done with first read, next time raise StopIteration self._first_chunk = False if self.as_recarray: # what to do if there are leading columns? return data names = self.names if self._reader.leading_cols: if self._has_complex_date_col: raise NotImplementedError('file structure not yet supported') # implicit index, no index names arrays = [] for i in range(self._reader.leading_cols): if self.index_col is None: values = data.pop(i) else: values = data.pop(self.index_col[i]) values = self._maybe_parse_dates(values, i, try_parse_dates=True) arrays.append(values) index = MultiIndex.from_arrays(arrays) if self.usecols is not None: names = self._filter_usecols(names) # rename dict keys data = sorted(data.items()) data = dict((k, v) for k, (i, v) in zip(names, data)) names, data = self._do_date_conversions(names, data) else: # rename dict keys data = sorted(data.items()) # ugh, mutation names = list(self.orig_names) if self.usecols is not None: names = self._filter_usecols(names) # columns as list alldata = [x[1] for x in data] data = dict((k, v) for k, (i, v) in zip(names, data)) names, data = self._do_date_conversions(names, data) index, names = self._make_index(data, alldata, names) # maybe create a mi on the columns names = self._maybe_make_multi_index_columns(names, self.col_names) return index, names, data def _filter_usecols(self, names): # hackish if self.usecols is not None and len(names) != len(self.usecols): names = [name for i, name in enumerate(names) if i in self.usecols or name in self.usecols] return names def _get_index_names(self): names = list(self._reader.header[0]) idx_names = None if self._reader.leading_cols == 0 and self.index_col is not None: (idx_names, names, self.index_col) = _clean_index_names(names, self.index_col) return names, idx_names def _maybe_parse_dates(self, values, index, try_parse_dates=True): if try_parse_dates and self._should_parse_dates(index): values = self._date_conv(values) return values def TextParser(*args, **kwds): """ Converts lists of lists/tuples into DataFrames with proper type inference and optional (e.g. string to datetime) conversion. 
Also enables iterating lazily over chunks of large files Parameters ---------- data : file-like object or list delimiter : separator character to use dialect : str or csv.Dialect instance, default None Ignored if delimiter is longer than 1 character names : sequence, default header : int, default 0 Row to use to parse column labels. Defaults to the first row. Prior rows will be discarded index_col : int or list, default None Column or columns to use as the (possibly hierarchical) index has_index_names: boolean, default False True if the cols defined in index_col have an index name and are not in the header na_values : iterable, default None Custom NA values keep_default_na : bool, default True thousands : str, default None Thousands separator comment : str, default None Comment out remainder of line parse_dates : boolean, default False keep_date_col : boolean, default False date_parser : function, default None skiprows : list of integers Row numbers to skip skip_footer : int Number of line at bottom of file to skip converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. encoding : string, default None Encoding to use for UTF when reading/writing (ex. 'utf-8') squeeze : boolean, default False returns Series if only one column infer_datetime_format: boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. float_precision : string, default None Specifies which converter the C engine should use for floating-point values. The options are None for the ordinary converter, 'high' for the high-precision converter, and 'round_trip' for the round-trip converter. """ kwds['engine'] = 'python' return TextFileReader(*args, **kwds) def count_empty_vals(vals): return sum([1 for v in vals if v == '' or v is None]) def _wrap_compressed(f, compression, encoding=None): """wraps compressed fileobject in a decompressing fileobject NOTE: For all files in Python 3.2 and for bzip'd files under all Python versions, this means reading in the entire file and then re-wrapping it in StringIO. 
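# Illustrative sketch (not part of the original module): TextParser(), documented
# above, turns rows that are already split into lists into a DataFrame via the
# python engine. Data is invented.
from pandas.io.parsers import TextParser

rows = [['a', 'b'], ['1', '2'], ['3', '4']]
df = TextParser(rows, header=0).read()   # columns 'a' and 'b', values inferred as integers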
""" compression = compression.lower() encoding = encoding or get_option('display.encoding') if compression == 'gzip': import gzip f = gzip.GzipFile(fileobj=f) if compat.PY3: from io import TextIOWrapper f = TextIOWrapper(f) return f elif compression == 'bz2': import bz2 if compat.PY3: f = bz2.open(f, 'rt', encoding=encoding) else: # Python 2's bz2 module can't take file objects, so have to # run through decompress manually data = bz2.decompress(f.read()) f = StringIO(data) return f elif compression == 'zip': import zipfile zip_file = zipfile.ZipFile(f) zip_names = zip_file.namelist() if len(zip_names) == 1: file_name = zip_names.pop() f = zip_file.open(file_name) return f elif len(zip_names) == 0: raise ValueError('Corrupted or zero files found in compressed ' 'zip file %s', zip_file.filename) else: raise ValueError('Multiple files found in compressed ' 'zip file %s', str(zip_names)) elif compression == 'xz': lzma = compat.import_lzma() f = lzma.LZMAFile(f) if compat.PY3: from io import TextIOWrapper f = TextIOWrapper(f) return f else: raise ValueError('do not recognize compression method %s' % compression) class PythonParser(ParserBase): def __init__(self, f, **kwds): """ Workhorse function for processing nested list into DataFrame Should be replaced by np.genfromtxt eventually? """ ParserBase.__init__(self, kwds) self.data = None self.buf = [] self.pos = 0 self.line_pos = 0 self.encoding = kwds['encoding'] self.compression = kwds['compression'] self.skiprows = kwds['skiprows'] self.skip_footer = kwds['skip_footer'] self.delimiter = kwds['delimiter'] self.quotechar = kwds['quotechar'] self.escapechar = kwds['escapechar'] self.doublequote = kwds['doublequote'] self.skipinitialspace = kwds['skipinitialspace'] self.lineterminator = kwds['lineterminator'] self.quoting = kwds['quoting'] self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True) self.usecols = _validate_usecols_arg(kwds['usecols']) self.skip_blank_lines = kwds['skip_blank_lines'] self.names_passed = kwds['names'] or None self.has_index_names = False if 'has_index_names' in kwds: self.has_index_names = kwds['has_index_names'] self.verbose = kwds['verbose'] self.converters = kwds['converters'] self.thousands = kwds['thousands'] self.comment = kwds['comment'] self._comment_lines = [] if isinstance(f, compat.string_types): f = _get_handle(f, 'r', encoding=self.encoding, compression=self.compression) elif self.compression: f = _wrap_compressed(f, self.compression, self.encoding) # in Python 3, convert BytesIO or fileobjects passed with an encoding elif compat.PY3 and isinstance(f, compat.BytesIO): from io import TextIOWrapper f = TextIOWrapper(f, encoding=self.encoding) # Set self.data to something that can read lines. if hasattr(f, 'readline'): self._make_reader(f) else: self.data = f # Get columns in two steps: infer from data, then # infer column indices from self.usecols if is is specified. self._col_indices = None self.columns, self.num_original_columns = self._infer_columns() # Now self.columns has the set of columns that we will process. # The original set is stored in self.original_columns. if len(self.columns) > 1: # we are processing a multi index column self.columns, self.index_names, self.col_names, _ = ( self._extract_multi_indexer_columns( self.columns, self.index_names, self.col_names ) ) # Update list of original names to include all indices. 
self.num_original_columns = len(self.columns) else: self.columns = self.columns[0] # get popped off for index self.orig_names = list(self.columns) # needs to be cleaned/refactored # multiple date column thing turning into a real spaghetti factory if not self._has_complex_date_col: (index_names, self.orig_names, self.columns) = ( self._get_index_name(self.columns)) self._name_processed = True if self.index_names is None: self.index_names = index_names if self.parse_dates: self._no_thousands_columns = self._set_no_thousands_columns() else: self._no_thousands_columns = None def _set_no_thousands_columns(self): # Create a set of column ids that are not to be stripped of thousands # operators. noconvert_columns = set() def _set(x): if com.is_integer(x): noconvert_columns.add(x) else: noconvert_columns.add(self.columns.index(x)) if isinstance(self.parse_dates, list): for val in self.parse_dates: if isinstance(val, list): for k in val: _set(k) else: _set(val) elif isinstance(self.parse_dates, dict): for val in self.parse_dates.values(): if isinstance(val, list): for k in val: _set(k) else: _set(val) return noconvert_columns def _make_reader(self, f): sep = self.delimiter if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError('Custom line terminators not supported in ' 'python parser (yet)') class MyDialect(csv.Dialect): delimiter = self.delimiter quotechar = self.quotechar escapechar = self.escapechar doublequote = self.doublequote skipinitialspace = self.skipinitialspace quoting = self.quoting lineterminator = '\n' dia = MyDialect sniff_sep = True if sep is not None: sniff_sep = False dia.delimiter = sep # attempt to sniff the delimiter if sniff_sep: line = f.readline() while self.pos in self.skiprows: self.pos += 1 line = f.readline() line = self._check_comments([line])[0] self.pos += 1 self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter if self.encoding is not None: self.buf.extend(list( UnicodeReader(StringIO(line), dialect=dia, encoding=self.encoding))) else: self.buf.extend(list(csv.reader(StringIO(line), dialect=dia))) if self.encoding is not None: reader = UnicodeReader(f, dialect=dia, encoding=self.encoding, strict=True) else: reader = csv.reader(f, dialect=dia, strict=True) else: def _read(): line = next(f) pat = re.compile(sep) yield pat.split(line.strip()) for line in f: yield pat.split(line.strip()) reader = _read() self.data = reader def read(self, rows=None): try: content = self._get_lines(rows) except StopIteration: if self._first_chunk: content = [] else: raise # done with first read, next time raise StopIteration self._first_chunk = False columns = list(self.orig_names) if not len(content): # pragma: no cover # DataFrame with the right metadata, even though it's length 0 return _get_empty_meta(self.orig_names, self.index_col, self.index_names) # handle new style for names in index count_empty_content_vals = count_empty_vals(content[0]) indexnamerow = None if self.has_index_names and count_empty_content_vals == len(columns): indexnamerow = content[0] content = content[1:] alldata = self._rows_to_cols(content) data = self._exclude_implicit_index(alldata) columns, data = self._do_date_conversions(self.columns, data) data = self._convert_data(data) index, columns = self._make_index(data, alldata, columns, indexnamerow) return index, columns, data def _exclude_implicit_index(self, alldata): if self._implicit_index: excl_indices = self.index_col data = {} offset = 0 for i, col in enumerate(self.orig_names): while i + offset in 
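# Illustrative sketch (not part of the original module): with sep=None the python
# engine's _make_reader() above sniffs the delimiter from the first line using
# csv.Sniffer. Data is invented.
import csv
from pandas.compat import StringIO
import pandas as pd

assert csv.Sniffer().sniff("a;b;c\n").delimiter == ';'

df = pd.read_csv(StringIO("a;b;c\n1;2;3\n4;5;6\n"), sep=None, engine='python')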
excl_indices: offset += 1 data[col] = alldata[i + offset] else: data = dict((k, v) for k, v in zip(self.orig_names, alldata)) return data # legacy def get_chunk(self, size=None): if size is None: size = self.chunksize return self.read(nrows=size) def _convert_data(self, data): # apply converters clean_conv = {} for col, f in compat.iteritems(self.converters): if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean_conv[col] = f return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues, self.verbose, clean_conv) def _infer_columns(self): names = self.names num_original_columns = 0 clear_buffer = True if self.header is not None: header = self.header # we have a mi columns, so read an extra line if isinstance(header, (list, tuple, np.ndarray)): have_mi_columns = True header = list(header) + [header[-1] + 1] else: have_mi_columns = False header = [header] columns = [] for level, hr in enumerate(header): try: line = self._buffered_line() while self.line_pos <= hr: line = self._next_line() except StopIteration: if self.line_pos < hr: raise ValueError( 'Passed header=%s but only %d lines in file' % (hr, self.line_pos + 1)) # We have an empty file, so check # if columns are provided. That will # serve as the 'line' for parsing if not self.names: raise EmptyDataError( "No columns to parse from file") line = self.names[:] unnamed_count = 0 this_columns = [] for i, c in enumerate(line): if c == '': if have_mi_columns: this_columns.append('Unnamed: %d_level_%d' % (i, level)) else: this_columns.append('Unnamed: %d' % i) unnamed_count += 1 else: this_columns.append(c) if not have_mi_columns and self.mangle_dupe_cols: counts = {} for i, col in enumerate(this_columns): cur_count = counts.get(col, 0) if cur_count > 0: this_columns[i] = '%s.%d' % (col, cur_count) counts[col] = cur_count + 1 elif have_mi_columns: # if we have grabbed an extra line, but its not in our # format so save in the buffer, and create an blank extra # line for the rest of the parsing code if hr == header[-1]: lc = len(this_columns) ic = (len(self.index_col) if self.index_col is not None else 0) if lc != unnamed_count and lc - ic > unnamed_count: clear_buffer = False this_columns = [None] * lc self.buf = [self.buf[-1]] columns.append(this_columns) if len(columns) == 1: num_original_columns = len(this_columns) if clear_buffer: self._clear_buffer() if names is not None: if ((self.usecols is not None and len(names) != len(self.usecols)) or (self.usecols is None and len(names) != len(columns[0]))): raise ValueError('Number of passed names did not match ' 'number of header fields in the file') if len(columns) > 1: raise TypeError('Cannot pass names with multi-index ' 'columns') if self.usecols is not None: # Set _use_cols. We don't store columns because they are # overwritten. 
self._handle_usecols(columns, names) else: self._col_indices = None num_original_columns = len(names) columns = [names] else: columns = self._handle_usecols(columns, columns[0]) else: try: line = self._buffered_line() except StopIteration: if not names: raise EmptyDataError( "No columns to parse from file") line = names[:] ncols = len(line) num_original_columns = ncols if not names: if self.prefix: columns = [['%s%d' % (self.prefix, i) for i in range(ncols)]] else: columns = [lrange(ncols)] columns = self._handle_usecols(columns, columns[0]) else: if self.usecols is None or len(names) == num_original_columns: columns = self._handle_usecols([names], names) num_original_columns = len(names) else: if self.usecols and len(names) != len(self.usecols): raise ValueError( 'Number of passed names did not match number of ' 'header fields in the file' ) # Ignore output but set used columns. self._handle_usecols([names], names) columns = [names] num_original_columns = ncols return columns, num_original_columns def _handle_usecols(self, columns, usecols_key): """ Sets self._col_indices usecols_key is used if there are string usecols. """ if self.usecols is not None: if any([isinstance(u, string_types) for u in self.usecols]): if len(columns) > 1: raise ValueError("If using multiple headers, usecols must " "be integers.") col_indices = [] for u in self.usecols: if isinstance(u, string_types): col_indices.append(usecols_key.index(u)) else: col_indices.append(u) else: col_indices = self.usecols columns = [[n for i, n in enumerate(column) if i in col_indices] for column in columns] self._col_indices = col_indices return columns def _buffered_line(self): """ Return a line from buffer, filling buffer if required. """ if len(self.buf) > 0: return self.buf[0] else: return self._next_line() def _empty(self, line): return not line or all(not x for x in line) def _next_line(self): if isinstance(self.data, list): while self.pos in self.skiprows: self.pos += 1 while True: try: line = self._check_comments([self.data[self.pos]])[0] self.pos += 1 # either uncommented or blank to begin with if not self.skip_blank_lines and (self._empty(self.data[ self.pos - 1]) or line): break elif self.skip_blank_lines: ret = self._check_empty([line]) if ret: line = ret[0] break except IndexError: raise StopIteration else: while self.pos in self.skiprows: self.pos += 1 next(self.data) while True: orig_line = next(self.data) line = self._check_comments([orig_line])[0] self.pos += 1 if (not self.skip_blank_lines and (self._empty(orig_line) or line)): break elif self.skip_blank_lines: ret = self._check_empty([line]) if ret: line = ret[0] break self.line_pos += 1 self.buf.append(line) return line def _check_comments(self, lines): if self.comment is None: return lines ret = [] for l in lines: rl = [] for x in l: if (not isinstance(x, compat.string_types) or self.comment not in x): rl.append(x) else: x = x[:x.find(self.comment)] if len(x) > 0: rl.append(x) break ret.append(rl) return ret def _check_empty(self, lines): ret = [] for l in lines: # Remove empty lines and lines with only one whitespace value if (len(l) > 1 or len(l) == 1 and (not isinstance(l[0], compat.string_types) or l[0].strip())): ret.append(l) return ret def _check_thousands(self, lines): if self.thousands is None: return lines nonnum = re.compile('[^-^0-9^%s^.]+' % self.thousands) ret = [] for l in lines: rl = [] for i, x in enumerate(l): if (not isinstance(x, compat.string_types) or self.thousands not in x or (self._no_thousands_columns and i in 
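# Illustrative sketch (not part of the original module): blank lines are dropped
# when skip_blank_lines=True (the default), as _check_empty() above implements for
# the python engine; with skip_blank_lines=False they come through as all-NaN rows.
# Data is invented.
from pandas.compat import StringIO
import pandas as pd

text = "a,b\n1,2\n\n3,4\n"
two_rows = pd.read_csv(StringIO(text), engine='python')
three_rows = pd.read_csv(StringIO(text), engine='python',
                         skip_blank_lines=False)   # middle row is all NaN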
self._no_thousands_columns) or nonnum.search(x.strip())): rl.append(x) else: rl.append(x.replace(self.thousands, '')) ret.append(rl) return ret def _clear_buffer(self): self.buf = [] _implicit_index = False def _get_index_name(self, columns): """ Try several cases to get lines: 0) There are headers on row 0 and row 1 and their total summed lengths equals the length of the next line. Treat row 0 as columns and row 1 as indices 1) Look for implicit index: there are more columns on row 1 than row 0. If this is true, assume that row 1 lists index columns and row 0 lists normal columns. 2) Get index from the columns if it was listed. """ orig_names = list(columns) columns = list(columns) try: line = self._next_line() except StopIteration: line = None try: next_line = self._next_line() except StopIteration: next_line = None # implicitly index_col=0 b/c 1 fewer column names implicit_first_cols = 0 if line is not None: # leave it 0, #2442 # Case 1 if self.index_col is not False: implicit_first_cols = len(line) - self.num_original_columns # Case 0 if next_line is not None: if len(next_line) == len(line) + self.num_original_columns: # column and index names on diff rows self.index_col = lrange(len(line)) self.buf = self.buf[1:] for c in reversed(line): columns.insert(0, c) # Update list of original names to include all indices. orig_names = list(columns) self.num_original_columns = len(columns) return line, orig_names, columns if implicit_first_cols > 0: # Case 1 self._implicit_index = True if self.index_col is None: self.index_col = lrange(implicit_first_cols) index_name = None else: # Case 2 (index_name, columns_, self.index_col) = _clean_index_names(columns, self.index_col) return index_name, orig_names, columns def _rows_to_cols(self, content): zipped_content = list(lib.to_object_array(content).T) col_len = self.num_original_columns zip_len = len(zipped_content) if self._implicit_index: col_len += len(self.index_col) if self.skip_footer < 0: raise ValueError('skip footer cannot be negative') # Loop through rows to verify lengths are correct. if (col_len != zip_len and self.index_col is not False and self.usecols is None): i = 0 for (i, l) in enumerate(content): if len(l) != col_len: break footers = 0 if self.skip_footer: footers = self.skip_footer row_num = self.pos - (len(content) - i + footers) msg = ('Expected %d fields in line %d, saw %d' % (col_len, row_num + 1, zip_len)) raise ValueError(msg) if self.usecols: if self._implicit_index: zipped_content = [ a for i, a in enumerate(zipped_content) if (i < len(self.index_col) or i - len(self.index_col) in self._col_indices)] else: zipped_content = [a for i, a in enumerate(zipped_content) if i in self._col_indices] return zipped_content def _get_lines(self, rows=None): source = self.data lines = self.buf new_rows = None # already fetched some number if rows is not None: # we already have the lines in the buffer if len(self.buf) >= rows: new_rows, self.buf = self.buf[:rows], self.buf[rows:] # need some lines else: rows -= len(self.buf) if new_rows is None: if isinstance(source, list): if self.pos > len(source): raise StopIteration if rows is None: new_rows = source[self.pos:] new_pos = len(source) else: new_rows = source[self.pos:self.pos + rows] new_pos = self.pos + rows # Check for stop rows. n.b.: self.skiprows is a set. 
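# Illustrative sketch (not part of the original module): when data rows carry one
# more field than the header, an implicit index is inferred from the extra leading
# column, mirroring the handling in _get_index_name() above. Data is invented.
from pandas.compat import StringIO
import pandas as pd

df = pd.read_csv(StringIO("a,b\nx,1,2\ny,3,4\n"))
# df.index -> ['x', 'y']; df.columns -> ['a', 'b']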
if self.skiprows: new_rows = [row for i, row in enumerate(new_rows) if i + self.pos not in self.skiprows] lines.extend(new_rows) self.pos = new_pos else: new_rows = [] try: if rows is not None: for _ in range(rows): new_rows.append(next(source)) lines.extend(new_rows) else: rows = 0 while True: try: new_rows.append(next(source)) rows += 1 except csv.Error as inst: if 'newline inside string' in str(inst): row_num = str(self.pos + rows) msg = ('EOF inside string starting with ' 'line ' + row_num) raise Exception(msg) raise except StopIteration: if self.skiprows: new_rows = [row for i, row in enumerate(new_rows) if self.pos + i not in self.skiprows] lines.extend(new_rows) if len(lines) == 0: raise self.pos += len(new_rows) self.buf = [] else: lines = new_rows if self.skip_footer: lines = lines[:-self.skip_footer] lines = self._check_comments(lines) if self.skip_blank_lines: lines = self._check_empty(lines) return self._check_thousands(lines) def _make_date_converter(date_parser=None, dayfirst=False, infer_datetime_format=False): def converter(*date_cols): if date_parser is None: strs = _concat_date_cols(date_cols) try: return tools._to_datetime( com._ensure_object(strs), utc=None, box=False, dayfirst=dayfirst, errors='ignore', infer_datetime_format=infer_datetime_format ) except: return tools.to_datetime( lib.try_parse_dates(strs, dayfirst=dayfirst)) else: try: result = tools.to_datetime( date_parser(*date_cols), errors='ignore') if isinstance(result, datetime.datetime): raise Exception('scalar parser') return result except Exception: try: return tools.to_datetime( lib.try_parse_dates(_concat_date_cols(date_cols), parser=date_parser, dayfirst=dayfirst), errors='ignore') except Exception: return generic_parser(date_parser, *date_cols) return converter def _process_date_conversion(data_dict, converter, parse_spec, index_col, index_names, columns, keep_date_col=False): def _isindex(colspec): return ((isinstance(index_col, list) and colspec in index_col) or (isinstance(index_names, list) and colspec in index_names)) new_cols = [] new_data = {} orig_names = columns columns = list(columns) date_cols = set() if parse_spec is None or isinstance(parse_spec, bool): return data_dict, columns if isinstance(parse_spec, list): # list of column lists for colspec in parse_spec: if lib.isscalar(colspec): if isinstance(colspec, int) and colspec not in data_dict: colspec = orig_names[colspec] if _isindex(colspec): continue data_dict[colspec] = converter(data_dict[colspec]) else: new_name, col, old_names = _try_convert_dates( converter, colspec, data_dict, orig_names) if new_name in data_dict: raise ValueError('New date column already in dict %s' % new_name) new_data[new_name] = col new_cols.append(new_name) date_cols.update(old_names) elif isinstance(parse_spec, dict): # dict of new name to column list for new_name, colspec in compat.iteritems(parse_spec): if new_name in data_dict: raise ValueError('Date column %s already in dict' % new_name) _, col, old_names = _try_convert_dates(converter, colspec, data_dict, orig_names) new_data[new_name] = col new_cols.append(new_name) date_cols.update(old_names) data_dict.update(new_data) new_cols.extend(columns) if not keep_date_col: for c in list(date_cols): data_dict.pop(c) new_cols.remove(c) return data_dict, new_cols def _try_convert_dates(parser, colspec, data_dict, columns): colset = set(columns) colnames = [] for c in colspec: if c in colset: colnames.append(c) elif isinstance(c, int) and c not in columns: colnames.append(str(columns[c])) else: colnames.append(c) 
new_name = '_'.join([str(x) for x in colnames]) to_parse = [data_dict[c] for c in colnames if c in data_dict] new_col = parser(*to_parse) return new_name, new_col, colnames def _clean_na_values(na_values, keep_default_na=True): if na_values is None: if keep_default_na: na_values = _NA_VALUES else: na_values = [] na_fvalues = set() elif isinstance(na_values, dict): if keep_default_na: for k, v in compat.iteritems(na_values): v = set(list(v)) | _NA_VALUES na_values[k] = v na_fvalues = dict([ (k, _floatify_na_values(v)) for k, v in na_values.items() # noqa ]) else: if not com.is_list_like(na_values): na_values = [na_values] na_values = _stringify_na_values(na_values) if keep_default_na: na_values = na_values | _NA_VALUES na_fvalues = _floatify_na_values(na_values) return na_values, na_fvalues def _clean_index_names(columns, index_col): if not _is_index_col(index_col): return None, columns, index_col columns = list(columns) cp_cols = list(columns) index_names = [] # don't mutate index_col = list(index_col) for i, c in enumerate(index_col): if isinstance(c, compat.string_types): index_names.append(c) for j, name in enumerate(cp_cols): if name == c: index_col[i] = j columns.remove(name) break else: name = cp_cols[c] columns.remove(name) index_names.append(name) # hack if isinstance(index_names[0], compat.string_types)\ and 'Unnamed' in index_names[0]: index_names[0] = None return index_names, columns, index_col def _get_empty_meta(columns, index_col, index_names, dtype=None): columns = list(columns) if dtype is None: dtype = {} else: if not isinstance(dtype, dict): dtype = defaultdict(lambda: dtype) # Convert column indexes to column names. dtype = dict((columns[k] if com.is_integer(k) else k, v) for k, v in compat.iteritems(dtype)) if index_col is None or index_col is False: index = Index([]) else: index = [np.empty(0, dtype=dtype.get(index_name, np.object)) for index_name in index_names] index = MultiIndex.from_arrays(index, names=index_names) index_col.sort() for i, n in enumerate(index_col): columns.pop(n - i) col_dict = dict((col_name, np.empty(0, dtype=dtype.get(col_name, np.object))) for col_name in columns) return index, columns, col_dict def _floatify_na_values(na_values): # create float versions of the na_values result = set() for v in na_values: try: v = float(v) if not np.isnan(v): result.add(v) except: pass return result def _stringify_na_values(na_values): """ return a stringified and numeric for these values """ result = [] for x in na_values: result.append(str(x)) result.append(x) try: v = float(x) # we are like 999 here if v == int(v): v = int(v) result.append("%s.0" % v) result.append(str(v)) result.append(v) except: pass try: result.append(int(x)) except: pass return set(result) def _get_na_values(col, na_values, na_fvalues): if isinstance(na_values, dict): if col in na_values: return na_values[col], na_fvalues[col] else: return _NA_VALUES, set() else: return na_values, na_fvalues def _get_col_names(colspec, columns): colset = set(columns) colnames = [] for c in colspec: if c in colset: colnames.append(c) elif isinstance(c, int): colnames.append(columns[c]) return colnames def _concat_date_cols(date_cols): if len(date_cols) == 1: if compat.PY3: return np.array([compat.text_type(x) for x in date_cols[0]], dtype=object) else: return np.array([ str(x) if not isinstance(x, compat.string_types) else x for x in date_cols[0] ], dtype=object) rs = np.array([' '.join([compat.text_type(y) for y in x]) for x in zip(*date_cols)], dtype=object) return rs class 
FixedWidthReader(BaseIterator): """ A reader of fixed-width lines. """ def __init__(self, f, colspecs, delimiter, comment): self.f = f self.buffer = None self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t ' self.comment = comment if colspecs == 'infer': self.colspecs = self.detect_colspecs() else: self.colspecs = colspecs if not isinstance(self.colspecs, (tuple, list)): raise TypeError("column specifications must be a list or tuple, " "input was a %r" % type(colspecs).__name__) for colspec in self.colspecs: if not (isinstance(colspec, (tuple, list)) and len(colspec) == 2 and isinstance(colspec[0], (int, np.integer, type(None))) and isinstance(colspec[1], (int, np.integer, type(None)))): raise TypeError('Each column specification must be ' '2 element tuple or list of integers') def get_rows(self, n): rows = [] for i, row in enumerate(self.f, 1): rows.append(row) if i >= n: break self.buffer = iter(rows) return rows def detect_colspecs(self, n=100): # Regex escape the delimiters delimiters = ''.join([r'\%s' % x for x in self.delimiter]) pattern = re.compile('([^%s]+)' % delimiters) rows = self.get_rows(n) max_len = max(map(len, rows)) mask = np.zeros(max_len + 1, dtype=int) if self.comment is not None: rows = [row.partition(self.comment)[0] for row in rows] for row in rows: for m in pattern.finditer(row): mask[m.start():m.end()] = 1 shifted = np.roll(mask, 1) shifted[0] = 0 edges = np.where((mask ^ shifted) == 1)[0] return list(zip(edges[::2], edges[1::2])) def __next__(self): if self.buffer is not None: try: line = next(self.buffer) except StopIteration: self.buffer = None line = next(self.f) else: line = next(self.f) # Note: 'colspecs' is a sequence of half-open intervals. return [line[fromm:to].strip(self.delimiter) for (fromm, to) in self.colspecs] class FixedWidthFieldParser(PythonParser): """ Specialization that Converts fixed-width fields into DataFrames. See PythonParser for details. """ def __init__(self, f, **kwds): # Support iterators, convert to a list. self.colspecs = kwds.pop('colspecs') PythonParser.__init__(self, f, **kwds) def _make_reader(self, f): self.data = FixedWidthReader(f, self.colspecs, self.delimiter, self.comment)
mit
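The FixedWidthReader in the file above infers column boundaries by building a character-occupancy mask over the first rows and reading the 0/1 edges off that mask. A minimal standalone sketch of that idea, using made-up sample rows and NumPy only (not the pandas implementation itself):

import re
import numpy as np

def infer_colspecs(rows):
    # Mark every character position covered by a non-whitespace token in
    # any row, then read the edges off the combined mask -- the same idea
    # as FixedWidthReader.detect_colspecs above. Delimiters are assumed
    # to be plain spaces/tabs here.
    pattern = re.compile(r'([^ \t]+)')
    max_len = max(map(len, rows))
    mask = np.zeros(max_len + 1, dtype=int)
    for row in rows:
        for m in pattern.finditer(row):
            mask[m.start():m.end()] = 1
    shifted = np.roll(mask, 1)
    shifted[0] = 0
    edges = np.where((mask ^ shifted) == 1)[0]
    return list(zip(edges[::2], edges[1::2]))

rows = ["id   name    value",
        "1    alpha   10.5 ",
        "2    beta     2.25"]
print(infer_colspecs(rows))   # [(0, 2), (5, 10), (13, 18)]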
JPFrancoia/scikit-learn
sklearn/ensemble/tests/test_bagging.py
43
28175
""" Testing for the bagging ensemble module (sklearn.ensemble.bagging). """ # Author: Gilles Louppe # License: BSD 3 clause import numpy as np from sklearn.base import BaseEstimator from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.model_selection import GridSearchCV, ParameterGrid from sklearn.ensemble import BaggingClassifier, BaggingRegressor from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.svm import SVC, SVR from sklearn.pipeline import make_pipeline from sklearn.feature_selection import SelectKBest from sklearn.model_selection import train_test_split from sklearn.datasets import load_boston, load_iris, make_hastie_10_2 from sklearn.utils import check_random_state from scipy.sparse import csc_matrix, csr_matrix rng = check_random_state(0) # also load the iris dataset # and randomly permute it iris = load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_classification(): # Check classification for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyClassifier(), Perceptron(), DecisionTreeClassifier(), KNeighborsClassifier(), SVC()]: for params in grid: BaggingClassifier(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_classification(): # Check classification for various parameter settings on sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVC, self).fit(X, y) self.data_type_ = type(X) return self rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']: # Trained on sparse format sparse_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = getattr(sparse_classifier, f)(X_test_sparse) # Trained on dense format dense_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train, y_train) dense_results = getattr(dense_classifier, f)(X_test) assert_array_equal(sparse_results, dense_results) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([t == sparse_type for t in types]) def test_regression(): # Check regression for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [0.5, 1.0], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyRegressor(), DecisionTreeRegressor(), KNeighborsRegressor(), SVR()]: for params in grid: BaggingRegressor(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_regression(): # Check regression for various parameter settings on sparse input. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) class CustomSVR(SVR): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVR, self).fit(X, y) self.data_type_ = type(X) return self parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: # Trained on sparse format sparse_classifier = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_results = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train, y_train).predict(X_test) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert_array_equal(sparse_results, dense_results) assert all([t == sparse_type for t in types]) assert_array_equal(sparse_results, dense_results) def test_bootstrap_samples(): # Test that bootstrapping samples generate non-perfect base estimators. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) base_estimator = DecisionTreeRegressor().fit(X_train, y_train) # without bootstrap, all trees are perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=False, random_state=rng).fit(X_train, y_train) assert_equal(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) # with bootstrap, trees are no longer perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=True, random_state=rng).fit(X_train, y_train) assert_greater(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) def test_bootstrap_features(): # Test that bootstrapping features may generate duplicate features. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=False, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_equal(boston.data.shape[1], np.unique(features).shape[0]) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=True, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_greater(boston.data.shape[1], np.unique(features).shape[0]) def test_probability(): # Predict probabilities. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) with np.errstate(divide="ignore", invalid="ignore"): # Normal case ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(), random_state=rng).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) # Degenerate case, where some classes are missing ensemble = BaggingClassifier(base_estimator=LogisticRegression(), random_state=rng, max_samples=5).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) for base_estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier(base_estimator=base_estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingClassifier(base_estimator=base_estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_oob_score_regression(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=50, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_single_estimator(): # Check singleton ensembles. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(), n_estimators=1, bootstrap=False, bootstrap_features=False, random_state=rng).fit(X_train, y_train) clf2 = KNeighborsRegressor().fit(X_train, y_train) assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) def test_error(): # Test that it gives proper exception on deficient input. 
X, y = iris.data, iris.target base = DecisionTreeClassifier() # Test max_samples assert_raises(ValueError, BaggingClassifier(base, max_samples=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=1000).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples="foobar").fit, X, y) # Test max_features assert_raises(ValueError, BaggingClassifier(base, max_features=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=5).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features="foobar").fit, X, y) # Test support of decision_function assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function')) def test_parallel_classification(): # Check parallel classification. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) # predict_proba ensemble.set_params(n_jobs=1) y1 = ensemble.predict_proba(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y3) # decision_function ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) decisions1 = ensemble.decision_function(X_test) ensemble.set_params(n_jobs=2) decisions2 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions2) ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=1, random_state=0).fit(X_train, y_train) decisions3 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions3) def test_parallel_regression(): # Check parallel regression. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) y1 = ensemble.predict(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict(X_test) assert_array_almost_equal(y1, y3) def test_gridsearch(): # Check that bagging ensembles can be grid-searched. # Transform iris into a binary classification task X, y = iris.data, iris.target y[y == 2] = 1 # Grid search with scoring based on decision_function parameters = {'n_estimators': (1, 2), 'base_estimator__C': (1, 2)} GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) def test_base_estimator(): # Check base_estimator and its default values. 
rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, Perceptron)) # Regression X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, SVR)) def test_bagging_with_pipeline(): estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2) estimator.fit(iris.data, iris.target) assert_true(isinstance(estimator[0].steps[-1][1].random_state, int)) class DummyZeroEstimator(BaseEstimator): def fit(self, X, y): self.classes_ = np.unique(y) return self def predict(self, X): return self.classes_[np.zeros(X.shape[0], dtype=int)] def test_bagging_sample_weight_unsupported_but_passed(): estimator = BaggingClassifier(DummyZeroEstimator()) rng = check_random_state(0) estimator.fit(iris.data, iris.target).predict(iris.data) assert_raises(ValueError, estimator.fit, iris.data, iris.target, sample_weight=rng.randint(10, size=(iris.data.shape[0]))) def test_warm_start(random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BaggingClassifier(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) def test_warm_start_smaller_n_estimators(): # Test if warm start'ed second fit with smaller n_estimators raises error. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_equal_n_estimators(): # Test that nothing happens when fitting without increasing n_estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # modify X to nonsense values, this should not change anything X_train += 1. 
assert_warns_message(UserWarning, "Warm-start fitting without increasing n_estimators does not", clf.fit, X_train, y_train) assert_array_equal(y_pred, clf.predict(X_test)) def test_warm_start_equivalence(): # warm started classifier with 5+5 estimators should be equivalent to # one classifier with 10 estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) clf_ws.fit(X_train, y_train) clf_ws.set_params(n_estimators=10) clf_ws.fit(X_train, y_train) y1 = clf_ws.predict(X_test) clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) clf.fit(X_train, y_train) y2 = clf.predict(X_test) assert_array_almost_equal(y1, y2) def test_warm_start_with_oob_score_fails(): # Check using oob_score and warm_start simultaneously fails X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) assert_raises(ValueError, clf.fit, X, y) def test_oob_score_removed_on_warm_start(): X, y = make_hastie_10_2(n_samples=2000, random_state=1) clf = BaggingClassifier(n_estimators=50, oob_score=True) clf.fit(X, y) clf.set_params(warm_start=True, oob_score=False, n_estimators=100) clf.fit(X, y) assert_raises(AttributeError, getattr, clf, "oob_score_") def test_oob_score_consistency(): # Make sure OOB scores are identical when random_state, estimator, and # training data are fixed and fitting is done twice X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5, oob_score=True, random_state=1) assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_) def test_estimators_samples(): # Check that format of estimators_samples_ is correct and that results # generated at fit time can be identically reproduced at a later time # using data saved in object attributes. X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5, max_features=0.5, random_state=1, bootstrap=False) bagging.fit(X, y) # Get relevant attributes estimators_samples = bagging.estimators_samples_ estimators_features = bagging.estimators_features_ estimators = bagging.estimators_ # Test for correct formatting assert_equal(len(estimators_samples), len(estimators)) assert_equal(len(estimators_samples[0]), len(X)) assert_equal(estimators_samples[0].dtype.kind, 'b') # Re-fit single estimator to test for consistent sampling estimator_index = 0 estimator_samples = estimators_samples[estimator_index] estimator_features = estimators_features[estimator_index] estimator = estimators[estimator_index] X_train = (X[estimator_samples])[:, estimator_features] y_train = y[estimator_samples] orig_coefs = estimator.coef_ estimator.fit(X_train, y_train) new_coefs = estimator.coef_ assert_array_almost_equal(orig_coefs, new_coefs) def test_max_samples_consistency(): # Make sure validated max_samples and original max_samples are identical # when valid integer max_samples supplied by user max_samples = 100 X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=max_samples, max_features=0.5, random_state=1) bagging.fit(X, y) assert_equal(bagging._max_samples, max_samples)
bsd-3-clause
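A minimal usage sketch of the bagging API exercised by the tests above, assuming the same scikit-learn era in which the constructor parameter is still named base_estimator:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, random_state=0)

# Bag 25 decision trees, each fit on a 50% bootstrap sample of the rows
# and a random half of the features, with out-of-bag scoring enabled.
clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                        n_estimators=25,
                        max_samples=0.5,
                        max_features=0.5,
                        bootstrap=True,
                        oob_score=True,
                        random_state=0).fit(X_train, y_train)

print("OOB score:", clf.oob_score_)
print("Test accuracy:", clf.score(X_test, y_test))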
josesho/bootstrap_contrast
bootstrap_contrast/plot_tools.py
2
4118
import matplotlib.pyplot as plt import seaborn as sns import numpy as np from .misc_tools import merge_two_dicts def halfviolin(v, half = 'right', color = 'k'): for b in v['bodies']: mVertical = np.mean(b.get_paths()[0].vertices[:, 0]) mHorizontal = np.mean(b.get_paths()[0].vertices[:, 1]) if half is 'left': b.get_paths()[0].vertices[:, 0] = np.clip(b.get_paths()[0].vertices[:, 0], -np.inf, mVertical) if half is 'right': b.get_paths()[0].vertices[:, 0] = np.clip(b.get_paths()[0].vertices[:, 0], mVertical, np.inf) if half is 'bottom': b.get_paths()[0].vertices[:, 1] = np.clip(b.get_paths()[0].vertices[:, 1], -np.inf, mHorizontal) if half is 'top': b.get_paths()[0].vertices[:, 1] = np.clip(b.get_paths()[0].vertices[:, 1], mHorizontal, np.inf) b.set_color(color) def align_yaxis(ax1, v1, ax2, v2): """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1""" # Taken from # http://stackoverflow.com/questions/7630778/matplotlib-align-origin-of-right-axis-with-specific-left-axis-value _, y1 = ax1.transData.transform((0, v1)) _, y2 = ax2.transData.transform((0, v2)) inv = ax2.transData.inverted() _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2)) miny, maxy = ax2.get_ylim() ax2.set_ylim(miny+dy, maxy+dy) def rotate_ticks(axes, angle=45, alignment='right'): for tick in axes.get_xticklabels(): tick.set_rotation(angle) tick.set_horizontalalignment(alignment) def plot_means(data,x,y,ax=None,xwidth=0.5,zorder=1,linestyle_kw=None): """Takes a pandas DataFrame and plots the `y` means of each group in `x` as horizontal lines. Keyword arguments: data: pandas DataFrame. This DataFrame should be in 'wide' format. x,y: string. x and y columns to be plotted. xwidth: float, default 0.5 The horizontal spread of the line. The default is 0.5, which means the mean line will stretch 0.5 (in data coordinates) on both sides of the xtick. zorder: int, default 1 This is the plot order of the means on the axes. See http://matplotlib.org/examples/pylab_examples/zorder_demo.html linestyle_kw: dict, default None Dictionary with kwargs passed to the `meanprops` argument of `plt.boxplot`. """ # Set default linestyle parameters. default_linestyle_kw=dict( linewidth=1.5, color='k', linestyle='-') # If user has specified kwargs for linestyle, merge with default params. if linestyle_kw is None: meanlinestyle_kw=default_linestyle_kw else: meanlinestyle_kw=merge_two_dicts(default_linestyle_kw,linestyle_kw) # Set axes for plotting. if ax is None: ax=plt.gca() # Use sns.boxplot to create the mean lines. sns.boxplot(data=data, x=x,y=y, ax=ax, showmeans=True, meanline=True, showbox=False, showcaps=False, showfliers=False, whis=0, width=xwidth, zorder=int(zorder), meanprops=meanlinestyle_kw, medianprops=dict(linewidth=0) ) def plot_std(data, x, y, offset=0, ax=None, **kwargs): '''Convenience function to plot the standard devations as vertical errorbars.''' if ax is None: ax = plt.gca() keys = kwargs.keys() if 'zorder' not in keys: kwargs['zorder'] = 5 if 'lw' not in keys: kwargs['lw'] = 2.25, if 'color' not in keys: kwargs['color'] = 'k' if 'alpha' not in keys: kwargs['alpha'] = 0.5 num_groups = len(data[x].unique()) ax.errorbar(x=np.array(range(0, num_groups)) + offset, y=data.groupby(x)[y].mean().tolist(), yerr=data.groupby(x)[y].std().tolist(), fmt='none', **kwargs)
mit
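halfviolin in the file above works by clipping the x-coordinates of each violin body at the body's own mean. A standalone sketch of the same trick applied to a plain matplotlib violinplot, with synthetic data (not part of the bootstrap_contrast package):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
data = [rng.normal(loc, 1.0, 200) for loc in (0, 1)]

fig, ax = plt.subplots()
parts = ax.violinplot(data, positions=[1, 2], showextrema=False)

# Keep only the right half of each violin by clipping its path vertices
# at the body's mean x position, exactly as halfviolin() does above.
for body in parts['bodies']:
    verts = body.get_paths()[0].vertices
    mid_x = np.mean(verts[:, 0])
    verts[:, 0] = np.clip(verts[:, 0], mid_x, np.inf)
    body.set_color('k')

plt.show()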
PilleniusMC/Pixeldrohne
main.py
1
7098
# Um den gesamten Bot auszuführen muss nur noch die Datei genutzt werden import asyncio import aiohttp import safygiphy from datetime import datetime, time import discord import io import matplotlib.pyplot as plt import matplotlib from discord.ext import commands from custom_commands import CustomCommands import pxldrn import functools import keys import random giphy = safygiphy.Giphy() bot = commands.Bot(command_prefix=keys.prefix, case_insensitive=True) bot.remove_command("help") plt.rcParams.update({'figure.autolayout': True}) matplotlib.rc('xtick', labelsize=10) @bot.event async def on_ready(): print("Bot-Info:\nName: " + bot.user.name + "\nId: " + str(bot.user.id)) await bot.change_presence(activity=discord.Game(f"mit {bot.command_prefix}help")) global st_datetime st_datetime = datetime.now() @bot.command(no_pm=True) async def say(ctx, *, arg): await ctx.message.delete() time = 0.2 * len(arg.split(' ')) async with ctx.channel.typing(): await asyncio.sleep(time) await ctx.channel.send(arg) @say.error async def say_error(ctx, error): if isinstance(error, commands.MissingRequiredArgument): await ctx.message.delete() await ctx.channel.send("Du musst mir schon etwas geben, dass ich sagen kann.", delete_after=3) @bot.command() async def würfel(ctx, augen: int, anzahl: int): if augen <= 100: if anzahl <= 1000000: async with ctx.channel.typing(): def blocking(): liste = [] for i in range(anzahl): rando = random.randint(1, augen) liste.append(rando) return liste partial = functools.partial(blocking) liste = await bot.loop.run_in_executor(None, partial) simple = [] eyes = [] for i in range(augen): count = liste.count(i + 1) eyes.append(i + 1) simple.append(count) plt.bar(eyes, simple, tick_label=eyes) plt.xlabel('Augen') plt.ylabel('Anzahl') plt.title('zufällige Würfel') buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) await ctx.send(file=discord.File(buf, "plot.png")) plt.clf() else: await ctx.send("Why?") else: await ctx.send("Why?") @würfel.error async def würfel_error(ctx, error): if isinstance(error, ValueError): await ctx.send("Mit den Werten kann ich leider nicht arbeiten.") if isinstance(error, commands.BadArgument): await ctx.send("Äh, ich brauche schon Zahlen.") if isinstance(error, commands.MissingRequiredArgument): await ctx.send("Es tut mir leid, aber um zu funktionieren brauche ich die Anzahl an Augen **und** die Anzahl" "der Würfe") @bot.command(no_pm=True) async def avatar(ctx, user: discord.Member): async with aiohttp.ClientSession() as session: async with session.get(user.avatar_url) as resp: img = await resp.read() await ctx.send(file=discord.File(img, 'avatar.gif')) @avatar.error async def avatar_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send("Sorry, den Avatar dieses Nutzers kann ich nicht abrufen") if isinstance(error, commands.MissingRequiredArgument): async with aiohttp.ClientSession() as session: async with session.get(ctx.message.author.avatar_url) as resp: img = await resp.read() await ctx.send(file=discord.File(img, 'avatar.gif')) @bot.command(no_pm=True) async def gif(ctx, *, arg): async with ctx.channel.typing(): rgif = giphy.random(tag=arg) async with aiohttp.ClientSession() as session: async with session.get(str(rgif.get("data", {}).get('image_original_url'))) as resp: rgif = await resp.read() await ctx.send(file=discord.File(rgif, 'gif.gif')) @gif.error async def gif_error(ctx, error): if isinstance(error, commands.CommandInvokeError): await ctx.send("Irgendetwas ist schiefgegangen. 
Bitte versuche es nochmal") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send("Bitte gib einen Suchbegriff ein.") else: await ctx.send(str(error)) @bot.command(no_pm=True) async def zahl(ctx, z_min: int, z_max: int): await ctx.send(f"Deine Zahl ist: {random.randint(z_min, z_max)}") @zahl.error async def zahl_error(ctx, error): if isinstance(error, commands.MissingRequiredArgument): await ctx.send("Du musst mir zwei Zahlen geben, die erste das Minimum, die zweite das Maximum.") @bot.group() async def zitat(ctx): if ctx.invoked_subcommand is None: async with ctx.channel.typing(): zitat = random.choice(pxldrn.zitate.zitate) await ctx.send(zitat) @zitat.command(name="hidden") async def hidden(ctx): async with ctx.channel.typing(): zitat = random.choice(pxldrn.zitate.zitate) await ctx.send(zitat, delete_after=10) @zitat.command(name="write") async def write(ctx, *, arg): if not arg is None: async with ctx.channel.typing(): channel = bot.get_channel(502539843012657153) await channel.send(f"Zitat von {ctx.message.author.name}: {arg}") await ctx.send(f"Dein Zitat `{arg}` wurde der Liste hinzugefügt", delete_after=10) else: pass @write.error async def write_error(ctx, error): if isinstance(error, commands.MissingRequiredArgument): await ctx.send("Sorry, wenn du kein Zitat angibst, kann ich es auch nicht hinzufügen.") @bot.command(no_pm=True) async def uptime(ctx): result = datetime.now() - st_datetime resultd = datetime.utcfromtimestamp(result.total_seconds()).time() tstring = None if resultd.second == 1: tstring = f"{resultd.second} Sekunde" else: tstring = f"{resultd.second} Sekunden." if resultd.minute > 0: if resultd.minute == 1: tstring = f"{resultd.minute} Minute und " + tstring else: tstring = f"{resultd.minute} Minuten und " + tstring if resultd.hour > 0: if resultd.hour == 1: tstring = f"{resultd.hour} Stunde, " + tstring else: tstring = f"{resultd.hour} Stunden, " + tstring if result.days > 0: if result.days == 1: tstring = f"{result.days} Tag, " + tstring else: tstring = f"{result.days} Tagen " + tstring await ctx.send(f"Der Bot läuft schon seit {tstring}") bot.add_cog(pxldrn.helps.Help(bot)) bot.add_cog(pxldrn.music.Voice(bot)) bot.add_cog(pxldrn.moderation.Mods(bot)) bot.add_cog(pxldrn.moderation.Admin(bot)) bot.add_cog(pxldrn.minigames.SchereSteinPapier(bot)) bot.add_cog(CustomCommands(bot)) bot.add_cog(pxldrn.minigames.Minesweeper(bot)) bot.run(keys.token)
lgpl-2.1
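The würfel command in the bot above keeps the Discord event loop responsive by pushing its blocking dice simulation into an executor via functools.partial and run_in_executor. A minimal asyncio-only sketch of that pattern outside discord.py; roll_dice is a hypothetical stand-in for the bot's inner blocking() function:

import asyncio
import functools
import random

def roll_dice(sides, count):
    # Blocking, CPU-bound work: simulate `count` rolls of a `sides`-sided die.
    return [random.randint(1, sides) for _ in range(count)]

async def main():
    loop = asyncio.get_running_loop()
    # Run the blocking function in the default thread pool so the event
    # loop stays free, mirroring bot.loop.run_in_executor(None, partial).
    partial = functools.partial(roll_dice, 6, 1_000_000)
    rolls = await loop.run_in_executor(None, partial)
    print("mean roll:", sum(rolls) / len(rolls))

asyncio.run(main())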
msunardi/PTVS
Python/Product/Analyzer/BuiltinScraperTests.py
18
18954
# ############################################################################ # # Copyright (c) Microsoft Corporation. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ########################################################################### import re import unittest from pprint import pformat from BuiltinScraper import parse_doc_str, BUILTIN, __builtins__, get_overloads_from_doc_string, TOKENS_REGEX try: unicode except NameError: from BuiltinScraper import unicode import sys class Test_BuiltinScraperTests(unittest.TestCase): def check_doc_str(self, doc, module_name, func_name, expected, mod=None, extra_args=[], obj_class=None): r = parse_doc_str(doc, module_name, mod, func_name, extra_args, obj_class) # Quick pass if everything matches if r == expected: return msg = 'Expected:\n%s\nActual\n%s' % (pformat(expected), pformat(r)) self.assertEqual(len(r), len(expected), msg) def check_dict(e, a, indent): if e == a: return missing_keys = set(e.keys()) - set(a.keys()) extra_keys = set(a.keys()) - set(e.keys()) mismatched_keys = [k for k in set(a.keys()) & set(e.keys()) if a[k] != e[k]] if missing_keys: print('%sDid not received following keys: %s' % (indent, ', '.join(missing_keys))) if extra_keys: print('%sDid not expect following keys: %s' % (indent, ', '.join(extra_keys))) for k in mismatched_keys: if isinstance(e[k], dict) and isinstance(a[k], dict): check_dict(e[k], a[k], indent + ' ') elif (isinstance(e[k], tuple) and isinstance(a[k], tuple) or isinstance(e[k], list) and isinstance(a[k], list)): check_seq(e[k], a[k], indent + ' ') else: print('%sExpected "%s": "%s"' % (indent, k, e[k])) print('%sActual "%s": "%s"' % (indent, k, a[k])) print('') def check_seq(e, a, indent): if e == a: return for i, (e2, a2) in enumerate(zip(e, a)): if isinstance(e2, dict) and isinstance(a2, dict): check_dict(e2, a2, indent + ' ') elif (isinstance(e2, tuple) and isinstance(a2, tuple) or isinstance(e2, list) and isinstance(a2, list)): check_seq(e2, a2, indent + ' ') elif e1 != a1: print('%sExpected "%s"' % (indent, e2)) print('%sActual "%s"' % (indent, a2)) print('') for e1, a1 in zip(expected, r): check_dict(e1, a1, '') self.fail(msg) def test_regex(self): self.assertSequenceEqual( [i.strip() for i in re.split(TOKENS_REGEX, 'f(\'\', \'a\', \'a\\\'b\', "", "a", "a\\\"b")') if i.strip()], ['f', '(', "''", ',', "'a'", ',', "'a\\'b'", ',', '""', ',', '"a"', ',', '"a\\"b"', ')'] ) self.assertSequenceEqual( [i.strip() for i in re.split(TOKENS_REGEX, 'f(1, 1., -1, -1.)') if i.strip()], ['f', '(', '1', ',', '1.', ',', '-1', ',', '-1.', ')'] ) self.assertSequenceEqual( [i.strip() for i in re.split(TOKENS_REGEX, 'f(a, *a, **a, ...)') if i.strip()], ['f', '(', 'a', ',', '*', 'a', ',', '**', 'a', ',', '...', ')'] ) self.assertSequenceEqual( [i.strip() for i in re.split(TOKENS_REGEX, 'f(a:123, a=123) --> => ->') if i.strip()], ['f', '(', 'a', ':', '123', ',', 'a', '=', '123', ')', '-->', '=>', '->'] ) def test_numpy_1(self): self.check_doc_str( """arange([start,] stop[, step,], dtype=None) Returns ------- out : ndarray""", 'numpy', 'arange', [{ 'doc': 'Returns\n -------\n out : 
ndarray', 'ret_type': [('', 'ndarray')], 'args': ( {'name': 'start', 'default_value':'None'}, {'name': 'stop'}, {'name': 'step', 'default_value': 'None'}, {'name': 'dtype', 'default_value':'None'}, ) }] ) def test_numpy_2(self): self.check_doc_str( """arange([start,] stop[, step,], dtype=None) Return - out : ndarray""", 'numpy', 'arange', [{ 'doc': 'Return - out : ndarray', 'ret_type': [('', 'ndarray')], 'args': ( {'name': 'start', 'default_value':'None'}, {'name': 'stop'}, {'name': 'step', 'default_value': 'None'}, {'name': 'dtype', 'default_value':'None'}, ) }] ) def test_reduce(self): self.check_doc_str( 'reduce(function, sequence[, initial]) -> value', BUILTIN, 'reduce', mod=__builtins__, expected = [{ 'args': ( {'name': 'function'}, {'name': 'sequence'}, {'default_value': 'None', 'name': 'initial'} ), 'doc': '', 'ret_type': [('', 'value')] }] ) def test_pygame_draw_arc(self): self.check_doc_str( 'pygame.draw.arc(Surface, color, Rect, start_angle, stop_angle, width=1): return Rect', 'draw', 'arc', [{ 'args': ( {'name': 'Surface'}, {'name': 'color'}, {'name': 'Rect'}, {'name': 'start_angle'}, {'name': 'stop_angle'}, {'default_value': '1', 'name': 'width'} ), 'doc': '', 'ret_type': [('', 'Rect')] }] ) def test_isdigit(self): self.check_doc_str( '''B.isdigit() -> bool Return True if all characters in B are digits and there is at least one character in B, False otherwise.''', 'bytes', 'isdigit', [{ 'args': (), 'doc': 'Return True if all characters in B are digits\nand there is at least one character in B, False otherwise.', 'ret_type': [(BUILTIN, 'bool')] }] ) def test_init(self): self.check_doc_str( 'x.__init__(...) initializes x; see help(type(x)) for signature', 'str', '__init__', [{'args': ({'arg_format': '*', 'name': 'args'},), 'doc': 'initializes x; see help(type(x)) for signature'}] ) def test_find(self): self.check_doc_str( 'S.find(sub [,start [,end]]) -> int', 'str', 'find', [{ 'args': ( {'name': 'sub'}, {'default_value': 'None', 'name': 'start'}, {'default_value': 'None', 'name': 'end'} ), 'doc': '', 'ret_type': [(BUILTIN, 'int')] }] ) def test_format(self): self.check_doc_str( 'S.format(*args, **kwargs) -> unicode', 'str', 'format', [{ 'args': ( {'arg_format': '*', 'name': 'args'}, {'arg_format': '**', 'name': 'kwargs'} ), 'doc': '', 'ret_type': [(BUILTIN, unicode.__name__)] }] ) def test_ascii(self): self.check_doc_str( "'ascii(object) -> string\n\nReturn the same as repr(). In Python 3.x, the repr() result will\\ncontain printable characters unescaped, while the ascii() result\\nwill have such characters backslash-escaped.'", 'future_builtins', 'ascii', [{ 'args': ({'name': 'object'},), 'doc': "Return the same as repr(). 
In Python 3.x, the repr() result will\\ncontain printable characters unescaped, while the ascii() result\\nwill have such characters backslash-escaped.'", 'ret_type': [(BUILTIN, 'str')] }] ) def test_preannotation(self): self.check_doc_str( 'f(INT class_code) => SpaceID', 'fob', 'f', [{ 'args': ({'name': 'class_code', 'type': [(BUILTIN, 'int')]},), 'doc': '', 'ret_type': [('', 'SpaceID')] }]) def test_compress(self): self.check_doc_str( 'compress(data, selectors) --> iterator over selected data\n\nReturn data elements', 'itertools', 'compress', [{ 'args': ({'name': 'data'}, {'name': 'selectors'}), 'doc': 'Return data elements', 'ret_type': [('', 'iterator')] }] ) def test_isinstance(self): self.check_doc_str( 'isinstance(object, class-or-type-or-tuple) -> bool\n\nReturn whether an object is an ' 'instance of a class or of a subclass thereof.\nWith a type as second argument, ' 'return whether that is the object\'s type.\nThe form using a tuple, isinstance(x, (A, B, ...)),' ' is a shortcut for\nisinstance(x, A) or isinstance(x, B) or ... (etc.).', BUILTIN, 'isinstance', [{ 'args': ({'name': 'object'}, {'name': 'class-or-type-or-tuple'}), 'doc': "Return whether an object is an instance of a class or of a subclass thereof.\n" "With a type as second argument, return whether that is the object's type.\n" "The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for\n" "isinstance(x, A) or isinstance(x, B) or ... (etc.).", 'ret_type': [(BUILTIN, 'bool')] }] ) def test_tuple_parameters(self): self.check_doc_str( 'pygame.Rect(left, top, width, height): return Rect\n' 'pygame.Rect((left, top), (width, height)): return Rect\n' 'pygame.Rect(object): return Rect\n' 'pygame object for storing rectangular coordinates', 'pygame', 'Rect', [{ 'args': ({'name': 'left'}, {'name': 'top'}, {'name': 'width'}, {'name': 'height'}), 'doc': 'pygame object for storing rectangular coordinates', 'ret_type': [('', 'Rect')] }, { 'args': ({'name': 'left, top'}, {'name': 'width, height'}), 'doc': 'pygame object for storing rectangular coordinates', 'ret_type': [('', 'Rect')] }, { 'args': ({'name': 'object'},), 'doc': 'pygame object for storing rectangular coordinates', 'ret_type': [('', 'Rect')] }] ) def test_read(self): self.check_doc_str( 'read([size]) -> read at most size bytes, returned as a string.\n\n' 'If the size argument is negative or omitted, read until EOF is reached.\n' 'Notice that when in non-blocking mode, less data than what was requested\n' 'may be returned, even if no size parameter was given.', BUILTIN, 'read', mod=__builtins__, expected=[{ 'args': ({'default_value': 'None', 'name': 'size'},), 'doc': 'read at most size bytes, returned as a string.\n\nIf the size argument is negative or omitted, read until EOF is reached.\nNotice that when in non-blocking mode, less data than what was requested\nmay be returned, even if no size parameter was given.', 'ret_type': [('', '')] }] ) r = get_overloads_from_doc_string( 'read([size]) -> read at most size bytes, returned as a string.\n\n' 'If the size argument is negative or omitted, read until EOF is reached.\n' 'Notice that when in non-blocking mode, less data than what was requested\n' 'may be returned, even if no size parameter was given.', __builtins__, None, 'read' ) self.assertEqual( r, [{ 'args': ({'default_value': 'None', 'name': 'size'},), 'doc': 'read at most size bytes, returned as a string.\n\nIf the size argument is negative or omitted, read until EOF is reached.\nNotice that when in non-blocking mode, less data than what was requested\nmay 
be returned, even if no size parameter was given.', 'ret_type': [('', '')] }], repr(r) ) def test_new(self): self.check_doc_str( 'T.__new__(S, ...) -> a new object with type S, a subtype of T', 'struct', '__new__', [{ 'ret_type': [('', '')], 'doc': 'a new object with type S, a subtype of T', 'args': ({'name': 'S'}, {'arg_format': '*', 'name': 'args'}) }] ) def test_C_prototype(self): self.check_doc_str( 'GetDriverByName(char const * name) -> Driver', '', 'GetDriverByName', [{ 'ret_type': [('', 'Driver')], 'doc': '', 'args': ({'name': 'name', 'type': [(BUILTIN, 'str')]},), }] ) def test_chmod(self): self.check_doc_str( 'chmod(path, mode, *, dir_fd=None, follow_symlinks=True)', 'nt', 'chmod', [{ 'doc': '', 'args': ( {'name': 'path'}, {'name': 'mode'}, {'name': 'args', 'arg_format': '*'}, {'name': 'dir_fd', 'default_value': 'None'}, {'name': 'follow_symlinks', 'default_value': 'True'} ) }] ) def test_open(self): if sys.version_info[0] >= 3: expect_ret_type = ('_io', '_IOBase') else: expect_ret_type = (BUILTIN, 'file') self.check_doc_str( 'open(file, mode=\'r\', buffering=-1, encoding=None,\n' + ' errors=None, newline=None, closefd=True, opener=None)' + ' -> file object\n\nOpen file', BUILTIN, 'open', [{ 'doc': 'Open file', 'ret_type': [expect_ret_type], 'args': ( {'name': 'file'}, {'name': 'mode', 'default_value': "'r'"}, {'name': 'buffering', 'default_value': '-1'}, {'name': 'encoding', 'default_value': 'None'}, {'name': 'errors', 'default_value': 'None'}, {'name': 'newline', 'default_value': 'None'}, {'name': 'closefd', 'default_value': 'True'}, {'name': 'opener', 'default_value': 'None'}, ) }] ) def test_optional_with_default(self): self.check_doc_str( 'max(iterable[, key=func]) -> value', BUILTIN, 'max', [{ 'doc': '', 'ret_type': [('', 'value')], 'args': ( {'name': 'iterable'}, {'name': 'key', 'default_value': 'func'} ) }] ) def test_pyplot_figure(self): pyplot_doc = """ Creates a new figure. Parameters ---------- num : integer or string, optional, default: none If not provided, a new figure will be created, and a the figure number will be increamted. The figure objects holds this number in a `number` attribute. If num is provided, and a figure with this id already exists, make it active, and returns a reference to it. If this figure does not exists, create it and returns it. If num is a string, the window title will be set to this figure's `num`. figsize : tuple of integers, optional, default : None width, height in inches. If not provided, defaults to rc figure.figsize. dpi : integer, optional, default ; None resolution of the figure. If not provided, defaults to rc figure.dpi. facecolor : the background color; If not provided, defaults to rc figure.facecolor edgecolor : the border color. If not provided, defaults to rc figure.edgecolor Returns ------- figure : Figure The Figure instance returned will also be passed to new_figure_manager in the backends, which allows to hook custom Figure classes into the pylab interface. Additional kwargs will be passed to the figure init function. Note ---- If you are creating many figures, make sure you explicitly call "close" on the figures you are not using, because this will enable pylab to properly clean up the memory. 
rcParams defines the default values, which can be modified in the matplotlibrc file """ self.check_doc_str( pyplot_doc, 'matplotlib.pyplot', 'figure', [{ 'doc': pyplot_doc, 'ret_type': [('', 'Figure')], 'args': ( {'name': 'args', 'arg_format': '*'}, {'name': 'kwargs', 'arg_format': '**'} ) }] ) if __name__ == '__main__': unittest.main()
apache-2.0
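The tokenizer tests above rely on re.split keeping the delimiters whenever the pattern contains a capturing group. A simplified stand-in pattern (not the real TOKENS_REGEX from BuiltinScraper) shows the mechanism on a doc-string signature:

import re

# Simplified stand-in for TOKENS_REGEX: capture punctuation and return
# arrows so that re.split returns them as tokens instead of dropping them.
SIMPLE_TOKENS = r'(-->|=>|->|[(),:*=])'

def tokenize(signature):
    return [tok.strip() for tok in re.split(SIMPLE_TOKENS, signature) if tok.strip()]

print(tokenize("find(sub, start=0) -> int"))
# ['find', '(', 'sub', ',', 'start', '=', '0', ')', '->', 'int']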
Nandini-K/Artificial_Intelligence_and_Machine_Learning
K-Means Clustering.py
1
5296
########################################################################## # Copyright (c) 2017 Nandini Khanwalkar # nandini2@pdx.edu ########################################################################## import os import random import math import numpy as np import sklearn from sklearn.metrics import * train_data = np.loadtxt('optdigits.train', delimiter=',') X_train, y_train = train_data[:, np.arange(64)], train_data[:, 64] test_data = np.loadtxt('optdigits.test', delimiter=',') X_test, y_test = test_data[:, np.arange(64)], test_data[:, 64] def find_nearest_centre(centers, dataset, k): idx_cluster = [[] for i in range(k)] # Make k lists corresponding to each cluster-center for i in range(0, dataset.shape[0]): # For each vector in dataset C_i = np.argmin(np.sqrt(np.sum(np.power(centers - dataset[i, :], 2), axis=1))) # Compute closest cluster center idx_cluster[C_i].append(i) # Add vector index to that cluster-center's list return idx_cluster # Return cluster indices def find_K_means(k): centroids = np.random.choice(np.arange(17), k*64, replace=1).reshape(k, 64) # Randomly initialize k cluster centers while True: idx_cluster = find_nearest_centre(centroids, X_train, k) # Seperate data into clusters by finding nearest center k_means = [] for i in range(k): # Do for all clusters: if (len(idx_cluster[i]) != 0): # If the cluster is not empty k_means.append(np.mean(X_train[idx_cluster[i], :], axis=0)) # Move the center to the mean of all vectors in the cluster else: # Otherwise k_means.append(centroids[i, :]) # Keep center at its place if (np.sum(abs(centroids - k_means)) == 0): # If there is no change in any of the centers break # Stop moving the centers and exit loop centroids = np.asarray(k_means) # Otherwise repeat with new set of centers # Compute Errors : MSE = [] for i in range(k): MSE.append(np.nan_to_num(np.divide(np.sum(np.power(X_train[idx_cluster[i], :] - k_means[i], 2)), len(idx_cluster[i])))) Avg_MSE = np.divide(np.sum(MSE), np.count_nonzero(idx_cluster)) Sq_Sep = 0 for i in range(k): for j in range(i+1, k): Sq_Sep += np.sum(np.power(k_means[i] - k_means[j], 2)) MSS = (2*Sq_Sep)/(k*(k-1)) return np.asarray(k_means)[np.nonzero(idx_cluster)[0], :], MSE, Avg_MSE, MSS, np.asarray(idx_cluster)[np.nonzero(idx_cluster)[0]] def get_best_clustering(k): Avg_MSE = float('Infinity') for i in range(5): # Cluster the entire data 5 times model = find_K_means(k) if model[2]<Avg_MSE: # If the current Avg_MSE is less than previous then swap the clustering results best_model = model # Replace best_model with new best_model Avg_MSE = model[2] # Replace lest Avg_MSE with ne least Avg_MSE return best_model # Return the clustering results which had least Avg_MSE def assign_cluster_class(idx_cluster): cluster_class = [] for i in range(len(idx_cluster)): # Do for each cluster: count = np.zeros(10) for j in range(len(y_train[idx_cluster[i]])): # Among all the vectors in that cluster count[int(y_train[idx_cluster[i]][j])] += 1 # Count the occurence of each class cluster_class.append(np.argmax(count)) # Find the most frequent class and assign it to that cluster return cluster_class def visualize_cluster_centers(Clustering, cluster_class, directory): rootdir = os.getcwd() # Get current working directory if not os.path.exists(directory): os.makedirs(directory) # Create a directory corresponding to value of k in current directory outdir = os.path.join(rootdir, directory) for i in range(len(Clustering)): # Create a .pgm file for each cluster fout = open(os.path.join(outdir, '_'.join(['Cluster', str(i), 
'Class', str(cluster_class[i]), '.pgm'])), 'w+') fout.write('P2\n8 8\n16\n') # Write header in .pgm file for j in range(64): fout.write(str(math.floor(Clustering[i,j])) + ' ') # Write image data in .pgm file def K_means_Clustering(k): Clustering, MSE, Avg_MSE, MSS, idx_cluster = get_best_clustering(k) # Get best clustering for given k print('\n Results for (K =', k,') :\n\n\tNo. of clusters = ', len(Clustering), '\n\tAverage Mean Square Error = ', Avg_MSE, '\n\tMean Square Seperation = ', MSS, '\n') cluster_class = assign_cluster_class(idx_cluster) # Assign classes to each cluster center print('\tAssigned classes to clusters : ', cluster_class) test_set_clusters = find_nearest_centre(Clustering, X_test, len(cluster_class)) # Cluster test data by finding nearest center for each vector pred = np.zeros(y_test.shape[0]) for i in range(len(test_set_clusters)): # For each cluster pred[test_set_clusters[i]] = cluster_class[i] # For vectors in a cluster, predicted class is the class assigned to the cluster print('\tClustering Accuracy = ', accuracy_score(y_test, pred), '\n\n Confusion Matrix :\n') print(confusion_matrix(y_test, pred), '\n') visualize_cluster_centers(Clustering, cluster_class, '-'.join(['K',str(k)])) K_means_Clustering(k=10) # Preform K-means clustering for k = 10 k =30 while(k): print('\n==================================================') K_means_Clustering(k) # Preform K-means clustering for k = 30 and then for any input k value k = int(input("\tEnter number of initial random seeds\n\tOR\n\tEnter 0 to exit...\n"))
mit
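The script above assigns every vector to its nearest centroid by brute-force Euclidean distance. A minimal, self-contained sketch of just that assignment step, on synthetic data with hypothetical variable names (not the author's exact code):

import numpy as np

rng = np.random.default_rng(0)
data = rng.integers(0, 17, size=(200, 64)).astype(float)     # stand-in for the 64-feature optdigits rows
centers = rng.integers(0, 17, size=(10, 64)).astype(float)   # k = 10 randomly initialised centroids

# Squared Euclidean distance from every row to every centroid, then argmin per row.
dists = ((data[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
assignment = dists.argmin(axis=1)                             # cluster index for each row

# Per-cluster mean squared error, skipping empty clusters (as the script does).
for c in range(centers.shape[0]):
    members = data[assignment == c]
    if len(members):
        mse = ((members - centers[c]) ** 2).sum() / len(members)
        print(c, len(members), round(mse, 2))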
pierreg/tensorflow
tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py
30
3738
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests NumpySource and PandasSource.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source # pylint: disable=g-import-not-at-top try: import pandas as pd HAS_PANDAS = True except ImportError: HAS_PANDAS = False def get_rows(array, row_indices): rows = [array[i] for i in row_indices] return np.vstack(rows) class NumpySourceTestCase(tf.test.TestCase): def testNumpySource(self): batch_size = 3 iterations = 1000 array = np.arange(32).reshape([16, 2]) numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size) index_column = numpy_source().index value_column = numpy_source().value cache = {} with tf.Graph().as_default(): value_tensor = value_column.build(cache) index_tensor = index_column.build(cache) with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) for i in range(iterations): expected_index = [ j % array.shape[0] for j in range(batch_size * i, batch_size * (i + 1)) ] expected_value = get_rows(array, expected_index) actual_index, actual_value = sess.run([index_tensor, value_tensor]) np.testing.assert_array_equal(expected_index, actual_index) np.testing.assert_array_equal(expected_value, actual_value) coord.request_stop() coord.join(threads) class PandasSourceTestCase(tf.test.TestCase): def testPandasFeeding(self): if not HAS_PANDAS: return batch_size = 3 iterations = 1000 index = np.arange(100, 132) a = np.arange(32) b = np.arange(32, 64) dataframe = pd.DataFrame({"a": a, "b": b}, index=index) pandas_source = in_memory_source.PandasSource(dataframe, batch_size=batch_size) pandas_columns = pandas_source() cache = {} with tf.Graph().as_default(): pandas_tensors = [col.build(cache) for col in pandas_columns] with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) for i in range(iterations): indices = [j % dataframe.shape[0] for j in range(batch_size * i, batch_size * (i + 1))] expected_df_indices = dataframe.index[indices] expected_rows = dataframe.iloc[indices] actual_value = sess.run(pandas_tensors) np.testing.assert_array_equal(expected_df_indices, actual_value[0]) for col_num, col in enumerate(dataframe.columns): np.testing.assert_array_equal(expected_rows[col].values, actual_value[col_num + 1]) coord.request_stop() coord.join(threads) if __name__ == "__main__": tf.test.main()
apache-2.0
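The test above checks that batches fed from the queue cycle through the array in order, wrapping around at the end. A small sketch of just that wrap-around indexing, in plain NumPy without a TensorFlow session (sizes are illustrative):

import numpy as np

array = np.arange(32).reshape(16, 2)
batch_size = 3

def expected_batch(i):
    # Indices the feeder is expected to produce for batch i: consecutive rows, modulo the array length.
    idx = [j % array.shape[0] for j in range(batch_size * i, batch_size * (i + 1))]
    return idx, array[idx]

for i in range(6):
    idx, rows = expected_batch(i)
    print(i, idx, rows.tolist())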
ClimbsRocks/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
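A condensed version of the comparison the example makes, without the plotting, on small random data. Note that `affinity` is the parameter name used in this vintage of scikit-learn; newer releases rename it to `metric`:

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.rand(60, 200)            # 60 high-dimensional "waveforms"

for metric in ("cosine", "euclidean", "cityblock"):
    model = AgglomerativeClustering(n_clusters=3, linkage="average", affinity=metric)
    labels = model.fit_predict(X)
    # Average within-cluster distance gives a feel for how each metric spreads the classes.
    within = np.mean([pairwise_distances(X[labels == k], metric=metric).mean() for k in range(3)])
    print(metric, np.bincount(labels), round(within, 3))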
Ziqi-Li/bknqgis
pandas/pandas/tests/io/formats/test_to_latex.py
9
13691
from datetime import datetime import pytest import pandas as pd from pandas import DataFrame, compat, Series from pandas.util import testing as tm from pandas.compat import u import codecs @pytest.fixture def frame(): return DataFrame(tm.getSeriesData()) class TestToLatex(object): def test_to_latex_filename(self, frame): with tm.ensure_clean('test.tex') as path: frame.to_latex(path) with open(path, 'r') as f: assert frame.to_latex() == f.read() # test with utf-8 and encoding option (GH 7061) df = DataFrame([[u'au\xdfgangen']]) with tm.ensure_clean('test.tex') as path: df.to_latex(path, encoding='utf-8') with codecs.open(path, 'r', encoding='utf-8') as f: assert df.to_latex() == f.read() # test with utf-8 without encoding option if compat.PY3: # python3: pandas default encoding is utf-8 with tm.ensure_clean('test.tex') as path: df.to_latex(path) with codecs.open(path, 'r', encoding='utf-8') as f: assert df.to_latex() == f.read() else: # python2 default encoding is ascii, so an error should be raised with tm.ensure_clean('test.tex') as path: with pytest.raises(UnicodeEncodeError): df.to_latex(path) def test_to_latex(self, frame): # it works! frame.to_latex() df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) withindex_result = df.to_latex() withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule a & b \\ \midrule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected def test_to_latex_format(self, frame): # GH Bug #9402 frame.to_latex(column_format='ccc') df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) withindex_result = df.to_latex(column_format='ccc') withindex_expected = r"""\begin{tabular}{ccc} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected def test_to_latex_with_formatters(self): df = DataFrame({'int': [1, 2, 3], 'float': [1.0, 2.0, 3.0], 'object': [(1, 2), True, False], 'datetime64': [datetime(2016, 1, 1), datetime(2016, 2, 5), datetime(2016, 3, 3)]}) formatters = {'int': lambda x: '0x%x' % x, 'float': lambda x: '[% 4.1f]' % x, 'object': lambda x: '-%s-' % str(x), 'datetime64': lambda x: x.strftime('%Y-%m'), '__index__': lambda x: 'index: %s' % x} result = df.to_latex(formatters=dict(formatters)) expected = r"""\begin{tabular}{llrrl} \toprule {} & datetime64 & float & int & object \\ \midrule index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_multiindex(self): df = DataFrame({('x', 'y'): ['a']}) result = df.to_latex() expected = r"""\begin{tabular}{ll} \toprule {} & x \\ {} & y \\ \midrule 0 & a \\ \bottomrule \end{tabular} """ assert result == expected result = df.T.to_latex() expected = r"""\begin{tabular}{lll} \toprule & & 0 \\ \midrule x & y & a \\ \bottomrule \end{tabular} """ assert result == expected df = DataFrame.from_dict({ ('c1', 0): pd.Series(dict((x, x) for x in range(4))), ('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))), ('c2', 0): pd.Series(dict((x, x) for x in range(4))), ('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))), ('c3', 0): pd.Series(dict((x, x) for x in range(4))), }).T result = df.to_latex() expected = 
r"""\begin{tabular}{llrrrr} \toprule & & 0 & 1 & 2 & 3 \\ \midrule c1 & 0 & 0 & 1 & 2 & 3 \\ & 1 & 4 & 5 & 6 & 7 \\ c2 & 0 & 0 & 1 & 2 & 3 \\ & 1 & 4 & 5 & 6 & 7 \\ c3 & 0 & 0 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ assert result == expected # GH 14184 df = df.T df.columns.names = ['a', 'b'] result = df.to_latex() expected = r"""\begin{tabular}{lrrrrr} \toprule a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ b & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 4 & 0 & 4 & 0 \\ 1 & 1 & 5 & 1 & 5 & 1 \\ 2 & 2 & 6 & 2 & 6 & 2 \\ 3 & 3 & 7 & 3 & 7 & 3 \\ \bottomrule \end{tabular} """ assert result == expected # GH 10660 df = pd.DataFrame({'a': [0, 0, 1, 1], 'b': list('abab'), 'c': [1, 2, 3, 4]}) result = df.set_index(['a', 'b']).to_latex() expected = r"""\begin{tabular}{llr} \toprule & & c \\ a & b & \\ \midrule 0 & a & 1 \\ & b & 2 \\ 1 & a & 3 \\ & b & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.groupby('a').describe().to_latex() expected = r"""\begin{tabular}{lrrrrrrrr} \toprule {} & \multicolumn{8}{l}{c} \\ {} & count & mean & std & min & 25\% & 50\% & 75\% & max \\ a & & & & & & & & \\ \midrule 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\ 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_multicolumnrow(self): df = pd.DataFrame({ ('c1', 0): dict((x, x) for x in range(5)), ('c1', 1): dict((x, x + 5) for x in range(5)), ('c2', 0): dict((x, x) for x in range(5)), ('c2', 1): dict((x, x + 5) for x in range(5)), ('c3', 0): dict((x, x) for x in range(5)) }) result = df.to_latex() expected = r"""\begin{tabular}{lrrrrr} \toprule {} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ {} & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 5 & 0 & 5 & 0 \\ 1 & 1 & 6 & 1 & 6 & 1 \\ 2 & 2 & 7 & 2 & 7 & 2 \\ 3 & 3 & 8 & 3 & 8 & 3 \\ 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.to_latex(multicolumn=False) expected = r"""\begin{tabular}{lrrrrr} \toprule {} & c1 & & c2 & & c3 \\ {} & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 5 & 0 & 5 & 0 \\ 1 & 1 & 6 & 1 & 6 & 1 \\ 2 & 2 & 7 & 2 & 7 & 2 \\ 3 & 3 & 8 & 3 & 8 & 3 \\ 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.T.to_latex(multirow=True) expected = r"""\begin{tabular}{llrrrrr} \toprule & & 0 & 1 & 2 & 3 & 4 \\ \midrule \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ assert result == expected df.index = df.T.index result = df.T.to_latex(multirow=True, multicolumn=True, multicolumn_format='c') expected = r"""\begin{tabular}{llrrrrr} \toprule & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\ & & 0 & 1 & 0 & 1 & 0 \\ \midrule \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_escape(self): a = 'a' b = 'b' test_dict = {u('co^l1'): {a: "a", b: "b"}, u('co$e^x$'): {a: "a", b: "b"}} unescaped_result = DataFrame(test_dict).to_latex(escape=False) escaped_result = DataFrame(test_dict).to_latex( ) # default: escape=True unescaped_expected = r'''\begin{tabular}{lll} \toprule {} & co$e^x$ & co^l1 \\ \midrule a & a & a \\ b & b & b \\ \bottomrule 
\end{tabular} ''' escaped_expected = r'''\begin{tabular}{lll} \toprule {} & co\$e\textasciicircumx\$ & co\textasciicircuml1 \\ \midrule a & a & a \\ b & b & b \\ \bottomrule \end{tabular} ''' assert unescaped_result == unescaped_expected assert escaped_result == escaped_expected def test_to_latex_longtable(self, frame): frame.to_latex(longtable=True) df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) withindex_result = df.to_latex(longtable=True) withindex_expected = r"""\begin{longtable}{lrl} \toprule {} & a & b \\ \midrule \endhead \midrule \multicolumn{3}{r}{{Continued on next page}} \\ \midrule \endfoot \bottomrule \endlastfoot 0 & 1 & b1 \\ 1 & 2 & b2 \\ \end{longtable} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, longtable=True) withoutindex_expected = r"""\begin{longtable}{rl} \toprule a & b \\ \midrule \endhead \midrule \multicolumn{3}{r}{{Continued on next page}} \\ \midrule \endfoot \bottomrule \endlastfoot 1 & b1 \\ 2 & b2 \\ \end{longtable} """ assert withoutindex_result == withoutindex_expected def test_to_latex_escape_special_chars(self): special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^', '\\'] df = DataFrame(data=special_characters) observed = df.to_latex() expected = r"""\begin{tabular}{ll} \toprule {} & 0 \\ \midrule 0 & \& \\ 1 & \% \\ 2 & \$ \\ 3 & \# \\ 4 & \_ \\ 5 & \{ \\ 6 & \} \\ 7 & \textasciitilde \\ 8 & \textasciicircum \\ 9 & \textbackslash \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_no_header(self): # GH 7124 df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) withindex_result = df.to_latex(header=False) withindex_expected = r"""\begin{tabular}{lrl} \toprule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, header=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected def test_to_latex_specified_header(self): # GH 7124 df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) withindex_result = df.to_latex(header=['AA', 'BB']) withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & AA & BB \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(header=['AA', 'BB'], index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule AA & BB \\ \midrule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected withoutescape_result = df.to_latex(header=['$A$', '$B$'], escape=False) withoutescape_expected = r"""\begin{tabular}{lrl} \toprule {} & $A$ & $B$ \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutescape_result == withoutescape_expected with pytest.raises(ValueError): df.to_latex(header=['A']) def test_to_latex_decimal(self, frame): # GH 12031 frame.to_latex() df = DataFrame({'a': [1.0, 2.1], 'b': ['b1', 'b2']}) withindex_result = df.to_latex(decimal=',') withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1,0 & b1 \\ 1 & 2,1 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected def test_to_latex_series(self): s = Series(['a', 'b', 'c']) withindex_result = s.to_latex() withindex_expected = r"""\begin{tabular}{ll} \toprule {} & 0 \\ \midrule 0 & a \\ 1 & b \\ 2 & c \\ \bottomrule \end{tabular} """ assert withindex_result == 
withindex_expected def test_to_latex_bold_rows(self): # GH 16707 df = pd.DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) observed = df.to_latex(bold_rows=True) expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule \textbf{0} & 1 & b1 \\ \textbf{1} & 2 & b2 \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_no_bold_rows(self): # GH 16707 df = pd.DataFrame({'a': [1, 2], 'b': ['b1', 'b2']}) observed = df.to_latex(bold_rows=False) expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert observed == expected
gpl-2.0
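The tests above pin down `DataFrame.to_latex` output byte-for-byte. For orientation, a two-line usage example (the expected LaTeX is shown in the tests themselves):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
print(df.to_latex())                                  # tabular with index and booktabs rules
print(df.to_latex(index=False, column_format='rl'))   # no index, explicit column spec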
neocogent/electrum
electrum/gui/qt/history_list.py
2
31462
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import datetime from datetime import date from typing import TYPE_CHECKING, Tuple, Dict import threading from enum import IntEnum from decimal import Decimal from PyQt5.QtGui import QMouseEvent, QFont, QBrush, QColor from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, QAbstractItemModel, QSortFilterProxyModel, QVariant, QItemSelectionModel, QDate, QPoint) from PyQt5.QtWidgets import (QMenu, QHeaderView, QLabel, QMessageBox, QPushButton, QComboBox, QVBoxLayout, QCalendarWidget, QGridLayout) from electrum.address_synchronizer import TX_HEIGHT_LOCAL from electrum.i18n import _ from electrum.util import (block_explorer_URL, profiler, TxMinedInfo, OrderedDictWithIndex, timestamp_to_datetime) from electrum.logging import get_logger, Logger from .util import (read_QIcon, MONOSPACE_FONT, Buttons, CancelButton, OkButton, filename_field, MyTreeView, AcceptFileDragDrop, WindowModalDialog, CloseButton, webopen) if TYPE_CHECKING: from electrum.wallet import Abstract_Wallet _logger = get_logger(__name__) try: from electrum.plot import plot_history, NothingToPlotException except: _logger.info("could not import electrum.plot. 
This feature needs matplotlib to be installed.") plot_history = None # note: this list needs to be kept in sync with another in kivy TX_ICONS = [ "unconfirmed.png", "warning.png", "unconfirmed.png", "offline_tx.png", "clock1.png", "clock2.png", "clock3.png", "clock4.png", "clock5.png", "confirmed.png", ] class HistoryColumns(IntEnum): STATUS_ICON = 0 STATUS_TEXT = 1 DESCRIPTION = 2 COIN_VALUE = 3 RUNNING_COIN_BALANCE = 4 FIAT_VALUE = 5 FIAT_ACQ_PRICE = 6 FIAT_CAP_GAINS = 7 TXID = 8 class HistorySortModel(QSortFilterProxyModel): def lessThan(self, source_left: QModelIndex, source_right: QModelIndex): item1 = self.sourceModel().data(source_left, Qt.UserRole) item2 = self.sourceModel().data(source_right, Qt.UserRole) if item1 is None or item2 is None: raise Exception(f'UserRole not set for column {source_left.column()}') v1 = item1.value() v2 = item2.value() if v1 is None or isinstance(v1, Decimal) and v1.is_nan(): v1 = -float("inf") if v2 is None or isinstance(v2, Decimal) and v2.is_nan(): v2 = -float("inf") try: return v1 < v2 except: return False class HistoryModel(QAbstractItemModel, Logger): def __init__(self, parent): QAbstractItemModel.__init__(self, parent) Logger.__init__(self) self.parent = parent self.view = None # type: HistoryList self.transactions = OrderedDictWithIndex() self.tx_status_cache = {} # type: Dict[str, Tuple[int, str]] self.summary = None def set_view(self, history_list: 'HistoryList'): # FIXME HistoryModel and HistoryList mutually depend on each other. # After constructing both, this method needs to be called. self.view = history_list # type: HistoryList self.set_visibility_of_columns() def columnCount(self, parent: QModelIndex): return len(HistoryColumns) def rowCount(self, parent: QModelIndex): return len(self.transactions) def index(self, row: int, column: int, parent: QModelIndex): return self.createIndex(row, column) def data(self, index: QModelIndex, role: Qt.ItemDataRole) -> QVariant: # note: this method is performance-critical. # it is called a lot, and so must run extremely fast. 
assert index.isValid() col = index.column() tx_item = self.transactions.value_from_pos(index.row()) tx_hash = tx_item['txid'] conf = tx_item['confirmations'] txpos = tx_item['txpos_in_block'] or 0 height = tx_item['height'] try: status, status_str = self.tx_status_cache[tx_hash] except KeyError: tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) status, status_str = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info) if role == Qt.UserRole: # for sorting d = { HistoryColumns.STATUS_ICON: # height breaks ties for unverified txns # txpos breaks ties for verified same block txns (conf, -status, -height, -txpos), HistoryColumns.STATUS_TEXT: status_str, HistoryColumns.DESCRIPTION: tx_item['label'], HistoryColumns.COIN_VALUE: tx_item['value'].value, HistoryColumns.RUNNING_COIN_BALANCE: tx_item['balance'].value, HistoryColumns.FIAT_VALUE: tx_item['fiat_value'].value if 'fiat_value' in tx_item else None, HistoryColumns.FIAT_ACQ_PRICE: tx_item['acquisition_price'].value if 'acquisition_price' in tx_item else None, HistoryColumns.FIAT_CAP_GAINS: tx_item['capital_gain'].value if 'capital_gain' in tx_item else None, HistoryColumns.TXID: tx_hash, } return QVariant(d[col]) if role not in (Qt.DisplayRole, Qt.EditRole): if col == HistoryColumns.STATUS_ICON and role == Qt.DecorationRole: return QVariant(read_QIcon(TX_ICONS[status])) elif col == HistoryColumns.STATUS_ICON and role == Qt.ToolTipRole: return QVariant(str(conf) + _(" confirmation" + ("s" if conf != 1 else ""))) elif col > HistoryColumns.DESCRIPTION and role == Qt.TextAlignmentRole: return QVariant(Qt.AlignRight | Qt.AlignVCenter) elif col != HistoryColumns.STATUS_TEXT and role == Qt.FontRole: monospace_font = QFont(MONOSPACE_FONT) return QVariant(monospace_font) elif col == HistoryColumns.DESCRIPTION and role == Qt.DecorationRole \ and self.parent.wallet.invoices.paid.get(tx_hash): return QVariant(read_QIcon("seal")) elif col in (HistoryColumns.DESCRIPTION, HistoryColumns.COIN_VALUE) \ and role == Qt.ForegroundRole and tx_item['value'].value < 0: red_brush = QBrush(QColor("#BC1E1E")) return QVariant(red_brush) elif col == HistoryColumns.FIAT_VALUE and role == Qt.ForegroundRole \ and not tx_item.get('fiat_default') and tx_item.get('fiat_value') is not None: blue_brush = QBrush(QColor("#1E1EFF")) return QVariant(blue_brush) return QVariant() if col == HistoryColumns.STATUS_TEXT: return QVariant(status_str) elif col == HistoryColumns.DESCRIPTION: return QVariant(tx_item['label']) elif col == HistoryColumns.COIN_VALUE: value = tx_item['value'].value v_str = self.parent.format_amount(value, is_diff=True, whitespaces=True) return QVariant(v_str) elif col == HistoryColumns.RUNNING_COIN_BALANCE: balance = tx_item['balance'].value balance_str = self.parent.format_amount(balance, whitespaces=True) return QVariant(balance_str) elif col == HistoryColumns.FIAT_VALUE and 'fiat_value' in tx_item: value_str = self.parent.fx.format_fiat(tx_item['fiat_value'].value) return QVariant(value_str) elif col == HistoryColumns.FIAT_ACQ_PRICE and \ tx_item['value'].value < 0 and 'acquisition_price' in tx_item: # fixme: should use is_mine acq = tx_item['acquisition_price'].value return QVariant(self.parent.fx.format_fiat(acq)) elif col == HistoryColumns.FIAT_CAP_GAINS and 'capital_gain' in tx_item: cg = tx_item['capital_gain'].value return QVariant(self.parent.fx.format_fiat(cg)) elif col == HistoryColumns.TXID: return QVariant(tx_hash) return QVariant() def parent(self, index: QModelIndex): return QModelIndex() def hasChildren(self, index: QModelIndex): 
return not index.isValid() def update_label(self, row): tx_item = self.transactions.value_from_pos(row) tx_item['label'] = self.parent.wallet.get_label(tx_item['txid']) topLeft = bottomRight = self.createIndex(row, 2) self.dataChanged.emit(topLeft, bottomRight, [Qt.DisplayRole]) def get_domain(self): '''Overridden in address_dialog.py''' return self.parent.wallet.get_addresses() @profiler def refresh(self, reason: str): self.logger.info(f"refreshing... reason: {reason}") assert self.parent.gui_thread == threading.current_thread(), 'must be called from GUI thread' assert self.view, 'view not set' selected = self.view.selectionModel().currentIndex() selected_row = None if selected: selected_row = selected.row() fx = self.parent.fx if fx: fx.history_used_spot = False r = self.parent.wallet.get_full_history(domain=self.get_domain(), from_timestamp=None, to_timestamp=None, fx=fx) self.set_visibility_of_columns() if r['transactions'] == list(self.transactions.values()): return old_length = len(self.transactions) if old_length != 0: self.beginRemoveRows(QModelIndex(), 0, old_length) self.transactions.clear() self.endRemoveRows() self.beginInsertRows(QModelIndex(), 0, len(r['transactions'])-1) for tx_item in r['transactions']: txid = tx_item['txid'] self.transactions[txid] = tx_item self.endInsertRows() if selected_row: self.view.selectionModel().select(self.createIndex(selected_row, 0), QItemSelectionModel.Rows | QItemSelectionModel.SelectCurrent) self.view.filter() # update summary self.summary = r['summary'] if not self.view.years and self.transactions: start_date = date.today() end_date = date.today() if len(self.transactions) > 0: start_date = self.transactions.value_from_pos(0).get('date') or start_date end_date = self.transactions.value_from_pos(len(self.transactions) - 1).get('date') or end_date self.view.years = [str(i) for i in range(start_date.year, end_date.year + 1)] self.view.period_combo.insertItems(1, self.view.years) # update tx_status_cache self.tx_status_cache.clear() for txid, tx_item in self.transactions.items(): tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) self.tx_status_cache[txid] = self.parent.wallet.get_tx_status(txid, tx_mined_info) def set_visibility_of_columns(self): def set_visible(col: int, b: bool): self.view.showColumn(col) if b else self.view.hideColumn(col) # txid set_visible(HistoryColumns.TXID, False) # fiat history = self.parent.fx.show_history() cap_gains = self.parent.fx.get_history_capital_gains_config() set_visible(HistoryColumns.FIAT_VALUE, history) set_visible(HistoryColumns.FIAT_ACQ_PRICE, history and cap_gains) set_visible(HistoryColumns.FIAT_CAP_GAINS, history and cap_gains) def update_fiat(self, row, idx): tx_item = self.transactions.value_from_pos(row) key = tx_item['txid'] fee = tx_item.get('fee') value = tx_item['value'].value fiat_fields = self.parent.wallet.get_tx_item_fiat(key, value, self.parent.fx, fee.value if fee else None) tx_item.update(fiat_fields) self.dataChanged.emit(idx, idx, [Qt.DisplayRole, Qt.ForegroundRole]) def update_tx_mined_status(self, tx_hash: str, tx_mined_info: TxMinedInfo): try: row = self.transactions.pos_from_key(tx_hash) tx_item = self.transactions[tx_hash] except KeyError: return self.tx_status_cache[tx_hash] = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info) tx_item.update({ 'confirmations': tx_mined_info.conf, 'timestamp': tx_mined_info.timestamp, 'txpos_in_block': tx_mined_info.txpos, 'date': timestamp_to_datetime(tx_mined_info.timestamp), }) topLeft = self.createIndex(row, 0) bottomRight = 
self.createIndex(row, len(HistoryColumns) - 1) self.dataChanged.emit(topLeft, bottomRight) def on_fee_histogram(self): for tx_hash, tx_item in list(self.transactions.items()): tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) if tx_mined_info.conf > 0: # note: we could actually break here if we wanted to rely on the order of txns in self.transactions continue self.update_tx_mined_status(tx_hash, tx_mined_info) def headerData(self, section: int, orientation: Qt.Orientation, role: Qt.ItemDataRole): assert orientation == Qt.Horizontal if role != Qt.DisplayRole: return None fx = self.parent.fx fiat_title = 'n/a fiat value' fiat_acq_title = 'n/a fiat acquisition price' fiat_cg_title = 'n/a fiat capital gains' if fx and fx.show_history(): fiat_title = '%s '%fx.ccy + _('Value') fiat_acq_title = '%s '%fx.ccy + _('Acquisition price') fiat_cg_title = '%s '%fx.ccy + _('Capital Gains') return { HistoryColumns.STATUS_ICON: '', HistoryColumns.STATUS_TEXT: _('Date'), HistoryColumns.DESCRIPTION: _('Description'), HistoryColumns.COIN_VALUE: _('Amount'), HistoryColumns.RUNNING_COIN_BALANCE: _('Balance'), HistoryColumns.FIAT_VALUE: fiat_title, HistoryColumns.FIAT_ACQ_PRICE: fiat_acq_title, HistoryColumns.FIAT_CAP_GAINS: fiat_cg_title, HistoryColumns.TXID: 'TXID', }[section] def flags(self, idx): extra_flags = Qt.NoItemFlags # type: Qt.ItemFlag if idx.column() in self.view.editable_columns: extra_flags |= Qt.ItemIsEditable return super().flags(idx) | extra_flags @staticmethod def tx_mined_info_from_tx_item(tx_item): tx_mined_info = TxMinedInfo(height=tx_item['height'], conf=tx_item['confirmations'], timestamp=tx_item['timestamp']) return tx_mined_info class HistoryList(MyTreeView, AcceptFileDragDrop): filter_columns = [HistoryColumns.STATUS_TEXT, HistoryColumns.DESCRIPTION, HistoryColumns.COIN_VALUE, HistoryColumns.TXID] def tx_item_from_proxy_row(self, proxy_row): hm_idx = self.model().mapToSource(self.model().index(proxy_row, 0)) return self.hm.transactions.value_from_pos(hm_idx.row()) def should_hide(self, proxy_row): if self.start_timestamp and self.end_timestamp: tx_item = self.tx_item_from_proxy_row(proxy_row) date = tx_item['date'] if date: in_interval = self.start_timestamp <= date <= self.end_timestamp if not in_interval: return True return False def __init__(self, parent, model: HistoryModel): super().__init__(parent, self.create_menu, stretch_column=HistoryColumns.DESCRIPTION) self.hm = model self.proxy = HistorySortModel(self) self.proxy.setSourceModel(model) self.setModel(self.proxy) self.config = parent.config AcceptFileDragDrop.__init__(self, ".txn") self.setSortingEnabled(True) self.start_timestamp = None self.end_timestamp = None self.years = [] self.create_toolbar_buttons() self.wallet = self.parent.wallet # type: Abstract_Wallet self.sortByColumn(HistoryColumns.STATUS_ICON, Qt.AscendingOrder) self.editable_columns |= {HistoryColumns.FIAT_VALUE} self.header().setStretchLastSection(False) for col in HistoryColumns: sm = QHeaderView.Stretch if col == self.stretch_column else QHeaderView.ResizeToContents self.header().setSectionResizeMode(col, sm) def format_date(self, d): return str(datetime.date(d.year, d.month, d.day)) if d else _('None') def on_combo(self, x): s = self.period_combo.itemText(x) x = s == _('Custom') self.start_button.setEnabled(x) self.end_button.setEnabled(x) if s == _('All'): self.start_timestamp = None self.end_timestamp = None self.start_button.setText("-") self.end_button.setText("-") else: try: year = int(s) except: return self.start_timestamp = start_date = 
datetime.datetime(year, 1, 1) self.end_timestamp = end_date = datetime.datetime(year+1, 1, 1) self.start_button.setText(_('From') + ' ' + self.format_date(start_date)) self.end_button.setText(_('To') + ' ' + self.format_date(end_date)) self.hide_rows() def create_toolbar_buttons(self): self.period_combo = QComboBox() self.start_button = QPushButton('-') self.start_button.pressed.connect(self.select_start_date) self.start_button.setEnabled(False) self.end_button = QPushButton('-') self.end_button.pressed.connect(self.select_end_date) self.end_button.setEnabled(False) self.period_combo.addItems([_('All'), _('Custom')]) self.period_combo.activated.connect(self.on_combo) def get_toolbar_buttons(self): return self.period_combo, self.start_button, self.end_button def on_hide_toolbar(self): self.start_timestamp = None self.end_timestamp = None self.hide_rows() def save_toolbar_state(self, state, config): config.set_key('show_toolbar_history', state) def select_start_date(self): self.start_timestamp = self.select_date(self.start_button) self.hide_rows() def select_end_date(self): self.end_timestamp = self.select_date(self.end_button) self.hide_rows() def select_date(self, button): d = WindowModalDialog(self, _("Select date")) d.setMinimumSize(600, 150) d.date = None vbox = QVBoxLayout() def on_date(date): d.date = date cal = QCalendarWidget() cal.setGridVisible(True) cal.clicked[QDate].connect(on_date) vbox.addWidget(cal) vbox.addLayout(Buttons(OkButton(d), CancelButton(d))) d.setLayout(vbox) if d.exec_(): if d.date is None: return None date = d.date.toPyDate() button.setText(self.format_date(date)) return datetime.datetime(date.year, date.month, date.day) def show_summary(self): h = self.model().sourceModel().summary if not h: self.parent.show_message(_("Nothing to summarize.")) return start_date = h.get('start_date') end_date = h.get('end_date') format_amount = lambda x: self.parent.format_amount(x.value) + ' ' + self.parent.base_unit() d = WindowModalDialog(self, _("Summary")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() grid = QGridLayout() grid.addWidget(QLabel(_("Start")), 0, 0) grid.addWidget(QLabel(self.format_date(start_date)), 0, 1) grid.addWidget(QLabel(str(h.get('fiat_start_value')) + '/BTC'), 0, 2) grid.addWidget(QLabel(_("Initial balance")), 1, 0) grid.addWidget(QLabel(format_amount(h['start_balance'])), 1, 1) grid.addWidget(QLabel(str(h.get('fiat_start_balance'))), 1, 2) grid.addWidget(QLabel(_("End")), 2, 0) grid.addWidget(QLabel(self.format_date(end_date)), 2, 1) grid.addWidget(QLabel(str(h.get('fiat_end_value')) + '/BTC'), 2, 2) grid.addWidget(QLabel(_("Final balance")), 4, 0) grid.addWidget(QLabel(format_amount(h['end_balance'])), 4, 1) grid.addWidget(QLabel(str(h.get('fiat_end_balance'))), 4, 2) grid.addWidget(QLabel(_("Income")), 5, 0) grid.addWidget(QLabel(format_amount(h.get('incoming'))), 5, 1) grid.addWidget(QLabel(str(h.get('fiat_incoming'))), 5, 2) grid.addWidget(QLabel(_("Expenditures")), 6, 0) grid.addWidget(QLabel(format_amount(h.get('outgoing'))), 6, 1) grid.addWidget(QLabel(str(h.get('fiat_outgoing'))), 6, 2) grid.addWidget(QLabel(_("Capital gains")), 7, 0) grid.addWidget(QLabel(str(h.get('fiat_capital_gains'))), 7, 2) grid.addWidget(QLabel(_("Unrealized gains")), 8, 0) grid.addWidget(QLabel(str(h.get('fiat_unrealized_gains', ''))), 8, 2) vbox.addLayout(grid) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() def plot_history_dialog(self): if plot_history is None: self.parent.show_message( _("Can't plot history.") + '\n' + _("Perhaps some 
dependencies are missing...") + " (matplotlib?)") return try: plt = plot_history(list(self.hm.transactions.values())) plt.show() except NothingToPlotException as e: self.parent.show_message(str(e)) def on_edited(self, index, user_role, text): index = self.model().mapToSource(index) row, column = index.row(), index.column() tx_item = self.hm.transactions.value_from_pos(row) key = tx_item['txid'] if column == HistoryColumns.DESCRIPTION: if self.wallet.set_label(key, text): #changed self.hm.update_label(row) self.parent.update_completions() elif column == HistoryColumns.FIAT_VALUE: self.wallet.set_fiat_value(key, self.parent.fx.ccy, text, self.parent.fx, tx_item['value'].value) value = tx_item['value'].value if value is not None: self.hm.update_fiat(row, index) else: assert False def mouseDoubleClickEvent(self, event: QMouseEvent): idx = self.indexAt(event.pos()) if not idx.isValid(): return tx_item = self.tx_item_from_proxy_row(idx.row()) if self.hm.flags(self.model().mapToSource(idx)) & Qt.ItemIsEditable: super().mouseDoubleClickEvent(event) else: self.show_transaction(tx_item['txid']) def show_transaction(self, tx_hash): tx = self.wallet.db.get_transaction(tx_hash) if not tx: return label = self.wallet.get_label(tx_hash) or None # prefer 'None' if not defined (force tx dialog to hide Description field if missing) self.parent.show_transaction(tx, label) def create_menu(self, position: QPoint): org_idx: QModelIndex = self.indexAt(position) idx = self.proxy.mapToSource(org_idx) if not idx.isValid(): # can happen e.g. before list is populated for the first time return tx_item = self.hm.transactions.value_from_pos(idx.row()) column = idx.column() if column == HistoryColumns.STATUS_ICON: column_title = _('Transaction ID') column_data = tx_item['txid'] else: column_title = self.hm.headerData(column, Qt.Horizontal, Qt.DisplayRole) column_data = self.hm.data(idx, Qt.DisplayRole).value() tx_hash = tx_item['txid'] tx = self.wallet.db.get_transaction(tx_hash) if not tx: return tx_URL = block_explorer_URL(self.config, 'tx', tx_hash) height = self.wallet.get_tx_height(tx_hash).height is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) is_unconfirmed = height <= 0 pr_key = self.wallet.invoices.paid.get(tx_hash) menu = QMenu() if height == TX_HEIGHT_LOCAL: menu.addAction(_("Remove"), lambda: self.remove_local_tx(tx_hash)) amount_columns = [HistoryColumns.COIN_VALUE, HistoryColumns.RUNNING_COIN_BALANCE, HistoryColumns.FIAT_VALUE, HistoryColumns.FIAT_ACQ_PRICE, HistoryColumns.FIAT_CAP_GAINS] if column in amount_columns: column_data = column_data.strip() menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data)) for c in self.editable_columns: if self.isColumnHidden(c): continue label = self.hm.headerData(c, Qt.Horizontal, Qt.DisplayRole) # TODO use siblingAtColumn when min Qt version is >=5.11 persistent = QPersistentModelIndex(org_idx.sibling(org_idx.row(), c)) menu.addAction(_("Edit {}").format(label), lambda p=persistent: self.edit(QModelIndex(p))) menu.addAction(_("Details"), lambda: self.show_transaction(tx_hash)) if is_unconfirmed and tx: # note: the current implementation of RBF *needs* the old tx fee rbf = is_mine and not tx.is_final() and fee is not None if rbf: menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx)) else: child_tx = self.wallet.cpfp(tx, 0) if child_tx: menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx)) if pr_key: menu.addAction(read_QIcon("seal"), _("View invoice"), 
lambda: self.parent.show_invoice(pr_key)) if tx_URL: menu.addAction(_("View on block explorer"), lambda: webopen(tx_URL)) menu.exec_(self.viewport().mapToGlobal(position)) def remove_local_tx(self, delete_tx): to_delete = {delete_tx} to_delete |= self.wallet.get_depending_transactions(delete_tx) question = _("Are you sure you want to remove this transaction?") if len(to_delete) > 1: question = (_("Are you sure you want to remove this transaction and {} child transactions?") .format(len(to_delete) - 1)) if not self.parent.question(msg=question, title=_("Please confirm")): return for tx in to_delete: self.wallet.remove_transaction(tx) self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.parent.need_update.set() def onFileAdded(self, fn): try: with open(fn) as f: tx = self.parent.tx_from_text(f.read()) self.parent.save_transaction_into_wallet(tx) except IOError as e: self.parent.show_error(e) def export_history_dialog(self): d = WindowModalDialog(self, _('Export History')) d.setMinimumSize(400, 200) vbox = QVBoxLayout(d) defaultname = os.path.expanduser('~/electrum-history.csv') select_msg = _('Select file to export your wallet transactions to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) vbox.addStretch(1) hbox = Buttons(CancelButton(d), OkButton(d, _('Export'))) vbox.addLayout(hbox) #run_hook('export_history_dialog', self, hbox) self.update() if not d.exec_(): return filename = filename_e.text() if not filename: return try: self.do_export_history(filename, csv_button.isChecked()) except (IOError, os.error) as reason: export_error_label = _("Electrum was unable to produce a transaction export.") self.parent.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history")) return self.parent.show_message(_("Your wallet history has been successfully exported.")) def do_export_history(self, file_name, is_csv): hist = self.wallet.get_full_history(domain=self.hm.get_domain(), from_timestamp=None, to_timestamp=None, fx=self.parent.fx, show_fees=True) txns = hist['transactions'] lines = [] if is_csv: for item in txns: lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item.get('fiat_value', ''), item.get('fee', ''), item.get('fiat_fee', ''), item['date']]) with open(file_name, "w+", encoding='utf-8') as f: if is_csv: import csv transaction = csv.writer(f, lineterminator='\n') transaction.writerow(["transaction_hash", "label", "confirmations", "value", "fiat_value", "fee", "fiat_fee", "timestamp"]) for line in lines: transaction.writerow(line) else: from electrum.util import json_encode f.write(json_encode(txns)) def text_txid_from_coordinate(self, row, col): idx = self.model().mapToSource(self.model().index(row, col)) tx_item = self.hm.transactions.value_from_pos(idx.row()) return self.hm.data(idx, Qt.DisplayRole).value(), tx_item['txid']
mit
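`HistorySortModel.lessThan` above maps missing and NaN fiat values to negative infinity so the proxy model can still order rows. The same idea stripped of the Qt plumbing, as a sketch rather than Electrum code:

from decimal import Decimal

def sort_key(value):
    # Treat None and NaN as smaller than every real value, like the proxy model does.
    if value is None or (isinstance(value, Decimal) and value.is_nan()):
        return -float("inf")
    return value

rows = [Decimal("2.5"), None, Decimal("NaN"), Decimal("1.0")]
print(sorted(rows, key=sort_key))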
amueller/scipy_2015_sklearn_tutorial
notebooks/helpers.py
19
5046
import numpy as np from collections import defaultdict import os from sklearn.cross_validation import StratifiedShuffleSplit from sklearn.feature_extraction import DictVectorizer # Can also use pandas! def process_titanic_line(line): # Split line on "," to get fields without comma confusion vals = line.strip().split('",') # replace spurious " characters vals = [v.replace('"', '') for v in vals] pclass = int(vals[0]) survived = int(vals[1]) name = str(vals[2]) sex = str(vals[3]) try: age = float(vals[4]) except ValueError: # Blank age age = -1 sibsp = float(vals[5]) parch = int(vals[6]) ticket = str(vals[7]) try: fare = float(vals[8]) except ValueError: # Blank fare fare = -1 cabin = str(vals[9]) embarked = str(vals[10]) boat = str(vals[11]) homedest = str(vals[12]) line_dict = {'pclass': pclass, 'survived': survived, 'name': name, 'sex': sex, 'age': age, 'sibsp': sibsp, 'parch': parch, 'ticket': ticket, 'fare': fare, 'cabin': cabin, 'embarked': embarked, 'boat': boat, 'homedest': homedest} return line_dict def load_titanic(test_size=.25, feature_skip_tuple=(), random_state=1999): f = open(os.path.join('datasets', 'titanic', 'titanic3.csv')) # Remove . from home.dest, split on quotes because some fields have commas keys = f.readline().strip().replace('.', '').split('","') lines = f.readlines() f.close() string_keys = ['name', 'sex', 'ticket', 'cabin', 'embarked', 'boat', 'homedest'] string_keys = [s for s in string_keys if s not in feature_skip_tuple] numeric_keys = ['pclass', 'age', 'sibsp', 'parch', 'fare'] numeric_keys = [n for n in numeric_keys if n not in feature_skip_tuple] train_vectorizer_list = [] test_vectorizer_list = [] n_samples = len(lines) numeric_data = np.zeros((n_samples, len(numeric_keys))) numeric_labels = np.zeros((n_samples,), dtype=int) # Doing this twice is horribly inefficient but the file is small... 
for n, l in enumerate(lines): line_dict = process_titanic_line(l) strings = {k: line_dict[k] for k in string_keys} numeric_labels[n] = line_dict["survived"] sss = StratifiedShuffleSplit(numeric_labels, n_iter=1, test_size=test_size, random_state=12) # This is a weird way to get the indices but it works train_idx = None test_idx = None for train_idx, test_idx in sss: pass for n, l in enumerate(lines): line_dict = process_titanic_line(l) strings = {k: line_dict[k] for k in string_keys} if n in train_idx: train_vectorizer_list.append(strings) else: test_vectorizer_list.append(strings) numeric_data[n] = np.asarray([line_dict[k] for k in numeric_keys]) train_numeric = numeric_data[train_idx] test_numeric = numeric_data[test_idx] train_labels = numeric_labels[train_idx] test_labels = numeric_labels[test_idx] vec = DictVectorizer() # .toarray() due to returning a scipy sparse array train_categorical = vec.fit_transform(train_vectorizer_list).toarray() test_categorical = vec.transform(test_vectorizer_list).toarray() train_data = np.concatenate([train_numeric, train_categorical], axis=1) test_data = np.concatenate([test_numeric, test_categorical], axis=1) keys = numeric_keys + string_keys return keys, train_data, test_data, train_labels, test_labels FIELDNAMES = ('polarity', 'id', 'date', 'query', 'author', 'text') def read_sentiment_csv(csv_file, fieldnames=FIELDNAMES, max_count=None, n_partitions=1, partition_id=0): import csv # put the import inside for use in IPython.parallel def file_opener(csv_file): try: open(csv_file, 'r', encoding="latin1").close() return open(csv_file, 'r', encoding="latin1") except TypeError: # Python 2 does not have encoding arg return open(csv_file, 'rb') texts = [] targets = [] with file_opener(csv_file) as f: reader = csv.DictReader(f, fieldnames=fieldnames, delimiter=',', quotechar='"') pos_count, neg_count = 0, 0 for i, d in enumerate(reader): if i % n_partitions != partition_id: # Skip entry if not in the requested partition continue if d['polarity'] == '4': if max_count and pos_count >= max_count / 2: continue pos_count += 1 texts.append(d['text']) targets.append(1) elif d['polarity'] == '0': if max_count and neg_count >= max_count / 2: continue neg_count += 1 texts.append(d['text']) targets.append(-1) return texts, targets
cc0-1.0
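`load_titanic` above one-hot encodes the string columns with `DictVectorizer` and concatenates them to the numeric block. The core of that pattern in isolation, on toy rows with a couple of the same field names:

import numpy as np
from sklearn.feature_extraction import DictVectorizer

string_rows = [{'sex': 'female', 'embarked': 'S'},
               {'sex': 'male',   'embarked': 'C'},
               {'sex': 'male',   'embarked': 'S'}]
numeric = np.array([[29.0, 1], [40.0, 0], [2.0, 3]])     # e.g. age, sibsp

vec = DictVectorizer()
categorical = vec.fit_transform(string_rows).toarray()   # sparse -> dense, one column per category
X = np.concatenate([numeric, categorical], axis=1)
print(vec.feature_names_, X.shape)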
edhuckle/statsmodels
statsmodels/tools/_testing.py
29
4809
"""Testing helper functions Warning: current status experimental, mostly copy paste Warning: these functions will be changed without warning as the need during refactoring arises. The first group of functions provide consistency checks """ import numpy as np from numpy.testing import assert_allclose, assert_ from nose import SkipTest # the following are copied from # statsmodels.base.tests.test_generic_methods.CheckGenericMixin # and only adjusted to work as standalone functions def check_ttest_tvalues(results): # test that t_test has same results a params, bse, tvalues, ... res = results mat = np.eye(len(res.params)) tt = res.t_test(mat) assert_allclose(tt.effect, res.params, rtol=1e-12) # TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10) assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12) assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10) assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10) # test params table frame returned by t_test table_res = np.column_stack((res.params, res.bse, res.tvalues, res.pvalues, res.conf_int())) table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue, tt.conf_int())) table2 = tt.summary_frame().values assert_allclose(table2, table_res, rtol=1e-12) # move this to test_attributes ? assert_(hasattr(res, 'use_t')) tt = res.t_test(mat[0]) tt.summary() # smoke test for #1323 assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10) def check_ftest_pvalues(results): res = results use_t = res.use_t k_vars = len(res.params) # check default use_t pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue for k in range(k_vars)] assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25) # sutomatic use_f based on results class use_t pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue for k in range(k_vars)] assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25) # label for pvalues in summary string_use_t = 'P>|z|' if use_t is False else 'P>|t|' summ = str(res.summary()) assert_(string_use_t in summ) # try except for models that don't have summary2 try: summ2 = str(res.summary2()) except AttributeError: summ2 = None if summ2 is not None: assert_(string_use_t in summ2) # TODO The following is not (yet) guaranteed across models #@knownfailureif(True) def check_fitted(results): # ignore wrapper for isinstance check from statsmodels.genmod.generalized_linear_model import GLMResults from statsmodels.discrete.discrete_model import DiscreteResults # FIXME: work around GEE has no wrapper if hasattr(results, '_results'): results = results._results else: results = results if (isinstance(results, GLMResults) or isinstance(results, DiscreteResults)): raise SkipTest res = results fitted = res.fittedvalues assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12) assert_allclose(fitted, res.predict(), rtol=1e-12) def check_predict_types(results): res = results # squeeze to make 1d for single regressor test case p_exog = np.squeeze(np.asarray(res.model.exog[:2])) # ignore wrapper for isinstance check from statsmodels.genmod.generalized_linear_model import GLMResults from statsmodels.discrete.discrete_model import DiscreteResults # FIXME: work around GEE has no wrapper if hasattr(results, '_results'): results = results._results else: results = results if (isinstance(results, GLMResults) or isinstance(results, DiscreteResults)): # SMOKE test only TODO res.predict(p_exog) res.predict(p_exog.tolist()) res.predict(p_exog[0].tolist()) else: fitted = res.fittedvalues[:2] 
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12) # this needs reshape to column-vector: assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()), rtol=1e-12) # only one prediction: assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()), rtol=1e-12) assert_allclose(fitted[:1], res.predict(p_exog[0]), rtol=1e-12) # predict doesn't preserve DataFrame, e.g. dot converts to ndarray #import pandas #predicted = res.predict(pandas.DataFrame(p_exog)) #assert_(isinstance(predicted, pandas.DataFrame)) #assert_allclose(predicted, fitted, rtol=1e-12)
bsd-3-clause
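`check_ttest_tvalues` above asserts that `t_test` over the identity restriction matrix reproduces the usual coefficient table. A quick demonstration of that equivalence on a toy OLS fit, using only the public statsmodels API:

import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
X = sm.add_constant(rng.rand(100, 2))
y = X @ np.array([1.0, 2.0, -1.0]) + rng.randn(100)

res = sm.OLS(y, X).fit()
tt = res.t_test(np.eye(len(res.params)))    # one restriction per parameter

np.testing.assert_allclose(tt.effect, res.params, rtol=1e-12)
np.testing.assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
print(tt.summary_frame())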
pv/scikit-learn
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
252
3490
#!/usr/bin/python # -*- coding: utf-8 -*- """ ============================================================================== Gaussian Processes classification example: exploiting the probabilistic output ============================================================================== A two-dimensional regression exercise with a post-processing allowing for probabilistic classification thanks to the Gaussian property of the prediction. The figure illustrates the probability that the prediction is negative with respect to the remaining uncertainty in the prediction. The red and blue lines corresponds to the 95% confidence interval on the prediction of the zero level set. """ print(__doc__) # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # Licence: BSD 3 clause import numpy as np from scipy import stats from sklearn.gaussian_process import GaussianProcess from matplotlib import pyplot as pl from matplotlib import cm # Standard normal distribution functions phi = stats.distributions.norm().pdf PHI = stats.distributions.norm().cdf PHIinv = stats.distributions.norm().ppf # A few constants lim = 8 def g(x): """The function to predict (classification will then consist in predicting whether g(x) <= 0 or not)""" return 5. - x[:, 1] - .5 * x[:, 0] ** 2. # Design of experiments X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) # Observations y = g(X) # Instanciate and fit Gaussian Process Model gp = GaussianProcess(theta0=5e-1) # Don't perform MLE or you'll get a perfect prediction for this simple example! gp.fit(X, y) # Evaluate real function, the prediction and its MSE on a grid res = 50 x1, x2 = np.meshgrid(np.linspace(- lim, lim, res), np.linspace(- lim, lim, res)) xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T y_true = g(xx) y_pred, MSE = gp.predict(xx, eval_MSE=True) sigma = np.sqrt(MSE) y_true = y_true.reshape((res, res)) y_pred = y_pred.reshape((res, res)) sigma = sigma.reshape((res, res)) k = PHIinv(.975) # Plot the probabilistic classification iso-values using the Gaussian property # of the prediction fig = pl.figure(1) ax = fig.add_subplot(111) ax.axes.set_aspect('equal') pl.xticks([]) pl.yticks([]) ax.set_xticklabels([]) ax.set_yticklabels([]) pl.xlabel('$x_1$') pl.ylabel('$x_2$') cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8, extent=(- lim, lim, - lim, lim)) norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9) cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm) cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$') pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12) pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12) cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot') cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b', linestyles='solid') pl.clabel(cs, fontsize=11) cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k', linestyles='dashed') pl.clabel(cs, fontsize=11) cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r', linestyles='solid') pl.clabel(cs, fontsize=11) pl.show()
bsd-3-clause
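The classification step in the example is just the Gaussian tail probability P[G(x) <= 0] evaluated from the predictive mean and standard deviation. Isolated, with made-up numbers:

import numpy as np
from scipy import stats

y_pred = np.array([2.0, 0.5, -1.0])   # predictive means at three test points
sigma = np.array([1.0, 1.0, 0.5])     # predictive standard deviations

# P[G(x) <= 0] under the Gaussian predictive distribution N(y_pred, sigma^2)
p_negative = stats.norm.cdf(-y_pred / sigma)
print(p_negative)                     # roughly [0.023, 0.31, 0.98]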
aetilley/scikit-learn
examples/cluster/plot_lena_compress.py
271
2229
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Vector Quantization Example ========================================================= The classic image processing example, Lena, an 8-bit grayscale bit-depth, 512 x 512 sized image, is used here to illustrate how `k`-means is used for vector quantization. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn import cluster n_clusters = 5 np.random.seed(0) try: lena = sp.lena() except AttributeError: # Newer versions of scipy have lena in misc from scipy import misc lena = misc.lena() X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ # create an array from labels and values lena_compressed = np.choose(labels, values) lena_compressed.shape = lena.shape vmin = lena.min() vmax = lena.max() # original lena plt.figure(1, figsize=(3, 2.2)) plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256) # compressed lena plt.figure(2, figsize=(3, 2.2)) plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # equal bins lena regular_values = np.linspace(0, 256, n_clusters + 1) regular_labels = np.searchsorted(regular_values, lena) - 1 regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean regular_lena = np.choose(regular_labels.ravel(), regular_values) regular_lena.shape = lena.shape plt.figure(3, figsize=(3, 2.2)) plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # histogram plt.figure(4, figsize=(3, 2.2)) plt.clf() plt.axes([.01, .01, .98, .98]) plt.hist(X, bins=256, color='.5', edgecolor='.5') plt.yticks(()) plt.xticks(regular_values) values = np.sort(values) for center_1, center_2 in zip(values[:-1], values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b') for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--') plt.show()
bsd-3-clause
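The Lena loader used above (sp.lena / scipy.misc.lena) has been removed from recent SciPy releases. A minimal, self-contained sketch of the same k-means vector quantization on a synthetic grayscale image, so it runs on current libraries; the image itself is an assumption for illustration.

import numpy as np
from sklearn import cluster

rng = np.random.RandomState(0)
image = rng.randint(0, 256, size=(64, 64)).astype(float)  # stand-in image

X = image.reshape((-1, 1))                     # (n_pixels, 1) feature matrix
k_means = cluster.KMeans(n_clusters=5, n_init=4, random_state=0)
labels = k_means.fit_predict(X)
values = k_means.cluster_centers_.squeeze()

# replace each pixel by its cluster center -> at most 5 gray levels remain
compressed = values[labels].reshape(image.shape)
print(compressed.shape, np.unique(compressed).size)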
azjps/bokeh
bokeh/charts/builders/horizon_builder.py
6
6668
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the Horizon class which lets you build your Horizon charts just passing the arguments to the Chart class and calling the proper functions. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import from bokeh.charts.builder import create_and_build from bokeh.charts.glyphs import HorizonGlyph from .line_builder import LineBuilder from ...core.properties import Float, Int, List, string_types, String, Color, Bool from ..attributes import ColorAttr, IdAttr from ...models.sources import ColumnDataSource from ...models.axes import CategoricalAxis from ...models.ranges import FactorRange, DataRange1d #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- def Horizon(data=None, x=None, y=None, series=None, **kws): """ Create a horizon chart using :class:`HorizonBuilder <bokeh.charts.builders.scatter_builder.HorizonBuilder>` to render the geometry from values. Args: data (:ref:`userguide_charts_data_types`): table-like data x (str or list(str), optional): the column label to use for the x dimension y (str or list(str), optional): the column label to use for the y dimension In addition to the parameters specific to this chart, :ref:`userguide_charts_defaults` are also accepted as keyword parameters. Returns: :class:`Chart`: includes glyph renderers that generate the scatter points Examples: .. 
bokeh-plot:: :source-position: above import pandas as pd from bokeh.charts import Horizon, output_file, show # read in some stock data from the Yahoo Finance API AAPL = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) MSFT = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) IBM = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) data = dict([ ('AAPL', AAPL['Adj Close']), ('Date', AAPL['Date']), ('MSFT', MSFT['Adj Close']), ('IBM', IBM['Adj Close'])] ) hp = Horizon(data, x='Date', plot_width=800, plot_height=300, title="horizon plot using stock inputs") output_file("horizon.html") show(hp) """ kws['x'] = x kws['y'] = y kws['series'] = series tools = kws.get('tools', True) if tools == True: tools = "save,reset" elif isinstance(tools, string_types): tools = tools.replace('pan', '') tools = tools.replace('wheel_zoom', '') tools = tools.replace('box_zoom', '') tools = tools.replace(',,', ',') kws['tools'] = tools chart = create_and_build(HorizonBuilder, data, **kws) # Hide numerical axis chart.left[0].visible = False # Add the series names to the y axis chart.extra_y_ranges = {"series": FactorRange(factors=chart._builders[0].series_names)} chart.add_layout(CategoricalAxis(y_range_name="series"), 'left') return chart class HorizonBuilder(LineBuilder): """Produces glyph renderers representing a horizon chart from many input types. The builder handles ingesting the data, deriving settings when not provided, building the renderers, then setting ranges, and modifying the chart as needed. """ # class configuration glyph = HorizonGlyph default_attributes = {'color': ColorAttr(sort=False), 'series': IdAttr(sort=False)} # primary input properties pos_color = Color("#006400", help=""" The color of the positive folds. (default: "#006400") """) neg_color = Color("#6495ed", help=""" The color of the negative folds. (default: "#6495ed") """) num_folds = Int(3, help=""" The number of folds stacked on top of each other. (default: 3) """) flip_neg = Bool(default=True, help="""When True, the negative values will be plotted as their absolute value, then their individual axes is flipped. If False, then the negative values will still be taken as their absolute value, but the base of their shape will start from the same origin as the positive values. 
""") # derived properties series_count = Int(help="""Count of the unique series names.""") bins = List(Float, help="""The binedges calculated from the number of folds, and the maximum value of the entire source data.""") series_column = String(help="""The column that contains the series names.""") fold_height = Float(help="""The size of the bin.""") def setup(self): super(HorizonBuilder, self).setup() # collect series names and columns selected to color by if self.attributes['series'].columns is None: self.series_column = self.attributes['color'].columns[0] else: self.series_column = self.attributes['series'].columns[0] if len(self.series_names) == 0: self.set_series(self.series_column) self.series_count = len(self.series_names) def process_data(self): super(HorizonBuilder, self).process_data() # calculate group attributes, useful for each horizon glyph self.fold_height = max(self.y.max, abs(self.y.min))/self.num_folds self.bins = [bin_id * self.fold_height for bin_id in range(self.num_folds + 1)] # manually set attributes to have constant color ds = ColumnDataSource(self._data.df) self.attributes['series'].setup(data=ds, columns=self.series_column) self.attributes['color'].setup(data=ds, columns=self.pos_color) def set_ranges(self): super(HorizonBuilder, self).set_ranges() self.x_range = DataRange1d(range_padding=0) self.y_range.start = 0 self.y_range.end = self.y.max
bsd-3-clause
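The bokeh.charts interface shown above has since been removed from Bokeh, so the builder itself is hard to run as-is. A NumPy-only sketch of just the fold/bin arithmetic that process_data performs (fold_height and bins); the sample series is an assumption for illustration.

import numpy as np

y = np.array([3.0, -1.5, 7.2, -6.0, 2.4])
num_folds = 3

# same computation as HorizonBuilder.process_data
fold_height = max(y.max(), abs(y.min())) / num_folds
bins = [bin_id * fold_height for bin_id in range(num_folds + 1)]

# which fold each |value| falls into (top fold clipped)
fold_of = np.clip((np.abs(y) // fold_height).astype(int), 0, num_folds - 1)
print(fold_height, bins, fold_of)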
pscholl/imustat
detect_peaks.py
5
6615
"""Detect peaks in data based on their amplitude and other features.""" from __future__ import division, print_function import numpy as np __author__ = "Marcos Duarte, https://github.com/demotu/BMC" __version__ = "1.0.4" __license__ = "MIT" def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=None): """Detect peaks in data based on their amplitude and other features. Parameters ---------- x : 1D array_like data. mph : {None, number}, optional (default = None) detect peaks that are greater than minimum peak height. mpd : positive integer, optional (default = 1) detect peaks that are at least separated by minimum peak distance (in number of data). threshold : positive number, optional (default = 0) detect peaks (valleys) that are greater (smaller) than `threshold` in relation to their immediate neighbors. edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising') for a flat peak, keep only the rising edge ('rising'), only the falling edge ('falling'), both edges ('both'), or don't detect a flat peak (None). kpsh : bool, optional (default = False) keep peaks with same height even if they are closer than `mpd`. valley : bool, optional (default = False) if True (1), detect valleys (local minima) instead of peaks. show : bool, optional (default = False) if True (1), plot data in matplotlib figure. ax : a matplotlib.axes.Axes instance, optional (default = None). Returns ------- ind : 1D array_like indeces of the peaks in `x`. Notes ----- The detection of valleys instead of peaks is performed internally by simply negating the data: `ind_valleys = detect_peaks(-x)` The function can handle NaN's See this IPython Notebook [1]_. References ---------- .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb Examples -------- >>> from detect_peaks import detect_peaks >>> x = np.random.randn(100) >>> x[60:81] = np.nan >>> # detect all peaks and plot data >>> ind = detect_peaks(x, show=True) >>> print(ind) >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 >>> # set minimum peak height = 0 and minimum peak distance = 20 >>> detect_peaks(x, mph=0, mpd=20, show=True) >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0] >>> # set minimum peak distance = 2 >>> detect_peaks(x, mpd=2, show=True) >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 >>> # detection of valleys instead of peaks >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True) >>> x = [0, 1, 1, 0, 1, 1, 0] >>> # detect both edges >>> detect_peaks(x, edge='both', show=True) >>> x = [-2, 1, -2, 2, 1, 1, 3, 0] >>> # set threshold = 2 >>> detect_peaks(x, threshold = 2, show=True) """ x = np.atleast_1d(x).astype('float64') if x.size < 3: return np.array([], dtype=int) if valley: x = -x # find indexes of all peaks dx = x[1:] - x[:-1] # handle NaN's indnan = np.where(np.isnan(x))[0] if indnan.size: x[indnan] = np.inf dx[np.where(np.isnan(dx))[0]] = np.inf ine, ire, ife = np.array([[], [], []], dtype=int) if not edge: ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0] else: if edge.lower() in ['rising', 'both']: ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0] if edge.lower() in ['falling', 'both']: ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0] ind = np.unique(np.hstack((ine, ire, ife))) # handle NaN's if ind.size and indnan.size: # NaN's and values close to NaN's cannot be peaks ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, 
indnan+1))), invert=True)] # first and last values of x cannot be peaks if ind.size and ind[0] == 0: ind = ind[1:] if ind.size and ind[-1] == x.size-1: ind = ind[:-1] # remove peaks < minimum peak height if ind.size and mph is not None: ind = ind[x[ind] >= mph] # remove peaks - neighbors < threshold if ind.size and threshold > 0: dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0) ind = np.delete(ind, np.where(dx < threshold)[0]) # detect small peaks closer than minimum peak distance if ind.size and mpd > 1: ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height idel = np.zeros(ind.size, dtype=bool) for i in range(ind.size): if not idel[i]: # keep peaks with the same height if kpsh is True idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \ & (x[ind[i]] > x[ind] if kpsh else True) idel[i] = 0 # Keep current peak # remove the small peaks and sort back the indexes by their occurrence ind = np.sort(ind[~idel]) if show: if indnan.size: x[indnan] = np.nan if valley: x = -x _plot(x, mph, mpd, threshold, edge, valley, ax, ind) return ind def _plot(x, mph, mpd, threshold, edge, valley, ax, ind): """Plot results of the detect_peaks function, see its help.""" try: import matplotlib.pyplot as plt except ImportError: print('matplotlib is not available.') else: if ax is None: _, ax = plt.subplots(1, 1, figsize=(8, 4)) ax.plot(x, 'b', lw=1) if ind.size: label = 'valley' if valley else 'peak' label = label + 's' if ind.size > 1 else label ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8, label='%d %s' % (ind.size, label)) ax.legend(loc='best', framealpha=.5, numpoints=1) ax.set_xlim(-.02*x.size, x.size*1.02-1) ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max() yrange = ymax - ymin if ymax > ymin else 1 ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange) ax.set_xlabel('Data #', fontsize=14) ax.set_ylabel('Amplitude', fontsize=14) mode = 'Valley detection' if valley else 'Peak detection' ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')" % (mode, str(mph), mpd, str(threshold), edge)) # plt.grid() plt.show()
gpl-3.0
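The docstring above describes peaks as sign changes of the first difference. A tiny sketch of that core rising-edge test on one of the docstring's own example signals, stripped of the NaN handling, thresholds, and plotting:

import numpy as np

x = np.array([0., 1., 0., 2., 0., 3., 0., 2., 0., 1., 0.])
dx = np.diff(x)
# a rising-edge peak: previous difference > 0 and next difference <= 0
rising_peaks = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
print(rising_peaks)  # -> [1 3 5 7 9]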
rohanp/scikit-learn
sklearn/preprocessing/tests/test_label.py
156
17626
import numpy as np from scipy.sparse import issparse from scipy.sparse import coo_matrix from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.utils.multiclass import type_of_target from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.preprocessing.label import LabelBinarizer from sklearn.preprocessing.label import MultiLabelBinarizer from sklearn.preprocessing.label import LabelEncoder from sklearn.preprocessing.label import label_binarize from sklearn.preprocessing.label import _inverse_binarize_thresholding from sklearn.preprocessing.label import _inverse_binarize_multiclass from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_label_binarizer(): lb = LabelBinarizer() # one-class case defaults to negative label inp = ["pos", "pos", "pos", "pos"] expected = np.array([[0, 0, 0, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) # two-class case inp = ["neg", "pos", "pos", "neg"] expected = np.array([[0, 1, 1, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["neg", "pos"]) assert_array_equal(expected, got) to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) assert_array_equal(lb.inverse_transform(to_invert), inp) # multi-class case inp = ["spam", "ham", "eggs", "ham", "0"] expected = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]) got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam']) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) def test_label_binarizer_unseen_labels(): lb = LabelBinarizer() expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) got = lb.fit_transform(['b', 'd', 'e']) assert_array_equal(expected, got) expected = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]) got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f']) assert_array_equal(expected, got) def test_label_binarizer_set_label_encoding(): lb = LabelBinarizer(neg_label=-2, pos_label=0) # two-class case with pos_label=0 inp = np.array([0, 1, 1, 0]) expected = np.array([[-2, 0, 0, -2]]).T got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) lb = LabelBinarizer(neg_label=-2, pos_label=2) # multi-class case inp = np.array([3, 2, 1, 2, 0]) expected = np.array([[-2, -2, -2, +2], [-2, -2, +2, -2], [-2, +2, -2, -2], [-2, -2, +2, -2], [+2, -2, -2, -2]]) got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) @ignore_warnings def test_label_binarizer_errors(): # Check that invalid arguments yield ValueError one_class = np.array([0, 0, 0, 0]) lb = LabelBinarizer().fit(one_class) multi_label = [(2, 3), (0,), (0, 2)] assert_raises(ValueError, lb.transform, multi_label) lb = LabelBinarizer() assert_raises(ValueError, lb.transform, []) assert_raises(ValueError, lb.inverse_transform, []) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2) assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2, sparse_output=True) # Fail on y_type 
assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2], threshold=0) # Sequence of seq type should raise ValueError y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs) # Fail on the number of classes assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2, 3], threshold=0) # Fail on the dimension of 'binary' assert_raises(ValueError, _inverse_binarize_thresholding, y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary", classes=[1, 2, 3], threshold=0) # Fail on multioutput data assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]])) assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]), [1, 2, 3]) def test_label_encoder(): # Test LabelEncoder's transform and inverse_transform methods le = LabelEncoder() le.fit([1, 1, 4, 5, -1, 0]) assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1]) assert_raises(ValueError, le.transform, [0, 6]) def test_label_encoder_fit_transform(): # Test fit_transform le = LabelEncoder() ret = le.fit_transform([1, 1, 4, 5, -1, 0]) assert_array_equal(ret, [2, 2, 3, 4, 0, 1]) le = LabelEncoder() ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"]) assert_array_equal(ret, [1, 1, 2, 0]) def test_label_encoder_errors(): # Check that invalid arguments yield ValueError le = LabelEncoder() assert_raises(ValueError, le.transform, []) assert_raises(ValueError, le.inverse_transform, []) # Fail on unseen labels le = LabelEncoder() le.fit([1, 2, 3, 1, -1]) assert_raises(ValueError, le.inverse_transform, [-1]) def test_sparse_output_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for sparse_output in [True, False]: for inp in inputs: # With fit_tranform mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit_transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit(inp()).transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) assert_raises(ValueError, mlb.inverse_transform, csr_matrix(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]]))) def test_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for inp in inputs: # With fit_tranform mlb = MultiLabelBinarizer() got = mlb.fit_transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer() got = 
mlb.fit(inp()).transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) def test_multilabel_binarizer_empty_sample(): mlb = MultiLabelBinarizer() y = [[1, 2], [1], []] Y = np.array([[1, 1], [1, 0], [0, 0]]) assert_array_equal(mlb.fit_transform(y), Y) def test_multilabel_binarizer_unknown_class(): mlb = MultiLabelBinarizer() y = [[1, 2]] assert_raises(KeyError, mlb.fit(y).transform, [[0]]) mlb = MultiLabelBinarizer(classes=[1, 2]) assert_raises(KeyError, mlb.fit_transform, [[0]]) def test_multilabel_binarizer_given_classes(): inp = [(2, 3), (1,), (1, 2)] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # fit().transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # ensure works with extra class mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat))) assert_array_equal(mlb.classes_, [4, 1, 3, 2]) # ensure fit is no-op as iterable is not consumed inp = iter(inp) mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) def test_multilabel_binarizer_same_length_sequence(): # Ensure sequences of the same length are not interpreted as a 2-d array inp = [[1], [0], [2]] indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) def test_multilabel_binarizer_non_integer_labels(): tuple_classes = np.empty(3, dtype=object) tuple_classes[:] = [(1,), (2,), (3,)] inputs = [ ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']), ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']), ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) for inp, classes in inputs: # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) mlb = MultiLabelBinarizer() assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})]) def test_multilabel_binarizer_non_unique(): inp = [(1, 1, 1, 0)] indicator_mat = np.array([[1, 1]]) mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) def test_multilabel_binarizer_inverse_validation(): inp = [(1, 1, 1, 0)] mlb = MultiLabelBinarizer() mlb.fit_transform(inp) # Not binary assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]])) # The following binary cases are fine, however mlb.inverse_transform(np.array([[0, 0]])) mlb.inverse_transform(np.array([[1, 1]])) mlb.inverse_transform(np.array([[1, 0]])) # Wrong shape assert_raises(ValueError, mlb.inverse_transform, np.array([[1]])) 
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]])) def test_label_binarize_with_class_order(): out = label_binarize([1, 6], classes=[1, 2, 4, 6]) expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) assert_array_equal(out, expected) # Modified class order out = label_binarize([1, 6], classes=[1, 6, 4, 2]) expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) assert_array_equal(out, expected) out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(out, expected) def check_binarized_results(y, classes, pos_label, neg_label, expected): for sparse_output in [True, False]: if ((pos_label == 0 or neg_label != 0) and sparse_output): assert_raises(ValueError, label_binarize, y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) continue # check label_binarize binarized = label_binarize(y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) # check inverse y_type = type_of_target(y) if y_type == "multiclass": inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding(binarized, output_type=y_type, classes=classes, threshold=((neg_label + pos_label) / 2.)) assert_array_equal(toarray(inversed), toarray(y)) # Check label binarizer lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert_equal(issparse(inverse_output), issparse(y)) def test_label_binarize_binary(): y = [0, 1, 0] classes = [0, 1] pos_label = 2 neg_label = -1 expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected # Binary case where sparse_output = True will not result in a ValueError y = [0, 1, 0] classes = [0, 1] pos_label = 3 neg_label = 0 expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) yield check_binarized_results, y, classes, pos_label, neg_label, expected assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_label_binarize_multilabel(): y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]]) classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = pos_label * y_ind y_sparse = [sparse_matrix(y_ind) for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix]] for y in [y_ind] + y_sparse: yield (check_binarized_results, y, classes, pos_label, neg_label, expected) assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_invalid_input_label_binarize(): assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2], pos_label=0, neg_label=1) def test_inverse_binarize_multiclass(): got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3)) assert_array_equal(got, np.array([1, 1, 0]))
bsd-3-clause
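A short usage sketch of the two transformers these tests exercise, mirroring the multi-class and multi-label inputs used above:

from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer

lb = LabelBinarizer()
Y = lb.fit_transform(["spam", "ham", "eggs", "ham"])
print(lb.classes_)              # ['eggs' 'ham' 'spam']
print(lb.inverse_transform(Y))  # round-trips back to the input labels

mlb = MultiLabelBinarizer()
M = mlb.fit_transform([(2, 3), (1,), (1, 2)])
print(mlb.classes_)             # [1 2 3]
print(M)                        # indicator matrix, one column per class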
dsullivan7/scikit-learn
sklearn/utils/tests/test_fixes.py
281
1829
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
#          Justin Vincent
#          Lars Buitinck
# License: BSD 3 clause

import numpy as np

from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true

from numpy.testing import (assert_almost_equal,
                           assert_array_almost_equal)

from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype


def test_expit():
    # Check numerical stability of expit (logistic function).
    # Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
    assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
    assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
                        decimal=16)

    x = np.arange(10)
    out = np.zeros_like(x, dtype=np.float32)
    assert_array_almost_equal(expit(x), expit(x, out=out))


def test_divide():
    assert_equal(divide(.6, 1), .600000000000)


def test_astype_copy_memory():
    a_int32 = np.ones(3, np.int32)

    # Check that dtype conversion works
    b_float32 = astype(a_int32, dtype=np.float32, copy=False)
    assert_equal(b_float32.dtype, np.float32)

    # Changing dtype forces a copy even if copy=False
    assert_false(np.may_share_memory(b_float32, a_int32))

    # Check that copy can be skipped if requested dtype match
    c_int32 = astype(a_int32, dtype=np.int32, copy=False)
    assert_true(c_int32 is a_int32)

    # Check that copy can be forced, and is the case by default:
    d_int32 = astype(a_int32, dtype=np.int32, copy=True)
    assert_false(np.may_share_memory(d_int32, a_int32))

    e_int32 = astype(a_int32, dtype=np.int32)
    assert_false(np.may_share_memory(e_int32, a_int32))
bsd-3-clause
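test_expit above checks a stable logistic against the two complementary closed forms at x = ±1000. A small standalone sketch of the numerical property being tested; stable_logistic is a hypothetical helper written for illustration, not part of scikit-learn, and scipy.special.expit is used only as a reference.

import numpy as np
from scipy.special import expit  # stable reference implementation


def stable_logistic(x):
    # use 1/(1+exp(-x)) for x >= 0 and exp(x)/(1+exp(x)) for x < 0,
    # so exp() is never evaluated at a large positive argument
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out


x = np.array([-1000., -50., 0., 50., 1000.])
print(np.allclose(stable_logistic(x), expit(x)))  # True, with no overflow warnings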
alvason/probability-insighter
code/mutation-drift-selection.py
1
11460
# -*- coding: utf-8 -*- # <nbformat>3.0</nbformat> # <headingcell level=1> # Wright-Fisher model of mutation, selection and random genetic drift # <markdowncell> # A Wright-Fisher model has a fixed population size *N* and discrete non-overlapping generations. Each generation, each individual has a random number of offspring whose mean is proportional to the individual's fitness. Each generation, mutation may occur. Mutations may increase or decrease individual's fitness, which affects the chances of that individual's offspring in subsequent generations. # <markdowncell> # Here, I'm using a fitness model where some proportion of the time a mutation will have a fixed fitness effect, increasing or decreasing fitness by a fixed amount. # <headingcell level=2> # Setup # <codecell> import numpy as np import itertools # <headingcell level=2> # Make population dynamic model # <headingcell level=3> # Basic parameters # <codecell> pop_size = 100 # <codecell> seq_length = 10 # <codecell> alphabet = ['A', 'T'] # <codecell> base_haplotype = "AAAAAAAAAA" # <codecell> fitness_effect = 1.1 # fitness effect if a functional mutation occurs # <codecell> fitness_chance = 0.1 # chance that a mutation has a fitness effect # <headingcell level=3> # Population of haplotypes maps to counts and fitnesses # <markdowncell> # Store this as a lightweight Dictionary that maps a string to a count. All the sequences together will have count *N*. # <codecell> pop = {} # <codecell> pop["AAAAAAAAAA"] = 40 # <codecell> pop["AAATAAAAAA"] = 30 # <codecell> pop["AATTTAAAAA"] = 30 # <markdowncell> # *Map haplotype string to fitness float.* # <codecell> fitness = {} # <codecell> fitness["AAAAAAAAAA"] = 1.0 # <codecell> fitness["AAATAAAAAA"] = 1.05 # <codecell> fitness["AATTTAAAAA"] = 1.10 # <codecell> pop["AAATAAAAAA"] # <codecell> fitness["AAATAAAAAA"] # <headingcell level=3> # Add mutation # <codecell> mutation_rate = 0.005 # per gen per individual per site # <codecell> def get_mutation_count(): mean = mutation_rate * pop_size * seq_length return np.random.poisson(mean) # <codecell> def get_random_haplotype(): haplotypes = pop.keys() frequencies = [x/float(pop_size) for x in pop.values()] total = sum(frequencies) frequencies = [x / total for x in frequencies] return np.random.choice(haplotypes, p=frequencies) # <codecell> def get_mutant(haplotype): site = np.random.randint(seq_length) possible_mutations = list(alphabet) possible_mutations.remove(haplotype[site]) mutation = np.random.choice(possible_mutations) new_haplotype = haplotype[:site] + mutation + haplotype[site+1:] return new_haplotype # <markdowncell> # *Mutations have fitness effects* # <codecell> def get_fitness(haplotype): old_fitness = fitness[haplotype] if (np.random.random() < fitness_chance): return old_fitness * fitness_effect else: return old_fitness # <codecell> get_fitness("AAAAAAAAAA") # <markdowncell> # *If a mutation event creates a new haplotype, assign it a random fitness.* # <codecell> def mutation_event(): haplotype = get_random_haplotype() if pop[haplotype] > 1: pop[haplotype] -= 1 new_haplotype = get_mutant(haplotype) if new_haplotype in pop: pop[new_haplotype] += 1 else: pop[new_haplotype] = 1 if new_haplotype not in fitness: fitness[new_haplotype] = get_fitness(haplotype) # <codecell> mutation_event() # <codecell> pop # <codecell> fitness # <codecell> def mutation_step(): mutation_count = get_mutation_count() for i in range(mutation_count): mutation_event() # <headingcell level=3> # Genetic drift and fitness affect which haplotypes make it to the 
next generation # <markdowncell> # *Fitness weights the multinomial draw.* # <codecell> def get_offspring_counts(): haplotypes = pop.keys() frequencies = [pop[haplotype]/float(pop_size) for haplotype in haplotypes] fitnesses = [fitness[haplotype] for haplotype in haplotypes] weights = [x * y for x,y in zip(frequencies, fitnesses)] total = sum(weights) weights = [x / total for x in weights] return list(np.random.multinomial(pop_size, weights)) # <codecell> get_offspring_counts() # <codecell> def offspring_step(): counts = get_offspring_counts() for (haplotype, count) in zip(pop.keys(), counts): if (count > 0): pop[haplotype] = count else: del pop[haplotype] # <headingcell level=3> # Combine and iterate # <codecell> def time_step(): mutation_step() offspring_step() # <codecell> generations = 5 # <codecell> def simulate(): for i in range(generations): time_step() # <headingcell level=3> # Record # <markdowncell> # We want to keep a record of past population frequencies to understand dynamics through time. At each step in the simulation, we append to a history object. # <codecell> history = [] # <codecell> def simulate(): clone_pop = dict(pop) history.append(clone_pop) for i in range(generations): time_step() clone_pop = dict(pop) history.append(clone_pop) # <codecell> simulate() # <headingcell level=2> # Analyze trajectories # <headingcell level=3> # Calculate diversity # <codecell> def get_distance(seq_a, seq_b): diffs = 0 length = len(seq_a) assert len(seq_a) == len(seq_b) for chr_a, chr_b in zip(seq_a, seq_b): if chr_a != chr_b: diffs += 1 return diffs / float(length) # <codecell> def get_diversity(population): haplotypes = population.keys() haplotype_count = len(haplotypes) diversity = 0 for i in range(haplotype_count): for j in range(haplotype_count): haplotype_a = haplotypes[i] haplotype_b = haplotypes[j] frequency_a = population[haplotype_a] / float(pop_size) frequency_b = population[haplotype_b] / float(pop_size) frequency_pair = frequency_a * frequency_b diversity += frequency_pair * get_distance(haplotype_a, haplotype_b) return diversity # <codecell> def get_diversity_trajectory(): trajectory = [get_diversity(generation) for generation in history] return trajectory # <headingcell level=3> # Plot diversity # <codecell> %matplotlib inline import matplotlib.pyplot as plt import matplotlib as mpl # <codecell> def diversity_plot(): mpl.rcParams['font.size']=14 trajectory = get_diversity_trajectory() plt.plot(trajectory, "#447CCD") plt.ylabel("diversity") plt.xlabel("generation") # <headingcell level=3> # Analyze and plot divergence # <codecell> def get_divergence(population): haplotypes = population.keys() divergence = 0 for haplotype in haplotypes: frequency = population[haplotype] / float(pop_size) divergence += frequency * get_distance(base_haplotype, haplotype) return divergence # <codecell> def get_divergence_trajectory(): trajectory = [get_divergence(generation) for generation in history] return trajectory # <codecell> def divergence_plot(): mpl.rcParams['font.size']=14 trajectory = get_divergence_trajectory() plt.plot(trajectory, "#447CCD") plt.ylabel("divergence") plt.xlabel("generation") # <headingcell level=3> # Plot haplotype trajectories # <codecell> def get_frequency(haplotype, generation): pop_at_generation = history[generation] if haplotype in pop_at_generation: return pop_at_generation[haplotype]/float(pop_size) else: return 0 # <codecell> def get_trajectory(haplotype): trajectory = [get_frequency(haplotype, gen) for gen in range(generations)] return trajectory # 
<codecell> def get_all_haplotypes(): haplotypes = set() for generation in history: for haplotype in generation: haplotypes.add(haplotype) return haplotypes # <codecell> colors = ["#781C86", "#571EA2", "#462EB9", "#3F47C9", "#3F63CF", "#447CCD", "#4C90C0", "#56A0AE", "#63AC9A", "#72B485", "#83BA70", "#96BD60", "#AABD52", "#BDBB48", "#CEB541", "#DCAB3C", "#E49938", "#E68133", "#E4632E", "#DF4327", "#DB2122"] # <codecell> colors_lighter = ["#A567AF", "#8F69C1", "#8474D1", "#7F85DB", "#7F97DF", "#82A8DD", "#88B5D5", "#8FC0C9", "#97C8BC", "#A1CDAD", "#ACD1A0", "#B9D395", "#C6D38C", "#D3D285", "#DECE81", "#E8C77D", "#EDBB7A", "#EEAB77", "#ED9773", "#EA816F", "#E76B6B"] # <codecell> def stacked_trajectory_plot(xlabel="generation"): mpl.rcParams['font.size']=18 haplotypes = get_all_haplotypes() trajectories = [get_trajectory(haplotype) for haplotype in haplotypes] plt.stackplot(range(generations), trajectories, colors=colors_lighter) plt.ylim(0, 1) plt.ylabel("frequency") plt.xlabel(xlabel) # <headingcell level=3> # Plot SNP trajectories # <codecell> def get_snp_frequency(site, generation): minor_allele_frequency = 0.0 pop_at_generation = history[generation] for haplotype in pop_at_generation.keys(): allele = haplotype[site] frequency = pop_at_generation[haplotype] / float(pop_size) if allele != "A": minor_allele_frequency += frequency return minor_allele_frequency # <codecell> def get_snp_trajectory(site): trajectory = [get_snp_frequency(site, gen) for gen in range(generations)] return trajectory # <markdowncell> # Find all variable sites. # <codecell> def get_all_snps(): snps = set() for generation in history: for haplotype in generation: for site in range(seq_length): if haplotype[site] != "A": snps.add(site) return snps # <codecell> def snp_trajectory_plot(xlabel="generation"): mpl.rcParams['font.size']=18 snps = get_all_snps() trajectories = [get_snp_trajectory(snp) for snp in snps] data = [] for trajectory, color in itertools.izip(trajectories, itertools.cycle(colors)): data.append(range(generations)) data.append(trajectory) data.append(color) fig = plt.plot(*data) plt.ylim(0, 1) plt.ylabel("frequency") plt.xlabel(xlabel) # <headingcell level=2> # Scale up # <markdowncell> # Here, we scale up to more interesting parameter values. # <codecell> pop_size = 50 seq_length = 100 generations = 500 mutation_rate = 0.0001 # per gen per individual per site fitness_effect = 1.1 # fitness effect if a functional mutation occurs fitness_chance = 0.1 # chance that a mutation has a fitness effect # <markdowncell> # In this case there are $\mu$ = 0.01 mutations entering the population every generation. # <codecell> seq_length * mutation_rate # <markdowncell> # And the population genetic parameter $\theta$, which equals $2N\mu$, is 1. # <codecell> 2 * pop_size * seq_length * mutation_rate # <codecell> base_haplotype = ''.join(["A" for i in range(seq_length)]) pop.clear() fitness.clear() del history[:] pop[base_haplotype] = pop_size fitness[base_haplotype] = 1.0 # <codecell> simulate() # <codecell> plt.figure(num=None, figsize=(14, 14), dpi=80, facecolor='w', edgecolor='k') plt.subplot2grid((3,2), (0,0), colspan=2) stacked_trajectory_plot() plt.subplot2grid((3,2), (1,0), colspan=2) snp_trajectory_plot() plt.subplot2grid((3,2), (2,0)) diversity_plot() plt.subplot2grid((3,2), (2,1)) divergence_plot()
gpl-2.0
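A compact sketch of a single Wright-Fisher generation as described in the notebook above: offspring counts come from a multinomial draw weighted by frequency times fitness, as in offspring_step. The seed is an assumption added so the sketch is reproducible.

import numpy as np

rng = np.random.RandomState(0)
pop = {"AAAAAAAAAA": 40, "AAATAAAAAA": 30, "AATTTAAAAA": 30}
fitness = {"AAAAAAAAAA": 1.0, "AAATAAAAAA": 1.05, "AATTTAAAAA": 1.10}
pop_size = sum(pop.values())

haplotypes = list(pop)
weights = np.array([pop[h] * fitness[h] for h in haplotypes], dtype=float)
weights /= weights.sum()                       # fitness-weighted frequencies
counts = rng.multinomial(pop_size, weights)    # one generation of drift + selection
next_gen = {h: int(c) for h, c in zip(haplotypes, counts) if c > 0}
print(next_gen)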
tlhallock/line-search-dfo
python/algorithms/trust_filter.py
1
20050
from math import inf as infinity from numpy import int as integral from dfo import dfo_model from dfo import polynomial_basis from numpy import reshape from numpy import bmat as blockmat from numpy import asarray from numpy import concatenate from numpy import asmatrix from numpy import dot from numpy import empty from numpy import zeros from numpy import arange from numpy import random from numpy.linalg import cond as condition_number from numpy.linalg import lstsq from numpy.linalg import norm as norm from numpy.linalg import solve as linsolve from numpy.linalg import pinv from scipy.optimize import minimize from scipy.optimize import linprog import matplotlib.pyplot as plt from utilities import trust import matplotlib.patches as patches from numpy import setdiff1d from utilities.nondom import NonDomSet class Constants: def __init__(self): self.delta = 1 self.gamma_0 = .1 self.gamma_1 = .5 self.gamma_2 = 4 self.eta_1 = .9 self.eta_2 = .9 self.gamma_theta = 1e-4 self.kappa_delta = .7 self.kappa_theta = 1e-4 self.kappa_mu = 100 self.mu = .01 self.psi = 2 self.kappa_tmd = .01 class Results: def __init__(self): self.number_of_iterations = 0 self.restorations = 0 self.filter_modified_count = 0 self.success = False self.f_min = infinity self.x_min = 0 self.filterRejectedCount = 0 def newF(self, otherX, otherF): if self.f_min < otherF: return self.f_min = otherF self.x_min = otherX def theta(cEq, cIneq, active=None): if active is None: raise Exception('Not implemented.') active_c = cIneq[active] return norm(cEq) + norm(active_c), dot(cEq, cEq) + dot(active_c, active_c) def _createModelFunction(program, radius, xsi): b = polynomial_basis.PolynomialBasis(len(program.x0), 2) equalityIndices = empty(program.getNumEqualityConstraints(), dtype=int) inequalityIndices = empty(program.getNumInequalityConstraints(), dtype=int) funs = [] index = 0 funs.append(program.f) objectiveIndex = int(index) index += 1 for i in range(len(equalityIndices)): funs.append(program.eq.getFunction(i)) equalityIndices[i] = int(index) index += 1 for i in range(len(inequalityIndices)): funs.append(program.ineq.getFunction(i)) inequalityIndices[i] = int(index) index += 1 model = dfo_model.MultiFunctionModel(funs, b, program.x0, radius, xsi) return (model, objectiveIndex, equalityIndices, inequalityIndices) # OMG, I can't figure out how I am calling this incorrectly, but with an infeasible problem the method is returning a result def dbl_check_sol(cons, res): if not res.success: return True for c in cons: if c['type'] == 'ineq': if (c['fun'](res.x) < -1e-3).any(): return False elif c['type'] == 'eq': if norm(c['fun'](res.x)) > 1e-3: return False else: raise Exception('unknown type of constraint') return True class AlgorithmState: def __init__(self, statement, constants): self.tol = statement.tol self.x = statement.x0 # the model function: self.model, _, self.equalityIndices, self.inequalityIndices = _createModelFunction(statement, constants.delta, 1e-1) # model function self.mf = None self.mh = None self.mg = None # the filter self.pareto = NonDomSet() # Information of objective self.f = infinity self.grad = None self.hess = None # Information of constraints self.theta = None self.theta2 = None self.A = None self.c = None self.cIneq = None self.AIneq = None self.cEq = None self.AEq = None self.active = None # The hessian of the lagrangian self.H = None # The current step self.n = None self.t = None self.s = None self.r = None self.x_new = None def computeCurrentValues(self, plotFile): self.r = None self.n = None self.t = None 
self.s = None self.q = None self.x_new = None self.model.computeValueFromDelegate(self.x) self.model.setNewModelCenter(self.x) self.model.improve(plotFile) self.mf = self.model.getQuadraticModel(0) self.mh = self.model.getQuadraticModels(self.equalityIndices) self.mg = self.model.getQuadraticModels(self.inequalityIndices) self.f = self.mf.evaluate(self.x) self.grad = self.mf.gradient(self.x) self.hess = self.mf.hessian(self.x) self.cEq = self.mh.evaluate(self.x) self.AEq = self.mh.jacobian(self.x) self.cIneq = self.mg.evaluate(self.x) self.AIneq = self.mg.jacobian(self.x) self.computeActiveConstraints() self.computeHessianOfLagrangian() self.theta, self.theta2 = theta(self.cEq, self.cIneq, self.active) def computeHessianOfLagrangian(self): n = len(self.x) m = self.AEq.shape[0] + self.AIneq.shape[0] FULL_KKT = blockmat([ [self.hess, self.AEq.T, self.AIneq.T], [self.AEq, zeros([self.AEq.shape[0], m])], [self.AIneq, zeros([self.AIneq.shape[0], m])]]) FULL_RHS = concatenate([-self.grad, self.cEq, self.cIneq]) if condition_number(FULL_KKT) > 1000: # results.restorations += 1 # state.x = restore_feasibility(program, state.x) print('Inverting singular matrix to find lagrange multipliers.') #vec = linsolve(FULL_KKT, FULL_RHS.T) vec = lstsq(FULL_KKT, FULL_RHS.T)[0] # newton_direction = -vec[:n] dual_variables = -vec[n:] qs = self.model.getQuadraticModels(arange(1,m+1)).hessian(self.x) self.H = self.hess # I am sure I could use einstien summation notation or something for i in range(len(dual_variables)): self.H += dual_variables[i] * qs[i] def computeActiveConstraints(self): if not len(self.cEq) == 0 and len(self.cIneq) == 0: self.active = empty(0) self.A = self.AEq self.c = self.AEq return if not len(self.cIneq) == 0: self.active = self.cIneq > -self.tol cIneqActive = self.cIneq[self.active] aIneqActive = self.AIneq[self.active] if len(self.cEq) == 0: self.c = cIneqActive self.A = aIneqActive else: self.c = concatenate([self.cEq, cIneqActive]) self.A = blockmat([[self.AEq], [aIneqActive]]) return self.c = empty(0) self.A = empty([0, 0]) self.active = empty(0) def computeChi(self): if self.n is None: return None, False c = self.grad + dot(self.H, self.n) # A_ub = self.AIneq # b_ub = -self.cIneq # b_ub = -self.cIneq # A_eq = self.AEq # b_eq = zeros(self.AEq.shape[0]) cons = [{'type': 'ineq', 'fun': lambda t: self.cIneq - dot(self.AIneq, t), 'jac': lambda n: -self.AIneq}, {'type': 'ineq', 'fun': lambda t: 1 - dot(t, t), 'jac': lambda t: reshape(-2 * t, (1, self.getN()))}, {'type': 'eq', 'fun': lambda t: dot(self.AEq, t), 'jac': lambda t: self.AEq}] res_cons = minimize(lambda t: dot(c, t), jac=lambda t: c, x0=zeros(len(self.x)), constraints=cons, method='SLSQP', options={"disp": False, "maxiter": 1000}, tol=self.tol) if dbl_check_sol(cons, res_cons): return abs(res_cons.fun), True else: return None, False # result = linprog(c=self.grad + dot(self.H, self.n), # A_ub= self.AIneq, b_ub= -self.cIneq, # A_eq=self.AEq, b_eq=zeros(self.AEq.shape[0])) # if result.success: # return abs(result.fun), True # else: # return None, False def computeNormalComponent(self): if not any(self.active): self.n = zeros(len(self.x)) return tr_jac_dim=(1, self.getN()) # A^T (A * A^T)^-1 c # A^T (A * A^T)^-1 c initialN = -asarray(dot(dot(self.A.T, pinv(dot(self.A, self.A.T))), self.c)).flatten() - self.x cons = [{'type': 'ineq', 'fun': lambda n: -self.cIneq - dot(self.AIneq, n), 'jac': lambda n: -self.AIneq}, {'type': 'ineq', 'fun': lambda n: self.model.modelRadius**2 - dot(n, n), 'jac': lambda n: reshape(-2*n, tr_jac_dim)}, 
{'type': 'eq', 'fun': lambda n: self.cEq + dot(self.AEq, n), 'jac': lambda n: self.AEq}] res_cons = minimize(lambda n: dot(n, n), jac=lambda n: 2 * n, x0=initialN, constraints=cons, method='SLSQP', options={"disp": False, "maxiter": 1000}, tol=self.tol) if res_cons.success and dbl_check_sol(cons, res_cons): self.n = res_cons.x pass else: self.n = None def computeTangentialStep(self): rhs1 = self.cIneq + dot(self.AIneq, self.n) cons = [{'type': 'ineq', 'fun': lambda t: -rhs1 - dot(self.AIneq, t), 'jac': lambda t: -self.AIneq}, {'type': 'ineq', 'fun': lambda t: self.model.modelRadius ** 2 - dot(self.n + t, self.n + t), 'jac': lambda t: reshape(-2 * (self.n + t), (1, len(self.x)))}, {'type': 'eq', 'fun': lambda t: dot(self.AEq, t), 'jac': lambda t: self.AEq}] # TODO: If zeros are not cutting it: # x0 = 2 * random.rand(len(self.x)) - 1 res_cons = minimize( lambda t: dot(self.grad + dot(self.H, self.n), t) + .5 * dot(t.T, dot(self.H, t)), jac=lambda t: self.grad + dot(self.H, self.n) + dot(self.H, t), x0=zeros(len(self.x)), constraints=cons, method='SLSQP', options={"disp": False, "maxiter": 1000}, tol=self.tol) if res_cons.success and dbl_check_sol(cons, res_cons): self.t = res_cons.x return res_cons.x, True else: self.t = None return None, False # def computeQuadraticNormalComponent(self): # if not any(self.active): # self.n = zeros(len(self.x)) # return # # tr_jac_dim=(1, self.getN()) # initialN = -asarray(dot(dot(self.A.T, pinv(dot(self.A, self.A.T))), self.c)).flatten() - self.x # cons = [{'type': 'ineq', # 'fun': lambda n: -self.cIneq - dot(self.AIneq, n), # 'jac': lambda n: -self.AIneq}, # {'type': 'ineq', # 'fun': lambda n: self.model.modelRadius**2 - dot(n, n), # 'jac': lambda n: reshape(-2*n, tr_jac_dim)}, # {'type': 'eq', # 'fun': lambda n: self.cEq + dot(self.AEq, n), # 'jac': lambda n: self.AEq}] # res_cons = minimize(lambda n: dot(n, n), jac=lambda n: 2 * n, x0=initialN, # constraints=cons, method='SLSQP', options={"disp": False, "maxiter": 1000}, tol=self.tol) # if res_cons.success and dbl_check_sol(cons, res_cons): # self.n = res_cons.x # pass # else: # self.n = None # # def computeQuadraticTangentialStep(self): # rhs1 = self.cIneq + dot(self.AIneq, self.n) # # cons = [{'type': 'ineq', # 'fun': lambda t: -rhs1 - dot(self.AIneq, t), # 'jac': lambda t: -self.AIneq}, # {'type': 'ineq', # 'fun': lambda t: self.model.modelRadius ** 2 - dot(self.n + t, self.n + t), # 'jac': lambda t: reshape(-2 * (self.n + t), (1, len(self.x)))}, # {'type': 'eq', # 'fun': lambda t: dot(self.AEq, t), # 'jac': lambda t: self.AEq}] # # # TODO: If zeros are not cutting it: # # x0 = 2 * random.rand(len(self.x)) - 1 # # res_cons = minimize( # lambda t: dot(self.grad + dot(self.H, self.n), t) + .5 * dot(t.T, dot(self.H, t)), # jac=lambda t: self.grad + dot(self.H, self.n) + dot(self.H, t), # x0=zeros(len(self.x)), constraints=cons, method='SLSQP', options={"disp": False, "maxiter": 1000}, tol=self.tol) # # if res_cons.success and dbl_check_sol(cons, res_cons): # self.t = res_cons.x # return res_cons.x, True # else: # self.t = None # return None, False def computeQuadraticStep(self): cons = [ {'type': 'ineq', 'fun': lambda t: -self.mg.evaluate(self.x + t), 'jac': lambda t: -self.mg.jacobian(self.x + t)}, {'type': 'ineq', 'fun': lambda t: self.model.modelRadius ** 2 - dot(t, t), 'jac': lambda t: reshape(-2 * t, (1, len(self.x)))}, {'type': 'eq', 'fun': lambda t: self.mh.evaluate(self.x + t), 'jac': lambda t: self.mh.jacobian(self.x + t)}] res_cons = minimize( lambda t: self.mf.evaluate(self.x + t), jac=lambda 
t: self.mf.gradient(self.x + t), x0=zeros(len(self.x)), constraints=cons, method='SLSQP', options={ "disp": True, "maxiter": 1000 }, tol=self.tol) if res_cons.success and dbl_check_sol(cons, res_cons): self.q = res_cons.x else: dbl_check_sol(cons, res_cons) self.q = None def evaluateAtTrialPoint(self): # compute function value at new point actualY, _ = self.model.computeValueFromDelegate(self.x_new) # compute theta violation_eq = actualY[self.equalityIndices] violation_ineq = actualY[self.inequalityIndices] active = violation_ineq > -self.tol theta_new, theta2_new = theta(violation_eq, violation_ineq, active) return actualY[0], theta_new, theta2_new def getN(self): return len(self.x) def getM(self): if self.A is None: return 0 return self.A.shape[0] def getPlotRadius(self): maxDist = max(norm(self.model.unshifted - self.x, axis=1)) if self.n is not None: maxDist = max(maxDist, norm(self.n)) if self.t is not None: maxDist = max(maxDist, norm(self.t)) if self.s is not None: maxDist = max(maxDist, norm(self.s - self.x)) maxDist = max(maxDist, self.model.modelRadius) return maxDist * 1.2 def show(self, statement, action, suffix): try: center = self.x radius = self.getPlotRadius() ax1 = statement.createBasePlotAt(center, radius, title=action) #, mf=lambda x: self.mf.evaluate(x) self.model.addPointsToPlot(center, radius) # amin(shifted, 0) totalDist = radius hw = .05 * totalDist hl = .1 * totalDist # ax1.add_patch(patches.Arrow( # x=self.x[0], y=self.x[1], # dx=(-self.model.modelRadius * self.grad[0] / norm(self.grad)), # dy=(-self.model.modelRadius * self.grad[1] / norm(self.grad)), # width=hw, # facecolor="black", edgecolor="black" # )) if self.s is not None: ax1.add_patch(patches.Arrow( x=self.x[0], y=self.x[1], dx=(self.s[0]), dy=(self.s[1]), width=hw, facecolor="blue", edgecolor="blue" )) if self.q is not None: ax1.add_patch(patches.Arrow( x=self.x[0], y=self.x[1], dx=self.q[0], dy=self.q[1], width=hw, facecolor="pink", edgecolor="pink" )) if self.n is not None: ax1.add_patch(patches.Arrow( x=self.x[0], y=self.x[1], dx=self.n[0], dy=self.n[1], width=hw, facecolor="yellow", edgecolor="yellow" )) if self.t is not None: ax1.add_patch(patches.Arrow( x=self.x[0] + self.n[0], y=self.x[1] + self.n[1], dx=self.t[0], dy=self.t[1], width=hw, facecolor="green", edgecolor="green" )) if self.r is not None: ax1.add_patch(patches.Arrow( x=self.x[0], y=self.x[1], dx=self.r[0], dy=self.r[1], width=hw, facecolor="red", edgecolor="red" )) # plt.arrow(x=self.x[0], y=self.x[1], # dx=self.r[0], dy=self.r[1], # # head_width=hw, head_length=hl, # fc='r', ec='r') plt.savefig(statement.getNextPlotFile(suffix)) plt.close() except: print('unable to plot') def delta(self): return self.model.modelRadius def decreaseRadius(self, constants): self.model.multiplyRadius((constants.gamma_0 + constants.gamma_1) / 2) def increaseRadius(self, constants): self.model.multiplyRadius((1 + constants.gamma_2) / 2) def restore_feasibility(program, constants, state, results, plot): results.restorations += 1 state.s = None state.n = None state.t = None state.x_new = None while True: def m_theta(x): ineq = state.mg.evaluate(x) eq = state.mh.evaluate(x) active = ineq > -state.tol return theta(eq, ineq, active) quad_model = state.model.createUnshiftedQuadraticModel(lambda x: m_theta(x)[1]) newx, _, _, _, _ = trust.trust(asmatrix(quad_model.b).T, asmatrix(quad_model.Q), 1) newx = asarray(newx).flatten() state.x_new = newx * state.model.modelRadius + state.model.modelCenter() theta_exp = quad_model.evaluate(newx) _, actual_theta, 
actual_theta2 = state.evaluateAtTrialPoint() rho = (state.theta2 - actual_theta2)/(state.theta2 - theta_exp) state.r = state.x_new - state.x if plot: state.show(program, 'restoration_step theta=' + str(state.theta) + ', new theta=' + str(actual_theta) + ', rho=' + str(rho) + ', radius=' + str(state.model.modelRadius), 'restore') if rho < constants.eta_1: state.decreaseRadius(constants) state.computeCurrentValues(program.getNextPlotFile('feasibility')) continue elif rho < constants.eta_2: state.x = state.x_new return True elif norm(state.r) >= state.delta() / 2: state.increaseRadius(constants) state.x = state.x_new return True elif state.delta() < state.tol: # we have converged to a local minimum of the constraints that is not feasible? return False else: state.x = state.x_new state.decreaseRadius(constants) return True def trust_filter(program, constants, plot=True): results = Results() state = AlgorithmState(program, constants) plot=True while True: # ensure poised and compute model functions state.computeCurrentValues(program.getNextPlotFile('improve') if plot else None) state.computeNormalComponent() if plot: state.show(program, 'normal_step: theta=' + str(state.theta) + ',radius=' + str(state.delta()), 'normal') # This is not the correct feasible region to check non-emptyness! chi, nonempty = state.computeChi() if plot or True: print("current x = " + str(state.x)) print("current theta = " + str(state.theta)) print("current chi = " + str(chi)) print("current radius = " + str(state.delta())) print("current function value = " + str(state.f)) print("current # steps rejected by filter = " + str(results.filterRejectedCount)) print("current # filter modified = " + str(results.filter_modified_count)) print("---------------------------------------") # check optimality if nonempty and state.theta < program.tol and chi < program.tol: if state.delta() > program.tol: state.decreaseRadius(constants) results.number_of_iterations += 1 continue results.newF(state.x, state.f) results.success = True return results state.computeQuadraticStep() if state.q is None: # check compatibility if state.n is None or norm(state.n) >= constants.kappa_delta * state.delta() * min(1, constants.kappa_mu * state.delta() ** constants.mu): if not restore_feasibility(program, constants, state, results, plot): results.success = False break continue state.computeTangentialStep() # This check was not in the paper... 
if state.t is None: print('Unable to compute t!!!!!!!!!') if not restore_feasibility(program, constants, state, results, plot): results.success = False break continue state.s = state.t + state.n else: state.s = state.q state.x_new = state.x + state.s if plot: state.show(program, 'computed tangential step', 'tangential_step') f_exp = state.model.interpolate(state.x_new)[0] f_new, theta_new, _ = state.evaluateAtTrialPoint() # check acceptability to the filter if state.pareto.is_dominated((theta_new, f_new)): state.decreaseRadius(constants) results.number_of_iterations += 1 results.filterRejectedCount += 1 continue # test if model accuracy is poor rho = (state.f - f_new) / (state.f - f_exp) if rho < constants.eta_1: state.decreaseRadius(constants) results.number_of_iterations += 1 continue # accept trail point state.x = state.x_new # check ratio of improvement in model to improvement in constraints if state.f - f_exp < constants.kappa_theta * state.theta ** constants.psi: state.pareto.add(((1 - constants.gamma_theta) * theta_new, state.f - constants.gamma_theta * theta_new)) results.filter_modified_count += 1 results.number_of_iterations += 1 continue # nothing to do in this case if rho < constants.eta_2: results.number_of_iterations += 1 continue # check the accuracy of the model functions if norm(state.s) < state.delta() / 2: state.decreaseRadius(constants) else: state.increaseRadius(constants) return results # # if (abs((state.model.interpolate(state.x)[0] - state.f) / state.f) > 1e-12).any(): # print(state.model.interpolate(state.x)[0]) # print(state.f) # raise Exception("These aren't the same?") # if (abs((state.model.interpolate(state.x_new)[0] - f_exp) / f_exp) > 1e-12).any(): # print(state.model.interpolate(state.x_new)[0]) # print(f_exp) # raise Exception("These aren't the same?")
gpl-3.0
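The algorithm above rejects a trial point when its (constraint violation, objective) pair is dominated by an entry of the filter (state.pareto, a NonDomSet from utilities.nondom in this repo). A standalone sketch of that dominance test only; the exact comparison rules of NonDomSet may differ, so this is an illustration, not the repo's implementation.

def is_dominated(point, filter_pairs):
    """A trial (theta, f) is dominated if some stored pair is no worse in both."""
    theta, f = point
    return any(t <= theta and v <= f for (t, v) in filter_pairs)


filter_pairs = [(0.5, 10.0), (0.1, 12.0)]
print(is_dominated((0.6, 11.0), filter_pairs))   # True: (0.5, 10.0) is better in both
print(is_dominated((0.05, 11.0), filter_pairs))  # False: less infeasible than every entry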
YinongLong/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
161
1380
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
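Editorial note: the example above lets RFECV choose the number of features via cross-validation. When the target count is known in advance, plain RFE runs the same elimination loop without cross-validation; the standard scikit-learn sketch below is added only for contrast and is not part of the original example.

# Hedged companion sketch: fixed-size recursive feature elimination with RFE.
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                           random_state=0)
svc = SVC(kernel="linear")
rfe = RFE(estimator=svc, n_features_to_select=3, step=1)
rfe.fit(X, y)

# ranking_ is 1 for selected features, larger values for eliminated ones
print("selected mask :", rfe.support_)
print("feature ranks :", rfe.ranking_)
print("reduced shape :", rfe.transform(X).shape)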
ssaeger/scikit-learn
benchmarks/bench_sgd_regression.py
283
5569
""" Benchmark for SGD regression Compares SGD regression against coordinate descent and Ridge on synthetic data. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # License: BSD 3 clause import numpy as np import pylab as pl import gc from time import time from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet from sklearn.metrics import mean_squared_error from sklearn.datasets.samples_generator import make_regression if __name__ == "__main__": list_n_samples = np.linspace(100, 10000, 5).astype(np.int) list_n_features = [10, 100, 1000] n_test = 1000 noise = 0.1 alpha = 0.01 sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) for i, n_train in enumerate(list_n_samples): for j, n_features in enumerate(list_n_features): X, y, coef = make_regression( n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] print("=======================") print("Round %d %d" % (i, j)) print("n_features:", n_features) print("n_samples:", n_train) # Shuffle data idx = np.arange(n_train) np.random.seed(13) np.random.shuffle(idx) X_train = X_train[idx] y_train = y_train[idx] std = X_train.std(axis=0) mean = X_train.mean(axis=0) X_train = (X_train - mean) / std X_test = (X_test - mean) / std std = y_train.std(axis=0) mean = y_train.mean(axis=0) y_train = (y_train - mean) / std y_test = (y_test - mean) / std gc.collect() print("- benchmarking ElasticNet") clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False) tstart = time() clf.fit(X_train, y_train) elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) elnet_results[i, j, 1] = time() - tstart gc.collect() print("- benchmarking SGD") n_iter = np.ceil(10 ** 4.0 / n_train) clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False, n_iter=n_iter, learning_rate="invscaling", eta0=.01, power_t=0.25) tstart = time() clf.fit(X_train, y_train) sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) sgd_results[i, j, 1] = time() - tstart gc.collect() print("n_iter", n_iter) print("- benchmarking A-SGD") n_iter = np.ceil(10 ** 4.0 / n_train) clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False, n_iter=n_iter, learning_rate="invscaling", eta0=.002, power_t=0.05, average=(n_iter * n_train // 2)) tstart = time() clf.fit(X_train, y_train) asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) asgd_results[i, j, 1] = time() - tstart gc.collect() print("- benchmarking RidgeRegression") clf = Ridge(alpha=alpha, fit_intercept=False) tstart = time() clf.fit(X_train, y_train) ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) ridge_results[i, j, 1] = time() - tstart # Plot results i = 0 m = len(list_n_features) pl.figure('scikit-learn SGD regression benchmark results', figsize=(5 * 2, 4 * m)) for j in range(m): pl.subplot(m, 2, i + 1) pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]), label="ElasticNet") pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]), label="SGDRegressor") pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]), label="A-SGDRegressor") pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]), label="Ridge") pl.legend(prop={"size": 10}) pl.xlabel("n_train") pl.ylabel("RMSE") pl.title("Test 
error - %d features" % list_n_features[j]) i += 1 pl.subplot(m, 2, i + 1) pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]), label="ElasticNet") pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]), label="SGDRegressor") pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]), label="A-SGDRegressor") pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]), label="Ridge") pl.legend(prop={"size": 10}) pl.xlabel("n_train") pl.ylabel("Time [sec]") pl.title("Training time - %d features" % list_n_features[j]) i += 1 pl.subplots_adjust(hspace=.30) pl.show()
bsd-3-clause
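Editorial note: the benchmark above repeats one pattern for every estimator: fit, predict, compute RMSE, and record the wall-clock time. The standalone sketch below isolates that inner measurement on synthetic data; it uses Ridge and a default-configured SGDRegressor rather than the tuned n_iter/eta0 values from the benchmark, so treat it as an outline.

# Minimal sketch of the fit -> predict -> RMSE -> wall-clock pattern above.
from time import time

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge, SGDRegressor
from sklearn.metrics import mean_squared_error

def bench_one(estimator, X_train, y_train, X_test, y_test):
    tstart = time()
    estimator.fit(X_train, y_train)
    elapsed = time() - tstart
    rmse = np.sqrt(mean_squared_error(y_test, estimator.predict(X_test)))
    return rmse, elapsed

X, y = make_regression(n_samples=2000, n_features=100, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = X[:1500], X[1500:], y[:1500], y[1500:]
for clf in (Ridge(alpha=0.01, fit_intercept=False), SGDRegressor(random_state=0)):
    print(type(clf).__name__, bench_one(clf, X_train, y_train, X_test, y_test))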
kaichogami/scikit-learn
examples/text/document_classification_20newsgroups.py
27
10521
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features and demonstrates various classifiers that can efficiently handle sparse matrices. The dataset used in this example is the 20 newsgroups dataset. It will be automatically downloaded, then cached. The bar plot indicates the accuracy, training time (normalized) and test time (normalized) of each classifier. """ # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function import logging import numpy as np from optparse import OptionParser import sys from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import RidgeClassifier from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.linear_model import Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import NearestCentroid from sklearn.ensemble import RandomForestClassifier from sklearn.utils.extmath import density from sklearn import metrics # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--report", action="store_true", dest="print_report", help="Print a detailed classification report.") op.add_option("--chi2_select", action="store", type="int", dest="select_chi2", help="Select some number of features using a chi-squared test") op.add_option("--confusion_matrix", action="store_true", dest="print_cm", help="Print the confusion matrix.") op.add_option("--top10", action="store_true", dest="print_top10", help="Print ten most discriminative terms per class" " for every classifier.") op.add_option("--all_categories", action="store_true", dest="all_categories", help="Whether to use all categories or not.") op.add_option("--use_hashing", action="store_true", help="Use a hashing vectorizer.") op.add_option("--n_features", action="store", type=int, default=2 ** 16, help="n_features when using the hashing vectorizer.") op.add_option("--filtered", action="store_true", help="Remove newsgroup information that is easily overfit: " "headers, signatures, and quoting.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) print(__doc__) op.print_help() print() ############################################################################### # Load some categories from the training set if opts.all_categories: categories = None else: categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] if opts.filtered: remove = ('headers', 'footers', 'quotes') else: remove = () print("Loading 20 newsgroups dataset for categories:") print(categories if categories else 
"all") data_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42, remove=remove) print('data loaded') categories = data_train.target_names # for case categories == None def size_mb(docs): return sum(len(s.encode('utf-8')) for s in docs) / 1e6 data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print("%d documents - %0.3fMB (training set)" % ( len(data_train.data), data_train_size_mb)) print("%d documents - %0.3fMB (test set)" % ( len(data_test.data), data_test_size_mb)) print("%d categories" % len(categories)) print() # split a training set and a test set y_train, y_test = data_train.target, data_test.target print("Extracting features from the training data using a sparse vectorizer") t0 = time() if opts.use_hashing: vectorizer = HashingVectorizer(stop_words='english', non_negative=True, n_features=opts.n_features) X_train = vectorizer.transform(data_train.data) else: vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') X_train = vectorizer.fit_transform(data_train.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_train.shape) print() print("Extracting features from the test data using the same vectorizer") t0 = time() X_test = vectorizer.transform(data_test.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_test.shape) print() # mapping from integer feature name to original token string if opts.use_hashing: feature_names = None else: feature_names = vectorizer.get_feature_names() if opts.select_chi2: print("Extracting %d best features by a chi-squared test" % opts.select_chi2) t0 = time() ch2 = SelectKBest(chi2, k=opts.select_chi2) X_train = ch2.fit_transform(X_train, y_train) X_test = ch2.transform(X_test) if feature_names: # keep selected feature names feature_names = [feature_names[i] for i in ch2.get_support(indices=True)] print("done in %fs" % (time() - t0)) print() if feature_names: feature_names = np.asarray(feature_names) def trim(s): """Trim string to fit on terminal (assuming 80-column display)""" return s if len(s) <= 80 else s[:77] + "..." 
############################################################################### # Benchmark classifiers def benchmark(clf): print('_' * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print("train time: %0.3fs" % train_time) t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print("test time: %0.3fs" % test_time) score = metrics.accuracy_score(y_test, pred) print("accuracy: %0.3f" % score) if hasattr(clf, 'coef_'): print("dimensionality: %d" % clf.coef_.shape[1]) print("density: %f" % density(clf.coef_)) if opts.print_top10 and feature_names is not None: print("top 10 keywords per class:") for i, category in enumerate(categories): top10 = np.argsort(clf.coef_[i])[-10:] print(trim("%s: %s" % (category, " ".join(feature_names[top10])))) print() if opts.print_report: print("classification report:") print(metrics.classification_report(y_test, pred, target_names=categories)) if opts.print_cm: print("confusion matrix:") print(metrics.confusion_matrix(y_test, pred)) print() clf_descr = str(clf).split('(')[0] return clf_descr, score, train_time, test_time results = [] for clf, name in ( (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"), (Perceptron(n_iter=50), "Perceptron"), (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"), (KNeighborsClassifier(n_neighbors=10), "kNN"), (RandomForestClassifier(n_estimators=100), "Random forest")): print('=' * 80) print(name) results.append(benchmark(clf)) for penalty in ["l2", "l1"]: print('=' * 80) print("%s penalty" % penalty.upper()) # Train Liblinear model results.append(benchmark(LinearSVC(loss='l2', penalty=penalty, dual=False, tol=1e-3))) # Train SGD model results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty))) # Train SGD with Elastic Net penalty print('=' * 80) print("Elastic-Net penalty") results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"))) # Train NearestCentroid without threshold print('=' * 80) print("NearestCentroid (aka Rocchio classifier)") results.append(benchmark(NearestCentroid())) # Train sparse Naive Bayes classifiers print('=' * 80) print("Naive Bayes") results.append(benchmark(MultinomialNB(alpha=.01))) results.append(benchmark(BernoulliNB(alpha=.01))) print('=' * 80) print("LinearSVC with L1-based feature selection") # The smaller C, the stronger the regularization. # The more regularization, the more sparsity. results.append(benchmark(Pipeline([ ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)), ('classification', LinearSVC()) ]))) # make some plots indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) / np.max(training_time) test_time = np.array(test_time) / np.max(test_time) plt.figure(figsize=(12, 8)) plt.title("Score") plt.barh(indices, score, .2, label="score", color='navy') plt.barh(indices + .3, training_time, .2, label="training time", color='c') plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange') plt.yticks(()) plt.legend(loc='best') plt.subplots_adjust(left=.25) plt.subplots_adjust(top=.95) plt.subplots_adjust(bottom=.05) for i, c in zip(indices, clf_names): plt.text(-.3, i, c) plt.show()
bsd-3-clause
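Editorial note: the benchmark() helper above boils down to vectorize, fit, predict, and report accuracy. The sketch below reproduces that pipeline on a tiny in-memory corpus (so nothing has to be downloaded), using the same TfidfVectorizer/LinearSVC combination; the toy documents and labels are invented for illustration.

# Hedged miniature of the vectorize/train/score loop above, on toy documents.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score

train_docs = ["the rocket launched into space",
              "the telescope observed a distant galaxy",
              "render the polygon mesh on the gpu",
              "the shader draws textured graphics"]
train_labels = ["sci.space", "sci.space", "comp.graphics", "comp.graphics"]
test_docs = ["a new galaxy was observed", "gpu graphics rendering"]

vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words='english')
X_train = vectorizer.fit_transform(train_docs)
X_test = vectorizer.transform(test_docs)

clf = LinearSVC()
clf.fit(X_train, train_labels)
pred = clf.predict(X_test)
print("predictions:", pred)
print("accuracy   :", accuracy_score(["sci.space", "comp.graphics"], pred))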
dileep-kishore/microbial-ai
microbial_ai/regulation/regulator.py
1
4426
# @Author: dileep # @Last Modified by: dileep from typing import List, Dict import numpy as np from .memory import Action, Memory # from sklearn.preprocessing import minmax_scale, maxabs_scale class Regulator: """ Base regulator class Parameters --------- dfba_obj : DFBA DFBA instance Attributes ---------- state_size : int Dimensions of the observation space action_size : int Dimensions of the action space state_space : Dict[str, int] action_space : Dict[int, Tuple[str, str]] """ name = "Dummy" _type = 'fixed' _frac = 0.0 _batch_size = 100 _memory_size = int(1e5) # NOTE: Regulation layer only uses componenets in media that are exchanged by the organism def __init__(self, ex_reactions: List[str], ex_metabolites: List[str]) -> None: super().__init__() self.state_size = len(ex_metabolites) + len(ex_reactions) self.action_size = 2 * len(ex_reactions) self.state_space = {v: k for (k, v) in enumerate(ex_metabolites + ex_reactions)} pos = lambda x: [(y, '+') for y in x] neg = lambda x: [(y, '-') for y in x] self.action_space = dict(enumerate(pos(ex_reactions) + neg(ex_reactions))) self.memory = Memory(self._memory_size) def _encode_state(self, conc: Dict[str, float], flux: Dict[str, float]) -> np.ndarray: """ Converts the concentrations and fluxes to appropriate np.ndarray encoding Parameters --------- concentrations : Dict[str, float] Concentrations of the components of the media fluxes : Dict[str, float] Exchange fluxes of the reaction in the microbe """ # scaled_conc = {k: v for (k, v) in zip(conc.keys(), minmax_scale(list(conc.values())))} scaled_conc = {k: v / 1000 for (k, v) in conc.items()} # scaled_flux = {k: v for (k, v) in zip(flux.keys(), maxabs_scale(list(flux.values())))} scaled_flux = {k: v / 100 for (k, v) in flux.items()} state = np.zeros(self.state_size) for k, v in {**scaled_conc, **scaled_flux}.items(): state[self.state_space[k]] = v return state def _encode_action(self, action: Action) -> int: """ Converts action represented by the Action class into a np.ndarray Parameters --------- action : Action Action that needs to be encoded """ [action_rxnid] = action.phi.keys() # action_encoding = np.zeros(self.action_size, dtype=int) for pos, (rxnid, _) in self.action_space.items(): if rxnid == action_rxnid: # action_encoding[pos] = 1 action_pos = pos break # return action_encoding return action_pos def _decode_action(self, action_idx: int): """ Converts action index into corresponding Action class instance Parameters --------- action_idx : int Index of the selected action """ action_rxn, action_sign = self.action_space[action_idx] action_value = self._frac return Action(type=self._type, phi={action_rxn: (action_value, action_sign)}) def select_action(self, concentrations: Dict[str, float], fluxes: Dict[str, float]) -> Action: """ Select action based on concentrations and fluxes Parameters --------- concentrations : Dict[str, float] Concentrations of the components of the media fluxes : Dict[str, float] Exchange fluxes of the reaction in the microbe """ # state = self._encode_state(concentrations, fluxes) action_vals = np.zeros(self.action_size) action = np.argmax(action_vals) return self._decode_action(action) def save(self): """ Return model parameters to save """ return None def load(self, fname): """ Load model parameters from disk """ return None def update(self, state: Dict[str, float], action: Action, next_state: Dict[str, float], reward: float) -> None: pass def learn(self): pass
mit
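Editorial note: the Regulator's core bookkeeping is the state_space dictionary that assigns every metabolite and exchange reaction a slot in a fixed-length vector; _encode_state then scales the two input dictionaries and scatters them into that vector. The sketch below re-implements just that encoding idea with toy names and the same hard-coded scaling factors; it does not touch the project's DFBA or Memory interfaces.

# Illustrative re-implementation of the state-encoding idea; names are toy.
import numpy as np

def encode_state(conc, flux, state_space, conc_scale=1000.0, flux_scale=100.0):
    """Scatter scaled concentrations and fluxes into one fixed-length vector."""
    state = np.zeros(len(state_space))
    for name, value in conc.items():
        state[state_space[name]] = value / conc_scale
    for name, value in flux.items():
        state[state_space[name]] = value / flux_scale
    return state

metabolites = ["glc__D_e", "o2_e"]          # hypothetical exchange metabolites
reactions = ["EX_glc__D_e", "EX_o2_e"]      # hypothetical exchange reactions
state_space = {name: i for i, name in enumerate(metabolites + reactions)}

vec = encode_state({"glc__D_e": 20.0, "o2_e": 5.0},
                   {"EX_glc__D_e": -10.0, "EX_o2_e": -2.0},
                   state_space)
print(vec)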
fabioticconi/scikit-learn
benchmarks/bench_tree.py
297
3617
""" To run this, you'll need to have installed. * scikit-learn Does two benchmarks First, we fix a training set, increase the number of samples to classify and plot number of classified samples as a function of time. In the second benchmark, we increase the number of dimensions of the training set, classify a sample and plot the time taken as a function of the number of dimensions. """ import numpy as np import pylab as pl import gc from datetime import datetime # to store the results scikit_classifier_results = [] scikit_regressor_results = [] mu_second = 0.0 + 10 ** 6 # number of microseconds in a second def bench_scikit_tree_classifier(X, Y): """Benchmark with scikit-learn decision tree classifier""" from sklearn.tree import DecisionTreeClassifier gc.collect() # start time tstart = datetime.now() clf = DecisionTreeClassifier() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_classifier_results.append( delta.seconds + delta.microseconds / mu_second) def bench_scikit_tree_regressor(X, Y): """Benchmark with scikit-learn decision tree regressor""" from sklearn.tree import DecisionTreeRegressor gc.collect() # start time tstart = datetime.now() clf = DecisionTreeRegressor() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_regressor_results.append( delta.seconds + delta.microseconds / mu_second) if __name__ == '__main__': print('============================================') print('Warning: this is going to take a looong time') print('============================================') n = 10 step = 10000 n_samples = 10000 dim = 10 n_classes = 10 for i in range(n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') n_samples += step X = np.random.randn(n_samples, dim) Y = np.random.randint(0, n_classes, (n_samples,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(n_samples) bench_scikit_tree_regressor(X, Y) xx = range(0, n * step, step) pl.figure('scikit-learn tree benchmark results') pl.subplot(211) pl.title('Learning with varying number of samples') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') scikit_classifier_results = [] scikit_regressor_results = [] n = 10 step = 500 start_dim = 500 n_classes = 10 dim = start_dim for i in range(0, n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') dim += step X = np.random.randn(100, dim) Y = np.random.randint(0, n_classes, (100,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(100) bench_scikit_tree_regressor(X, Y) xx = np.arange(start_dim, start_dim + n * step, step) pl.subplot(212) pl.title('Learning in high dimensional spaces') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of dimensions') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
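Editorial note: both benchmark helpers above time fit+predict by differencing datetime.now() and converting the delta to seconds with mu_second. The sketch below wraps that measurement in a small helper and runs one synthetic round with DecisionTreeClassifier; sizes and seeds are arbitrary.

# Sketch of the timing pattern used above, on one synthetic round.
from datetime import datetime

import numpy as np
from sklearn.tree import DecisionTreeClassifier

MU_SECOND = 1e6  # microseconds per second, as in the benchmark above

def time_fit_predict(estimator, X, y):
    tstart = datetime.now()
    estimator.fit(X, y).predict(X)
    delta = datetime.now() - tstart
    return delta.seconds + delta.microseconds / MU_SECOND

X = np.random.randn(5000, 10)
y = np.random.randint(0, 10, (5000,))
print("decision tree fit+predict: %.3f s"
      % time_fit_predict(DecisionTreeClassifier(), X, y))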
se-passau/SPLConqueror
SPLConqueror/PyML/pyScripts/Communication.py
1
6815
import sys import learning import parameterTuning import configParser import learnerExtraction from sklearn.tree import DecisionTreeRegressor as DTR from sklearn.ensemble.forest import RandomForestRegressor as RF from sklearn.svm import SVR from sys import argv from time import perf_counter # Messages received by the parent process SETTING_STREAM_START = "settings_start" SETTING_STREAM_END = "settings_end" START_LEARN = "start_learn" START_PARAM_TUNING = "start_param_tuning" # Messages sent to the parent process REQUESTING_CONFIGURATION = "req_configs" REQUESTING_LEARNING_RESULTS = "req_results" PASS_OK = "pass_ok" FINISHED_LEARNING = "learn_finished" REQUESTING_LEARNING_SETTINGS = "req_settings" debug = False # Output function to pass strings to C#, flushing the output buffer is required to make sure the string is written # in the stream def print_line(string): print(string) # flushing output buffer sys.stdout.flush() def check_prereq(model): return isinstance(model, DTR) or isinstance(model, RF) or (isinstance(model, SVR) and model.kernel=="linear") # format and print a list def print_line_array(array): output = "" for item in array: output += str(item) output += "," print(output) sys.stdout.flush() tree_path = "" # Function to request and then parse configurations. def get_configurations(learn_container, predict_container): print_line(REQUESTING_CONFIGURATION) line = input() config_and_nfp_file_learn = line.split(" ") print_line(PASS_OK) line = input() config_and_nfp_file_predict = line.split(" ") print_line(PASS_OK) global tree_path tree_path = input() print_line(PASS_OK) options = input() if config_and_nfp_file_learn[0].strip().endswith(".csv"): data = configParser.parse_from_csv(config_and_nfp_file_learn[0].strip(), config_and_nfp_file_predict[0].strip(), config_and_nfp_file_learn[1].strip(), config_and_nfp_file_predict[1].strip()) learn_container.conf_file = config_and_nfp_file_learn[0].strip() learn_container.nfp_file = config_and_nfp_file_learn[1].strip() predict_container.conf_file = config_and_nfp_file_predict[0].strip() predict_container.nfp_file = config_and_nfp_file_predict[1].strip() else: data = configParser.parse_from_plain_text(options.strip().split(","), config_and_nfp_file_learn[0].strip(), config_and_nfp_file_predict[0].strip(), config_and_nfp_file_learn[1].strip(), config_and_nfp_file_predict[1].strip()) learn_container.features = data[0] learn_container.results = data[2] predict_container.features = data[1] predict_container.results = data[3] print_line(PASS_OK) # Main method, that will be executed when executing this script. def main(): if argv[1].lower() == "true": global debug debug = True print("Debug output mode enabled. 
\n Processing input\n", file=sys.stderr, flush=True) configurations_learn = Configurations() configurations_predict = Configurations() learning_strategy = "" learner_settings = [] # Sequence for getting the basic learning settings from C# print_line(REQUESTING_LEARNING_SETTINGS) csharp_response = input() if csharp_response == SETTING_STREAM_START: learning_strategy = input() learner_setting = input() if debug: print("Received learning strategy:" + learning_strategy + "\n", file=sys.stderr, flush=True) print("Received learner setting:" + learner_setting + "\n", file=sys.stderr, flush=True) while learner_setting != SETTING_STREAM_END: # pair of settings passed by other application in format identifier=value learner_settings.append(learner_setting) learner_setting = input() if debug: print("Received learner setting:" + learner_setting + "\n", file=sys.stderr, flush=True) get_configurations(configurations_learn, configurations_predict) if debug: print("Found learning set. " + str(configurations_learn) + "\n", file=sys.stderr, flush=True) print("Found validation set. " + str(configurations_learn) + "\n", file=sys.stderr, flush=True) task = input() # perform prediction if task == START_LEARN: if debug: print("Starting the learning.\n", file=sys.stderr, flush=True) learning.number_of_configurations = len(configurations_learn.results) model = learning.Learner(learning_strategy, learner_settings) start = perf_counter() model.learn(configurations_learn.features, configurations_learn.results) elapsed = perf_counter() - start predictions = model.predict(configurations_predict.features) print_line(FINISHED_LEARNING) if debug: print("Finished the learning.\n", file=sys.stderr, flush=True) if input() == REQUESTING_LEARNING_RESULTS: if debug: print("Extracting trees.\n", file=sys.stderr, flush=True) print_line_array(predictions) if not tree_path.strip() is "" and check_prereq(model.learning_model): print_line(str(elapsed)) tree_file = open(tree_path, 'w') tree = learnerExtraction.extract(model.learning_model) if len(tree) == 1: tree_file.write(str(tree) + "\n") else: forest = tree for tree in forest: tree_file.write(str(tree) + "\n") tree_file.flush() tree_file.close() # perform parameter tuning elif task == START_PARAM_TUNING: if debug: print("Starting the learning.\n", file=sys.stderr, flush=True) parameterTuning.setOutputPath(input()) optimal_parameters = parameterTuning.optimizeParameter(learning_strategy, configurations_learn.features, configurations_learn.results, learner_settings) print_line(FINISHED_LEARNING) if input() == REQUESTING_LEARNING_RESULTS: print_line(optimal_parameters) # class to hold values passed by c# class Configurations: def __init__(self): self.results = [] self.features = [] self.conf_file = "" self.nfp_file = "" def append(self, nfp_value, features): self.results.append(nfp_value) self.features.append(features) def __str__(self): return "Configurations from file " + self.conf_file + " with nfps " + self.nfp_file + "." main()
gpl-2.0
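Editorial note: Communication.py implements a line-oriented handshake over stdin/stdout; the child prints a request marker, flushes, and reads identifier=value lines until an end marker arrives. The sketch below captures that read loop with the I/O injected as callables so it can be exercised without a parent process; the marker strings mirror the constants above, but the helper itself is illustrative, not the module's actual function.

# Illustrative version of the settings handshake; read/write are injected so
# the loop can be tested in-process instead of over real stdin/stdout.
def receive_settings(read_line, write_line,
                     start_marker="settings_start", end_marker="settings_end"):
    write_line("req_settings")           # ask the parent for its settings
    settings = []
    if read_line() == start_marker:
        line = read_line()
        while line != end_marker:        # collect identifier=value pairs
            settings.append(line)
            line = read_line()
    return settings

incoming = iter(["settings_start", "strategy=DecisionTreeRegression",
                 "min_samples_split=2", "settings_end"])
sent = []
print(receive_settings(lambda: next(incoming), sent.append))
print("messages sent to parent:", sent)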
themrmax/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
42
14294
import numpy as np from sklearn.utils.testing import (assert_equal, assert_array_almost_equal, assert_array_equal, assert_true, assert_raise_message) from sklearn.datasets import load_linnerud from sklearn.cross_decomposition import pls_, CCA def test_pls(): d = load_linnerud() X = d.data Y = d.target # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A) # =========================================================== # Compare 2 algo.: nipals vs. svd # ------------------------------ pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1]) pls_bynipals.fit(X, Y) pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1]) pls_bysvd.fit(X, Y) # check equalities of loading (up to the sign of the second column) assert_array_almost_equal( pls_bynipals.x_loadings_, pls_bysvd.x_loadings_, decimal=5, err_msg="nipals and svd implementations lead to different x loadings") assert_array_almost_equal( pls_bynipals.y_loadings_, pls_bysvd.y_loadings_, decimal=5, err_msg="nipals and svd implementations lead to different y loadings") # Check PLS properties (with n_components=X.shape[1]) # --------------------------------------------------- plsca = pls_.PLSCanonical(n_components=X.shape[1]) plsca.fit(X, Y) T = plsca.x_scores_ P = plsca.x_loadings_ Wx = plsca.x_weights_ U = plsca.y_scores_ Q = plsca.y_loadings_ Wy = plsca.y_weights_ def check_ortho(M, err_msg): K = np.dot(M.T, M) assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(Wx, "x weights are not orthogonal") check_ortho(Wy, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(T, "x scores are not orthogonal") check_ortho(U, "y scores are not orthogonal") # Check X = TP' and Y = UQ' (with (p == q) components) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # center scale X, Y Xc, Yc, x_mean, y_mean, x_std, y_std =\ pls_._center_scale_xy(X.copy(), Y.copy(), scale=True) assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'") assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'") # Check that rotations on training data lead to scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xr = plsca.transform(X) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") Xr, Yr = plsca.transform(X, Y) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") assert_array_almost_equal(Yr, plsca.y_scores_, err_msg="rotation on Y failed") # "Non regression test" on canonical PLS # -------------------------------------- # The results were checked against the R-package plspm pls_ca = pls_.PLSCanonical(n_components=X.shape[1]) pls_ca.fit(X, Y) x_weights = np.array( [[-0.61330704, 0.25616119, -0.74715187], [-0.74697144, 0.11930791, 0.65406368], [-0.25668686, -0.95924297, -0.11817271]]) # x_weights_sign_flip holds columns of 1 or -1, depending on sign flip # between R and python x_weights_sign_flip = pls_ca.x_weights_ / x_weights x_rotations = np.array( [[-0.61330704, 0.41591889, -0.62297525], [-0.74697144, 0.31388326, 0.77368233], [-0.25668686, -0.89237972, -0.24121788]]) x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations y_weights = np.array( [[+0.58989127, 0.7890047, 0.1717553], [+0.77134053, -0.61351791, 0.16920272], [-0.23887670, -0.03267062, 0.97050016]]) y_weights_sign_flip = pls_ca.y_weights_ / y_weights y_rotations = np.array( [[+0.58989127, 0.7168115, 0.30665872], [+0.77134053, -0.70791757, 0.19786539], [-0.23887670, 
-0.00343595, 0.94162826]]) y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations # x_weights = X.dot(x_rotation) # Hence R/python sign flip should be the same in x_weight and x_rotation assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip) # This test that R / python give the same result up to column # sign indeterminacy assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip) assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) # 2) Regression PLS (PLS2): "Non regression test" # =============================================== # The results were checked against the R-packages plspm, misOmics and pls pls_2 = pls_.PLSRegression(n_components=X.shape[1]) pls_2.fit(X, Y) x_weights = np.array( [[-0.61330704, -0.00443647, 0.78983213], [-0.74697144, -0.32172099, -0.58183269], [-0.25668686, 0.94682413, -0.19399983]]) x_weights_sign_flip = pls_2.x_weights_ / x_weights x_loadings = np.array( [[-0.61470416, -0.24574278, 0.78983213], [-0.65625755, -0.14396183, -0.58183269], [-0.51733059, 1.00609417, -0.19399983]]) x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings y_weights = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) y_weights_sign_flip = pls_2.y_weights_ / y_weights y_loadings = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings # x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4) assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4) assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) # 3) Another non-regression test of Canonical PLS on random dataset # ================================================================= # The results were checked against the R-package plspm n = 500 p_noise = 10 q_noise = 5 # 2 latents vars: np.random.seed(11) l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X = np.concatenate( (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1) Y = np.concatenate( (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1) np.random.seed(None) pls_ca = pls_.PLSCanonical(n_components=3) pls_ca.fit(X, Y) x_weights = np.array( [[0.65803719, 0.19197924, 0.21769083], [0.7009113, 0.13303969, -0.15376699], [0.13528197, -0.68636408, 0.13856546], [0.16854574, -0.66788088, -0.12485304], [-0.03232333, -0.04189855, 0.40690153], [0.1148816, -0.09643158, 0.1613305], [0.04792138, -0.02384992, 0.17175319], [-0.06781, -0.01666137, -0.18556747], [-0.00266945, -0.00160224, 0.11893098], [-0.00849528, -0.07706095, 0.1570547], [-0.00949471, -0.02964127, 0.34657036], [-0.03572177, 0.0945091, 0.3414855], [0.05584937, -0.02028961, -0.57682568], [0.05744254, -0.01482333, -0.17431274]]) x_weights_sign_flip = pls_ca.x_weights_ / x_weights x_loadings = np.array( [[0.65649254, 0.1847647, 0.15270699], 
[0.67554234, 0.15237508, -0.09182247], [0.19219925, -0.67750975, 0.08673128], [0.2133631, -0.67034809, -0.08835483], [-0.03178912, -0.06668336, 0.43395268], [0.15684588, -0.13350241, 0.20578984], [0.03337736, -0.03807306, 0.09871553], [-0.06199844, 0.01559854, -0.1881785], [0.00406146, -0.00587025, 0.16413253], [-0.00374239, -0.05848466, 0.19140336], [0.00139214, -0.01033161, 0.32239136], [-0.05292828, 0.0953533, 0.31916881], [0.04031924, -0.01961045, -0.65174036], [0.06172484, -0.06597366, -0.1244497]]) x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings y_weights = np.array( [[0.66101097, 0.18672553, 0.22826092], [0.69347861, 0.18463471, -0.23995597], [0.14462724, -0.66504085, 0.17082434], [0.22247955, -0.6932605, -0.09832993], [0.07035859, 0.00714283, 0.67810124], [0.07765351, -0.0105204, -0.44108074], [-0.00917056, 0.04322147, 0.10062478], [-0.01909512, 0.06182718, 0.28830475], [0.01756709, 0.04797666, 0.32225745]]) y_weights_sign_flip = pls_ca.y_weights_ / y_weights y_loadings = np.array( [[0.68568625, 0.1674376, 0.0969508], [0.68782064, 0.20375837, -0.1164448], [0.11712173, -0.68046903, 0.12001505], [0.17860457, -0.6798319, -0.05089681], [0.06265739, -0.0277703, 0.74729584], [0.0914178, 0.00403751, -0.5135078], [-0.02196918, -0.01377169, 0.09564505], [-0.03288952, 0.09039729, 0.31858973], [0.04287624, 0.05254676, 0.27836841]]) y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4) assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_weights_, "x weights are not orthogonal") check_ortho(pls_ca.y_weights_, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_scores_, "x scores are not orthogonal") check_ortho(pls_ca.y_scores_, "y scores are not orthogonal") def test_PLSSVD(): # Let's check the PLSSVD doesn't return all possible component but just # the specified number d = load_linnerud() X = d.data Y = d.target n_components = 2 for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]: pls = clf(n_components=n_components) pls.fit(X, Y) assert_equal(n_components, pls.y_scores_.shape[1]) def test_univariate_pls_regression(): # Ensure 1d Y is correctly interpreted d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSRegression() # Compare 1d to column vector model1 = clf.fit(X, Y[:, 0]).coef_ model2 = clf.fit(X, Y[:, :1]).coef_ assert_array_almost_equal(model1, model2) def test_predict_transform_copy(): # check that the "copy" keyword works d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSCanonical() X_copy = X.copy() Y_copy = Y.copy() clf.fit(X, Y) # check that results are identical with copy assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False)) assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False)) # check also if passing Y assert_array_almost_equal(clf.transform(X, Y), clf.transform(X.copy(), Y.copy(), copy=False)) # check that copy doesn't destroy # we do want to check exact equality here assert_array_equal(X_copy, X) assert_array_equal(Y_copy, Y) # also check that mean wasn't zero before (to make sure we didn't touch it) 
assert_true(np.all(X.mean(axis=0) != 0)) def test_scale_and_stability(): # We test scale=True parameter # This allows to check numerical stability over platforms as well d = load_linnerud() X1 = d.data Y1 = d.target # causes X[:, -1].std() to be zero X1[:, -1] = 1.0 # From bug #2821 # Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0 # This test robustness of algorithm when dealing with value close to 0 X2 = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]) Y2 = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]) for (X, Y) in [(X1, Y1), (X2, Y2)]: X_std = X.std(axis=0, ddof=1) X_std[X_std == 0] = 1 Y_std = Y.std(axis=0, ddof=1) Y_std[Y_std == 0] = 1 X_s = (X - X.mean(axis=0)) / X_std Y_s = (Y - Y.mean(axis=0)) / Y_std for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.set_params(scale=True) X_score, Y_score = clf.fit_transform(X, Y) clf.set_params(scale=False) X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s) assert_array_almost_equal(X_s_score, X_score) assert_array_almost_equal(Y_s_score, Y_score) # Scaling should be idempotent clf.set_params(scale=True) X_score, Y_score = clf.fit_transform(X_s, Y_s) assert_array_almost_equal(X_s_score, X_score) assert_array_almost_equal(Y_s_score, Y_score) def test_pls_errors(): d = load_linnerud() X = d.data Y = d.target for clf in [pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.n_components = 4 assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
bsd-3-clause
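Editorial note: several assertions above compare fitted loadings and weights to reference values only up to a per-column sign flip, by dividing one matrix by the other and checking that the ratios are consistently +1 or -1. The NumPy-only sketch below isolates that comparison idiom, independent of the PLS fixtures used in the tests.

# Sketch of the "equal up to column sign" check used throughout test_pls.
import numpy as np

def equal_up_to_column_sign(fitted, reference, decimal=5):
    ratio = fitted / reference
    # every entry must be +/-1, and the sign must be the same within a column
    np.testing.assert_array_almost_equal(np.abs(ratio), 1, decimal)
    np.testing.assert_array_almost_equal(ratio, ratio[0:1, :], decimal)

reference = np.array([[0.6, -0.2], [0.7, 0.9]])
fitted = reference * np.array([1.0, -1.0])   # second column sign-flipped
equal_up_to_column_sign(fitted, reference)
print("matrices agree up to column signs")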
eickenberg/scikit-learn
examples/applications/plot_species_distribution_modeling.py
28
7434
""" ============================= Species distribution modeling ============================= Modeling species' geographic distributions is an important problem in conservation biology. In this example we model the geographic distribution of two south american mammals given past observations and 14 environmental variables. Since we have only positive examples (there are no unsuccessful observations), we cast this problem as a density estimation problem and use the `OneClassSVM` provided by the package `sklearn.svm` as our modeling tool. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause from __future__ import print_function from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.datasets.base import Bunch from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn import svm, metrics # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False print(__doc__) def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid): """Create a bunch with information about a particular organism This will use the test/train record arrays to extract the data specific to the given species name. """ bunch = Bunch(name=' '.join(species_name.split("_")[:2])) species_name = species_name.encode('ascii') points = dict(test=test, train=train) for label, pts in points.items(): # choose points associated with the desired species pts = pts[pts['species'] == species_name] bunch['pts_%s' % label] = pts # determine coverage values for each of the training & testing points ix = np.searchsorted(xgrid, pts['dd long']) iy = np.searchsorted(ygrid, pts['dd lat']) bunch['cov_%s' % label] = coverages[:, -iy, ix].T return bunch def plot_species_distribution(species=["bradypus_variegatus_0", "microryzomys_minutus_0"]): """ Plot the species distribution. 
""" if len(species) > 2: print("Note: when more than two species are provided," " only the first two will be used") t0 = time() # Load the compressed data data = fetch_species_distributions() # Set up the data grid xgrid, ygrid = construct_grids(data) # The grid in x,y coordinates X, Y = np.meshgrid(xgrid, ygrid[::-1]) # create a bunch for each species BV_bunch = create_species_bunch(species[0], data.train, data.test, data.coverages, xgrid, ygrid) MM_bunch = create_species_bunch(species[1], data.train, data.test, data.coverages, xgrid, ygrid) # background points (grid coordinates) for evaluation np.random.seed(13) background_points = np.c_[np.random.randint(low=0, high=data.Ny, size=10000), np.random.randint(low=0, high=data.Nx, size=10000)].T # We'll make use of the fact that coverages[6] has measurements at all # land points. This will help us decide between land and water. land_reference = data.coverages[6] # Fit, predict, and plot for each species. for i, species in enumerate([BV_bunch, MM_bunch]): print("_" * 80) print("Modeling distribution of species '%s'" % species.name) # Standardize features mean = species.cov_train.mean(axis=0) std = species.cov_train.std(axis=0) train_cover_std = (species.cov_train - mean) / std # Fit OneClassSVM print(" - fit OneClassSVM ... ", end='') clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5) clf.fit(train_cover_std) print("done.") # Plot map of South America plt.subplot(1, 2, i + 1) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) print(" - predict species distribution") # Predict species distribution using the training data Z = np.ones((data.Ny, data.Nx), dtype=np.float64) # We'll predict only for the land points. idx = np.where(land_reference > -9999) coverages_land = data.coverages[:, idx[0], idx[1]].T pred = clf.decision_function((coverages_land - mean) / std)[:, 0] Z *= pred.min() Z[idx[0], idx[1]] = pred levels = np.linspace(Z.min(), Z.max(), 25) Z[land_reference == -9999] = -9999 # plot contours of the prediction plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) plt.colorbar(format='%.2f') # scatter training/testing points plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'], s=2 ** 2, c='black', marker='^', label='train') plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'], s=2 ** 2, c='black', marker='x', label='test') plt.legend() plt.title(species.name) plt.axis('equal') # Compute AUC with regards to background points pred_background = Z[background_points[0], background_points[1]] pred_test = clf.decision_function((species.cov_test - mean) / std)[:, 0] scores = np.r_[pred_test, pred_background] y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)] fpr, tpr, thresholds = metrics.roc_curve(y, scores) roc_auc = metrics.auc(fpr, tpr) plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right") print("\n Area under the ROC curve : %f" % roc_auc) print("\ntime elapsed: %.2fs" % (time() - t0)) plot_species_distribution() plt.show()
bsd-3-clause
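Editorial note: the modeling step above reduces to standardizing the coverage features, fitting a OneClassSVM on the presence points only, and reading decision_function values over a grid. The miniature below reproduces that presence-only pattern on synthetic 2-D points, with no basemap and no dataset download; the point cloud and grid are invented.

# Minimal presence-only density sketch mirroring the OneClassSVM usage above.
import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
presence = rng.normal(loc=[1.0, -0.5], scale=0.3, size=(200, 2))  # toy sightings

mean, std = presence.mean(axis=0), presence.std(axis=0)
train_std = (presence - mean) / std

clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_std)

# score a small grid of candidate locations
xx, yy = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
grid = np.c_[xx.ravel(), yy.ravel()]
scores = clf.decision_function((grid - mean) / std).reshape(xx.shape)
print("highest-scoring grid cell:", np.unravel_index(scores.argmax(), scores.shape))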
google-research/google-research
aloe/aloe/common/plot_2d.py
1
2275
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: skip-file

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np


def plot_heatmap(pdf_func, out_name, size=3):
    w = 100
    x = np.linspace(-size, size, w)
    y = np.linspace(-size, size, w)
    xx, yy = np.meshgrid(x, y)
    coords = np.stack([xx.flatten(), yy.flatten()]).transpose()
    scores = pdf_func(coords)
    a = scores.reshape((w, w))
    plt.imshow(a)
    plt.axis('equal')
    plt.axis('off')
    plt.savefig(out_name, bbox_inches='tight')
    plt.close()


def plot_samples(samples, out_name, lim=None, axis=True):
    plt.scatter(samples[:, 0], samples[:, 1], marker='.')
    plt.axis('equal')
    if lim is not None:
        plt.xlim(-lim, lim)
        plt.ylim(-lim, lim)
    if not axis:
        plt.axis('off')
    plt.savefig(out_name, bbox_inches='tight')
    plt.close()


def plot_joint(dataset, samples, out_name):
    x = np.max(dataset)
    y = np.max(-dataset)
    z = np.ceil(max((x, y)))
    plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
    plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
    plt.legend(['training data', 'ADE sampled'])
    plt.axis('equal')
    plt.xlim(-z, z)
    plt.ylim(-z, z)
    plt.savefig(out_name, bbox_inches='tight')
    plt.close()

    fname = out_name.split('/')[-1]
    out_name = '/'.join(out_name.split('/')[:-1]) + '/none-' + fname
    plt.figure(figsize=(8, 8))
    plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
    plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
    plt.axis('equal')
    plt.xlim(-z, z)
    plt.ylim(-z, z)
    plt.savefig(out_name, bbox_inches='tight')
    plt.close()
apache-2.0
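Editorial note: plot_heatmap above expects a pdf_func that maps an (N, 2) array of coordinates to N scores. The sketch below builds the same kind of grid and renders an isotropic Gaussian; it is written standalone rather than importing the module above, and the output filename is arbitrary.

# Usage sketch: an isotropic Gaussian scored on the same kind of grid that
# plot_heatmap builds internally; written standalone so it runs by itself.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np

def gaussian_pdf(coords):
    # coords has shape (N, 2); return one unnormalized density value per row
    return np.exp(-0.5 * np.sum(coords ** 2, axis=1))

size, w = 3, 100
x = np.linspace(-size, size, w)
xx, yy = np.meshgrid(x, x)
coords = np.stack([xx.flatten(), yy.flatten()]).transpose()
heat = gaussian_pdf(coords).reshape((w, w))

plt.imshow(heat)
plt.axis('off')
plt.savefig('gaussian_heatmap.png', bbox_inches='tight')
plt.close()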
clbarnes/plotstyles
generate_examples.py
1
1136
import json

from nbformat import current as nbf
from matplotlib import pyplot as plt
# import subprocess

template_path = 'templates/template.ipynb'
output_path = 'plotstyles.ipynb'

nb = nbf.new_notebook()

with open(template_path) as f:
    template = json.load(f)

out_cells = []
for cell in template['cells']:
    cell_type = cell['cell_type']
    source = cell['source']
    if cell_type == 'markdown':
        out_cells.append(nbf.new_text_cell('markdown', source))
    else:
        out_cells.append(nbf.new_code_cell(source))


def make_title(s):
    return s.replace('-', ' ').replace('_', ' ').title()


for style_str in sorted(plt.style.available):
    out_cells.append(nbf.new_text_cell(
        'markdown', "### {} (`'{}'`)".format(make_title(style_str), style_str)))
    out_cells.append(nbf.new_code_cell(
        """\
with plt.style.context(('{0}')):
    make_plots('{0}')\
""".format(style_str).splitlines(keepends=True)
    ))

nb['worksheets'].append(nbf.new_worksheet(cells=out_cells))

with open(output_path, 'w') as f:
    nbf.write(nb, f, 'ipynb')

# subprocess.call(["ipython", "-c", "%run {}".format(filename)])
mit
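Editorial note: generate_examples.py relies on the legacy nbformat.current API (new_text_cell, worksheets), which later nbformat releases dropped. The sketch below shows the same cell-building step with what I believe is the nbformat v4 interface; exact version behavior may differ, so treat it as an outline rather than a drop-in replacement, and note that make_plots is only embedded as a string taken from the template context.

# Sketch of the notebook-building step with the nbformat v4 API (assumed).
import nbformat
from nbformat.v4 import new_notebook, new_markdown_cell, new_code_cell

styles = ['classic', 'ggplot']   # hypothetical subset of plt.style.available

cells = []
for style_str in styles:
    cells.append(new_markdown_cell("### `{}`".format(style_str)))
    cells.append(new_code_cell(
        "with plt.style.context('{0}'):\n    make_plots('{0}')".format(style_str)))

nb = new_notebook(cells=cells)
with open('plotstyles_v4.ipynb', 'w') as f:
    nbformat.write(nb, f)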
sdiazb/airflow
airflow/hooks/dbapi_hook.py
17
9454
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from builtins import str from past.builtins import basestring from datetime import datetime from contextlib import closing import numpy import logging import sys from sqlalchemy import create_engine from airflow.hooks.base_hook import BaseHook from airflow.exceptions import AirflowException class DbApiHook(BaseHook): """ Abstract base class for sql hooks. """ # Override to provide the connection name. conn_name_attr = None # Override to have a default connection id for a particular dbHook default_conn_name = 'default_conn_id' # Override if this db supports autocommit. supports_autocommit = False # Override with the object that exposes the connect method connector = None def __init__(self, *args, **kwargs): if not self.conn_name_attr: raise AirflowException("conn_name_attr is not defined") elif len(args) == 1: setattr(self, self.conn_name_attr, args[0]) elif self.conn_name_attr not in kwargs: setattr(self, self.conn_name_attr, self.default_conn_name) else: setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr]) def get_conn(self): """Returns a connection object """ db = self.get_connection(getattr(self, self.conn_name_attr)) return self.connector.connect( host=db.host, port=db.port, username=db.login, schema=db.schema) def get_uri(self): conn = self.get_connection(getattr(self, self.conn_name_attr)) login = '' if conn.login: login = '{conn.login}:{conn.password}@'.format(conn=conn) host = conn.host if conn.port is not None: host += ':{port}'.format(port=conn.port) return '{conn.conn_type}://{login}{host}/{conn.schema}'.format( conn=conn, login=login, host=host) def get_sqlalchemy_engine(self, engine_kwargs=None): if engine_kwargs is None: engine_kwargs = {} return create_engine(self.get_uri(), **engine_kwargs) def get_pandas_df(self, sql, parameters=None): """ Executes the sql and returns a pandas dataframe :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ if sys.version_info[0] < 3: sql = sql.encode('utf-8') import pandas.io.sql as psql with closing(self.get_conn()) as conn: return psql.read_sql(sql, con=conn, params=parameters) def get_records(self, sql, parameters=None): """ Executes the sql and returns a set of records. :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ if sys.version_info[0] < 3: sql = sql.encode('utf-8') with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: if parameters is not None: cur.execute(sql, parameters) else: cur.execute(sql) return cur.fetchall() def get_first(self, sql, parameters=None): """ Executes the sql and returns the first resulting row. 
:param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ if sys.version_info[0] < 3: sql = sql.encode('utf-8') with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: if parameters is not None: cur.execute(sql, parameters) else: cur.execute(sql) return cur.fetchone() def run(self, sql, autocommit=False, parameters=None): """ Runs a command or a list of commands. Pass a list of sql statements to the sql parameter to get them to execute sequentially :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param autocommit: What to set the connection's autocommit setting to before executing the query. :type autocommit: bool :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ if isinstance(sql, basestring): sql = [sql] with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, autocommit) with closing(conn.cursor()) as cur: for s in sql: if sys.version_info[0] < 3: s = s.encode('utf-8') logging.info(s) if parameters is not None: cur.execute(s, parameters) else: cur.execute(s) conn.commit() def set_autocommit(self, conn, autocommit): conn.autocommit = autocommit def get_cursor(self): """ Returns a cursor """ return self.get_conn().cursor() def insert_rows(self, table, rows, target_fields=None, commit_every=1000): """ A generic way to insert a set of tuples into a table, a new transaction is created every commit_every rows :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings :param commit_every: The maximum number of rows to insert in one transaction. Set to 0 to insert all rows in one transaction. :type commit_every: int """ if target_fields: target_fields = ", ".join(target_fields) target_fields = "({})".format(target_fields) else: target_fields = '' with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, False) conn.commit() with closing(conn.cursor()) as cur: for i, row in enumerate(rows, 1): l = [] for cell in row: l.append(self._serialize_cell(cell, conn)) values = tuple(l) placeholders = ["%s",]*len(values) sql = "INSERT INTO {0} {1} VALUES ({2});".format( table, target_fields, ",".join(placeholders)) cur.execute(sql, values) if commit_every and i % commit_every == 0: conn.commit() logging.info( "Loaded {i} into {table} rows so far".format(**locals())) conn.commit() logging.info( "Done loading. Loaded a total of {i} rows".format(**locals())) @staticmethod def _serialize_cell(cell, conn=None): """ Returns the SQL literal of the cell as a string. 
:param cell: The cell to insert into the table :type cell: object :param conn: The database connection :type conn: connection object :return: The serialized cell :rtype: str """ if cell is None: return None if isinstance(cell, datetime): return cell.isoformat() return str(cell) def bulk_dump(self, table, tmp_file): """ Dumps a database table into a tab-delimited file :param table: The name of the source table :type table: str :param tmp_file: The path of the target file :type tmp_file: str """ raise NotImplementedError() def bulk_load(self, table, tmp_file): """ Loads a tab-delimited file into a database table :param table: The name of the target table :type table: str :param tmp_file: The path of the file to load into the table :type tmp_file: str """ raise NotImplementedError()
apache-2.0
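Editorial note: insert_rows above builds one parameterized INSERT per row and commits every commit_every rows. The standalone sketch below reproduces that batching pattern against the standard-library sqlite3 module, which uses '?' placeholders instead of the '%s' style shown above; table and column names are invented.

# Illustrative batched-insert loop in the spirit of insert_rows, on sqlite3.
import sqlite3

def insert_rows(conn, table, rows, target_fields, commit_every=1000):
    placeholders = ",".join(["?"] * len(target_fields))
    sql = "INSERT INTO {0} ({1}) VALUES ({2})".format(
        table, ",".join(target_fields), placeholders)
    cur = conn.cursor()
    for i, row in enumerate(rows, 1):
        cur.execute(sql, row)
        if commit_every and i % commit_every == 0:   # commit in batches
            conn.commit()
    conn.commit()                                    # flush the final partial batch

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE metrics (name TEXT, value REAL)")
insert_rows(conn, "metrics", [("loss", 0.12), ("acc", 0.97)],
            ["name", "value"], commit_every=1)
print(conn.execute("SELECT COUNT(*) FROM metrics").fetchone())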
icdishb/scikit-learn
sklearn/tests/test_multiclass.py
9
24243
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_greater from sklearn.multiclass import OneVsRestClassifier from sklearn.multiclass import OneVsOneClassifier from sklearn.multiclass import OutputCodeClassifier from sklearn.multiclass import fit_ovr from sklearn.multiclass import fit_ovo from sklearn.multiclass import fit_ecoc from sklearn.multiclass import predict_ovr from sklearn.multiclass import predict_ovo from sklearn.multiclass import predict_ecoc from sklearn.multiclass import predict_proba_ovr from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.preprocessing import LabelBinarizer from sklearn.svm import LinearSVC, SVC from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge, Perceptron, LogisticRegression) from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline from sklearn import svm from sklearn import datasets from sklearn.externals.six.moves import zip iris = datasets.load_iris() rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] n_classes = 3 def test_ovr_exceptions(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovr.predict, []) with ignore_warnings(): assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()], LabelBinarizer(), []) # Fail on multioutput data assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1, 2], [3, 1]])) assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1.5, 2.4], [3.1, 0.8]])) def test_ovr_fit_predict(): # A classifier which implements decision_function. ovr = OneVsRestClassifier(LinearSVC(random_state=0)) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovr.estimators_), n_classes) clf = LinearSVC(random_state=0) pred2 = clf.fit(iris.data, iris.target).predict(iris.data) assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2)) # A classifier which implements predict_proba. 
ovr = OneVsRestClassifier(MultinomialNB()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_greater(np.mean(iris.target == pred), 0.65) def test_ovr_ovo_regressor(): # test that ovr and ovo work on regressors which don't have a decision_function ovr = OneVsRestClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovr.estimators_), n_classes) assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert_greater(np.mean(pred == iris.target), .9) ovr = OneVsOneClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2) assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert_greater(np.mean(pred == iris.target), .9) def test_ovr_fit_predict_sparse(): for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix, sp.lil_matrix]: base_clf = MultinomialNB(alpha=1) X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train)) Y_pred_sprs = clf_sprs.predict(X_test) assert_true(clf.multilabel_) assert_true(sp.issparse(Y_pred_sprs)) assert_array_equal(Y_pred_sprs.toarray(), Y_pred) # Test predict_proba Y_proba = clf_sprs.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred_sprs.toarray()) # Test decision_function clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train)) dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int) assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray()) def test_ovr_always_present(): # Test that ovr works with classes that are always present or absent. # Note: tests is the case where _ConstantPredictor is utilised X = np.ones((10, 2)) X[:5, :] = 0 # Build an indicator matrix where two features are always on. # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)] y = np.zeros((10, 3)) y[5:, 0] = 1 y[:, 1] = 1 y[:, 2] = 1 ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict(X) assert_array_equal(np.array(y_pred), np.array(y)) y_pred = ovr.decision_function(X) assert_equal(np.unique(y_pred[:, -2:]), 1) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.ones(X.shape[0])) # y has a constantly absent label y = np.zeros((10, 2)) y[5:, 0] = 1 # variable label ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0])) def test_ovr_multiclass(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "ham", "eggs", "ham"] Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 0, 4]])[0] assert_array_equal(y_pred, [0, 0, 1]) def test_ovr_binary(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "spam", "eggs", "spam"] Y = np.array([[0, 1, 1, 0, 1]]).T classes = set("eggs spam".split()) def conduct_test(base_clf, test_predict_proba=False): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) if test_predict_proba: X_test = np.array([[0, 0, 4]]) probabilities = clf.predict_proba(X_test) assert_equal(2, len(probabilities[0])) assert_equal(clf.classes_[np.argmax(probabilities, axis=1)], clf.predict(X_test)) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[3, 0, 0]])[0] assert_equal(y_pred, 1) for base_clf in (LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): conduct_test(base_clf) for base_clf in (MultinomialNB(), SVC(probability=True), LogisticRegression()): conduct_test(base_clf, test_predict_proba=True) @ignore_warnings def test_ovr_multilabel(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"], ["ham", "eggs"], ["ham"]] # y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]] Y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5)): # test input as lists of tuples clf = assert_warns(DeprecationWarning, OneVsRestClassifier(base_clf).fit, X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict([[0, 4, 4]])[0] assert_equal(set(y_pred), set(["spam", "eggs"])) assert_true(clf.multilabel_) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert_true(clf.multilabel_) def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert_equal(len(ovr.estimators_), 3) assert_greater(ovr.score(iris.data, iris.target), .9) def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert_true(clf.multilabel_) assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2) assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) # Estimator with predict_proba disabled, depending on parameters. decision_only = OneVsRestClassifier(svm.SVC(probability=False)) decision_only.fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. 
pred = np.array([l.argmax() for l in Y_proba]) assert_false((pred - Y_pred).any()) def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal((clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test)) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) def test_ovr_coef_(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) ovr.fit(iris.data, iris.target) shape = ovr.coef_.shape assert_equal(shape[0], n_classes) assert_equal(shape[1], iris.data.shape[1]) def test_ovr_coef_exceptions(): # Not fitted exception! ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # lambda is needed because we don't want coef_ to be evaluated right away assert_raises(ValueError, lambda x: ovr.coef_, None) # Doesn't have coef_ exception! ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_raises(AttributeError, lambda x: ovr.coef_, None) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovo.predict, []) def test_ovo_fit_on_list(): # Test that OneVsOne fitting works with a list of targets and yields the # same output as predict from an array ovo = OneVsOneClassifier(LinearSVC(random_state=0)) prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data) prediction_from_list = ovo.fit(iris.data, list(iris.target)).predict(iris.data) assert_array_equal(prediction_from_array, prediction_from_list) def test_ovo_fit_predict(): # A classifier which implements decision_function. ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) # A classifier which implements predict_proba. 
ovo = OneVsOneClassifier(MultinomialNB()) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) def test_ovo_decision_function(): n_samples = iris.data.shape[0] ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0)) ovo_clf.fit(iris.data, iris.target) decisions = ovo_clf.decision_function(iris.data) assert_equal(decisions.shape, (n_samples, n_classes)) assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data)) # Compute the votes votes = np.zeros((n_samples, n_classes)) k = 0 for i in range(n_classes): for j in range(i + 1, n_classes): pred = ovo_clf.estimators_[k].predict(iris.data) votes[pred == 0, i] += 1 votes[pred == 1, j] += 1 k += 1 # Extract votes and verify assert_array_equal(votes, np.round(decisions)) for class_idx in range(n_classes): # For each sample and each class, there only 3 possible vote levels # because they are only 3 distinct class pairs thus 3 distinct # binary classifiers. # Therefore, sorting predictions based on votes would yield # mostly tied predictions: assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.]))) # The OVO decision function on the other hand is able to resolve # most of the ties on this data as it combines both the vote counts # and the aggregated confidence levels of the binary classifiers # to compute the aggregate decision function. The iris dataset # has 150 samples with a couple of duplicates. The OvO decisions # can resolve most of the ties: assert_greater(len(np.unique(decisions[:, class_idx])), 146) def test_ovo_gridsearch(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovo, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovo_ties(): # Test that ties are broken using the decision function, # not defaulting to the smallest label X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y = np.array([2, 0, 1, 2]) multi_clf = OneVsOneClassifier(Perceptron(shuffle=False)) ovo_prediction = multi_clf.fit(X, y).predict(X) ovo_decision = multi_clf.decision_function(X) # Classifiers are in order 0-1, 0-2, 1-2 # Use decision_function to compute the votes and the normalized # sum_of_confidences, which is used to disambiguate when there is a tie in # votes. 
votes = np.round(ovo_decision) normalized_confidences = ovo_decision - votes # For the first point, there is one vote per class assert_array_equal(votes[0, :], 1) # For the rest, there is no tie and the prediction is the argmax assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:]) # For the tie, the prediction is the class with the highest score assert_equal(ovo_prediction[0], normalized_confidences[0].argmax()) def test_ovo_ties2(): # test that ties can not only be won by the first two labels X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y_ref = np.array([2, 0, 1, 2]) # cycle through labels so that each label wins once for i in range(3): y = (y_ref + i) % 3 multi_clf = OneVsOneClassifier(Perceptron(shuffle=False)) ovo_prediction = multi_clf.fit(X, y).predict(X) assert_equal(ovo_prediction[0], i % 3) def test_ovo_string_y(): # Test that the OvO doesn't mess up the encoding of string labels X = np.eye(4) y = np.array(['a', 'b', 'c', 'd']) ovo = OneVsOneClassifier(LinearSVC()) ovo.fit(X, y) assert_array_equal(y, ovo.predict(X)) def test_ecoc_exceptions(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ecoc.predict, []) def test_ecoc_fit_predict(): # A classifier which implements decision_function. ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) # A classifier which implements predict_proba. ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) @ignore_warnings def test_deprecated(): base_estimator = DecisionTreeClassifier(random_state=0) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] all_metas = [ (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr), (OneVsOneClassifier, fit_ovo, predict_ovo, None), (OutputCodeClassifier, fit_ecoc, predict_ecoc, None), ] for MetaEst, fit_func, predict_func, proba_func in all_metas: try: meta_est = MetaEst(base_estimator, random_state=0).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train, random_state=0) except TypeError: meta_est = MetaEst(base_estimator).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train) if len(fitted_return) == 2: estimators_, classes_or_lb = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test), meta_est.predict(X_test)) if proba_func is not None: assert_almost_equal(proba_func(estimators_, X_test, is_multilabel=False), meta_est.predict_proba(X_test)) else: estimators_, classes_or_lb, codebook = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, codebook, X_test), meta_est.predict(X_test)) if __name__ == "__main__": import nose nose.runmodule()
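# Illustrative sketch of the estimator these tests exercise most:
# OneVsRestClassifier fits one binary classifier per class. This mirrors
# test_ovr_fit_predict above and runs on its own with scikit-learn installed.
from sklearn import datasets
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC

iris = datasets.load_iris()
ovr = OneVsRestClassifier(LinearSVC(random_state=0)).fit(iris.data, iris.target)
print(len(ovr.estimators_))                 # 3 binary classifiers, one per class
print(ovr.score(iris.data, iris.target))    # training accuracy, as in the tests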
bsd-3-clause
peterwilletts24/Monsoon-Python-Scripts
pp_load_mean_pickle.py
1
1683
""" Average mean sea level pressure by day, unrotate lat/lon and save """ import os, sys import itertools import numpy as np import cPickle as pickle #import matplotlib.animation as animation import iris import iris.coords as coords import iris.coord_categorisation from iris.analysis.interpolate import linear import cartopy.crs as ccrs import h5py #def checkpoleposition(cube): #rot_pole = temperature.coord('grid_latitude').coord_system.as_cartopy_crs() # ll = ccrs.Geodetic() #lon, lat = 40, -42 # Transform the lon lat point into unrotated coordinates. #target_xy = ll.transform_point(rotated_lon, rotated_lat, rot_pole) #extracted_cube = linear(temperature, [('grid_latitude', target_xy[1]), ('grid_longitude', target_xy[0] #experiment_ids = ['djzny', 'djznq', 'djznw'] experiment_ids = ['djzny'] data_to_mean = ['temp', 'sp_hum'] dset = ['t_on_p', 'sh_on_p'] for experiment_id in experiment_ids: expmin1 = experiment_id[:-1] for a, dm in enumerate(data_to_mean): #dataset = iris.load_cube(fname) fname = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_%s' % (dm,experiment_id) ds = dset[a] with h5py.File(fname, 'r') as i: d = i['%s' % ds] print d #iris.coord_categorisation.add_day_of_year(p_at_msl, 'time', name='dayyear') #print fname #daily_mean = .aggregated_by(['dayyear'], iris.analysis.MEAN) #daily_mean_rot = checkpoleposition(daily_mean) #if not os.path.exists(experiment_id): os.makedirs(experiment_id) #pickle.dump( daily_mean, open( "/home/pwille/python_scripts/%s/pickle_daily_mean_%s.p" % (experiment_id, experiment_id), "wb" ) )
mit
fzalkow/scikit-learn
sklearn/ensemble/tests/test_forest.py
48
35412
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import product import numpy as np from scipy.sparse import csr_matrix, csc_matrix, coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.grid_search import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.validation import check_random_state from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): # Check consistency on dataset iris. 
ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): # Check consistency on dataset boston house prices. ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", )): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): # Regression models should not have a classes_ attribute. r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): # Predict probabilities. ForestClassifier = FOREST_CLASSIFIERS[name] with np.errstate(divide="ignore"): clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_probability(): for name in FOREST_CLASSIFIERS: yield check_probability, name def check_importances(name, X, y): # Check variable importances. 
ForestClassifier = FOREST_CLASSIFIERS[name] for n_jobs in [1, 2]: clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) X_new = clf.transform(X, threshold="mean") assert_less(0 < X_new.shape[1], X.shape[1]) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=sample_weight) importances = clf.feature_importances_ assert_true(np.all(importances >= 0.0)) clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=3 * sample_weight) importances_bis = clf.feature_importances_ assert_almost_equal(importances, importances_bis) def test_importances(): X, y = datasets.make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name in FOREST_CLASSIFIERS: yield check_importances, name, X, y def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): # Check that oob prediction is a good estimation of the generalization # error. # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # csc matrix yield check_oob_score, name, csc_matrix(iris.data), iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 # csc matrix yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): # Check that 
base trees can be grid-searched. for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): # Check pickability. ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): # Check estimators on multi-output problems. X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): # Test that n_classes_ and classes_ have proper shape. ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning a dense array. # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning the same array for both argument values. 
# Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. 
assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name, X, y): # Test precedence of max_leaf_nodes over max_depth. ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name, X, y def check_min_samples_leaf(name, X, y): # Test if leaves contain more than leaf_count training examples ForestEstimator = FOREST_ESTIMATORS[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): est = ForestEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def test_min_samples_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name, X, y def check_min_weight_fraction_leaf(name, X, y): # Test if leaves contain at least min_weight_fraction_leaf of the # training set ForestEstimator = FOREST_ESTIMATORS[name] rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) if isinstance(est, (RandomForestClassifier, RandomForestRegressor)): est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name, X, y def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if 
name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0, n_samples=40) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): # Check that it works no matter the memory layout est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y) est = ForestEstimator(random_state=0) est.fit(X_2d, y) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_raises(ValueError, est.predict, X) def test_1d_input(): X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target for name in FOREST_ESTIMATORS: yield check_1d_input, name, X, X_2d, y def check_class_weights(name): # Check class_weights resemble sample_weights behavior. 
ForestClassifier = FOREST_CLASSIFIERS[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = ForestClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "balanced" which should also have no effect clf4 = ForestClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in FOREST_CLASSIFIERS: yield check_class_weights, name def check_class_weight_balanced_and_bootstrap_multi_output(name): # Test class_weight works for multi-output""" ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(class_weight='balanced', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}], random_state=0) clf.fit(X, _y) # smoke test for subsample and balanced subsample clf = ForestClassifier(class_weight='balanced_subsample', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight='subsample', random_state=0) ignore_warnings(clf.fit)(X, _y) def test_class_weight_balanced_and_bootstrap_multi_output(): for name in FOREST_CLASSIFIERS: yield check_class_weight_balanced_and_bootstrap_multi_output, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. 
ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = ForestClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Warning warm_start with preset clf = ForestClassifier(class_weight='auto', warm_start=True, random_state=0) assert_warns(UserWarning, clf.fit, X, y) assert_warns(UserWarning, clf.fit, X, _y) # Not a list or preset for multi-output clf = ForestClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in FOREST_CLASSIFIERS: yield check_class_weight_errors, name def check_warm_start(name, random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): # Test if fit clears state and grows a new forest when warm_start==False. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): # Test if warm start second fit with smaller n_estimators raises error. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): # Test if warm start with equal n_estimators does nothing and returns the # same forest and raises a warning. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. 
clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): # Test that the warm start computes oob score when asked. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name def test_dtype_convert(): classifier = RandomForestClassifier() CLASSES = 15 X = np.eye(CLASSES) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]] result = classifier.fit(X, y).predict(X) assert_array_equal(result, y)
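# Illustrative sketch of the simplest pattern exercised throughout these tests:
# fit a forest on iris and inspect its score and feature importances. Runs on
# its own with scikit-learn installed.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

iris = load_iris()
clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(iris.data, iris.target)
print(clf.score(iris.data, iris.target))   # training accuracy
print(clf.feature_importances_)            # one importance per feature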
bsd-3-clause
glm-tools/pyglmnet
examples/plot_community_crime.py
1
2870
# -*- coding: utf-8 -*- """ ====================== Community and Crime ====================== This is a real dataset of per capita violent crime, with demographic data comprising 128 attributes from 1994 counties in the US. The original dataset can be found here: http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime The target variables (per capita violent crime) are normalized to lie in a [0, 1] range. We preprocessed this dataset to exclude attributes with missing values. """ ######################################################## # Author: Vinicius Marques <vini.type@gmail.com> # License: MIT ######################################################## # Imports import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from pyglmnet import GLM, GLMCV, datasets ######################################################## # Download and preprocess data files X, y = datasets.fetch_community_crime_data() n_samples, n_features = X.shape ######################################################## # Split the data into training and test sets X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.33, random_state=0) ######################################################## # Fit a binomial distributed GLM with elastic net regularization # use the default value for reg_lambda glm = GLMCV(distr='binomial', alpha=0.05, score_metric='pseudo_R2', cv=3, tol=1e-4) # fit model glm.fit(X_train, y_train) # score the test set prediction y_test_hat = glm.predict_proba(X_test) print("test set pseudo $R^2$ = %f" % glm.score(X_test, y_test)) ######################################################## # Now use GridSearchCV to compare import numpy as np # noqa from sklearn.model_selection import GridSearchCV # noqa from sklearn.model_selection import KFold # noqa cv = KFold(3) reg_lambda = np.logspace(np.log(0.5), np.log(0.01), 10, base=np.exp(1)) param_grid = [{'reg_lambda': reg_lambda}] glm = GLM(distr='binomial', alpha=0.05, score_metric='pseudo_R2', learning_rate=0.1, tol=1e-4, verbose=True) glmcv = GridSearchCV(glm, param_grid, cv=cv) glmcv.fit(X_train, y_train) print("test set pseudo $R^2$ = %f" % glmcv.score(X_test, y_test)) ######################################################## # Plot the true and predicted test set target values plt.plot(y_test[:50], 'ko-') plt.plot(y_test_hat[:50], 'ro-') plt.legend(['true', 'pred'], frameon=False) plt.xlabel('Counties') plt.ylabel('Per capita violent crime') plt.tick_params(axis='y', right='off') plt.tick_params(axis='x', top='off') ax = plt.gca() ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) plt.show() ######################################################## # We can also check if the algorithm converged properly glmcv.best_estimator_.plot_convergence()
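# Illustrative sketch of the GridSearchCV/KFold pattern used above, which works
# for any estimator exposing the scikit-learn interface. This version swaps in
# sklearn's LogisticRegression and synthetic data so it runs without pyglmnet
# or the crime dataset; it shows the cross-validation pattern, not pyglmnet itself.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, KFold, train_test_split

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.33, random_state=0)

param_grid = {"C": np.logspace(-2, 1, 5)}      # regularization strengths to search
search = GridSearchCV(LogisticRegression(solver="liblinear"), param_grid, cv=KFold(3))
search.fit(X_train, y_train)
print(search.best_params_, search.score(X_test, y_test))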
mit
kastnerkyle/dagbldr
examples/mnist_vae/sample_mnist_vae.py
5
2193
import argparse import numpy as np import os from dagbldr.datasets import fetch_binarized_mnist from dagbldr.utils import load_checkpoint, make_gif, interpolate_between_points parser = argparse.ArgumentParser() parser.add_argument("saved_functions_file", help="Saved pickle file from vae training") parser.add_argument("--seed", "-s", help="random seed for path calculation", action="store", default=1979, type=int) args = parser.parse_args() if not os.path.exists(args.saved_functions_file): raise ValueError("Please provide a valid path for saved pickle file!") checkpoint_dict = load_checkpoint(args.saved_functions_file) encode_function = checkpoint_dict["encode_function"] decode_function = checkpoint_dict["decode_function"] random_state = np.random.RandomState(args.seed) mnist = fetch_binarized_mnist() # visualize against validation so we aren't cheating valid_indices = mnist["valid_indices"] X = mnist["data"][valid_indices] # number of samples n_plot_samples = 5 # MNIST dimensions width = 28 height = 28 # Get random data samples ind = np.arange(len(X)) random_state.shuffle(ind) sample_X = X[ind[:n_plot_samples]] def gen_samples(arr): mu, log_sig = encode_function(arr) # No noise at test time out, = decode_function(mu + np.exp(log_sig)) return out # VAE specific plotting import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt samples = gen_samples(sample_X) f, axarr = plt.subplots(n_plot_samples, 2) for n, (X_i, s_i) in enumerate(zip(sample_X, samples)): axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray") axarr[n, 1].matshow(s_i.reshape(width, height), cmap="gray") axarr[n, 0].axis('off') axarr[n, 1].axis('off') plt.savefig('vae_reconstruction.png') plt.close() # Calculate linear path between points in space mus, log_sigmas = encode_function(sample_X) mu_path = interpolate_between_points(mus) log_sigma_path = interpolate_between_points(log_sigmas) # Path across space from one point to another path = mu_path + np.exp(log_sigma_path) out, = decode_function(path) make_gif(out, "vae_code.gif", width, height, delay=1, grayscale=True)
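# Illustrative sketch of the interpolation step above, which walks a straight
# line between latent codes. This tiny numpy stand-in shows the underlying
# linear interpolation; it is not dagbldr's interpolate_between_points.
import numpy as np

def lerp(a, b, n_steps=8):
    # one row per step, blending from a (t=0) to b (t=1)
    t = np.linspace(0.0, 1.0, n_steps)[:, None]
    return (1.0 - t) * a[None, :] + t * b[None, :]

z0, z1 = np.zeros(16), np.ones(16)
path = lerp(z0, z1)
print(path.shape)   # (8, 16): one latent code per frame of the gif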
bsd-3-clause
mqyqlx/deeppy
examples/mlp_mnist.py
9
2214
#!/usr/bin/env python """ Digit classification ==================== """ import numpy as np import matplotlib.pyplot as plt import deeppy as dp # Fetch MNIST data dataset = dp.dataset.MNIST() x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True) # Normalize pixel intensities scaler = dp.StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # Prepare network inputs batch_size = 128 train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size) test_input = dp.Input(x_test) # Setup network weight_gain = 2.0 weight_decay = 0.0005 net = dp.NeuralNetwork( layers=[ dp.FullyConnected( n_out=1024, weights=dp.Parameter(dp.AutoFiller(weight_gain), weight_decay=weight_decay), ), dp.ReLU(), dp.FullyConnected( n_out=1024, weights=dp.Parameter(dp.AutoFiller(weight_gain), weight_decay=weight_decay), ), dp.ReLU(), dp.FullyConnected( n_out=dataset.n_classes, weights=dp.Parameter(dp.AutoFiller()), ), ], loss=dp.SoftmaxCrossEntropy(), ) # Train network n_epochs = [50, 15] learn_rate = 0.05 for i, epochs in enumerate(n_epochs): trainer = dp.StochasticGradientDescent( max_epochs=epochs, learn_rule=dp.Momentum(learn_rate=learn_rate/10**i, momentum=0.94), ) trainer.train(net, train_input) # Evaluate on test data error = np.mean(net.predict(test_input) != y_test) print('Test error rate: %.4f' % error) # Plot dataset examples def plot_img(img, title): plt.figure() plt.imshow(img, cmap='gray', interpolation='nearest') plt.axis('off') plt.title(title) plt.tight_layout() imgs = np.reshape(x_train[:63, ...], (-1, 28, 28)) plot_img(dp.misc.img_tile(dp.misc.img_stretch(imgs)), 'Dataset examples') # Plot learned features in first layer w = np.array(net.layers[0].weights.array) w = np.reshape(w.T, (-1,) + dataset.img_shape) w = w[np.argsort(np.std(w, axis=(1, 2)))[-64:]] plot_img(dp.misc.img_tile(dp.misc.img_stretch(w)), 'Examples of features learned')
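# Illustrative sketch of the same standardize-then-train recipe, written
# against scikit-learn so it runs without deeppy or the MNIST download;
# MLPClassifier here is a stand-in for the deeppy network above, not its API.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

digits = load_digits()
X_train, X_test, y_train, y_test = \
    train_test_split(digits.data, digits.target, random_state=0)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)      # normalize intensities, as above
X_test = scaler.transform(X_test)
clf = MLPClassifier(hidden_layer_sizes=(64,), max_iter=300, random_state=0)
clf.fit(X_train, y_train)
print('Test error rate: %.4f' % np.mean(clf.predict(X_test) != y_test))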
mit
kazemakase/scikit-learn
examples/classification/plot_classifier_comparison.py
181
4699
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================

A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers. This should be taken with a grain of salt, as the
intuition conveyed by these examples does not necessarily carry over to real
datasets.

Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.

The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the
test set.
"""
print(__doc__)


# Code source: Gaël Varoquaux
#              Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA

h = .02  # step size in the mesh

names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA()]

X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]

figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)

        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
bsd-3-clause
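The example above builds its figure by looping over datasets and classifiers. The sketch below is not part of the record; it is a minimal, illustrative reduction of that loop to a single dataset/classifier pair, assuming a scikit-learn version where train_test_split lives in sklearn.model_selection (the random_state value and the choice of KNeighborsClassifier are arbitrary).

import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

# One synthetic dataset, standardized and split as in the example.
X, y = make_moons(noise=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4,
                                                    random_state=42)

# Fit one classifier and compute the test accuracy shown in each panel.
clf = KNeighborsClassifier(3).fit(X_train, y_train)
print("test accuracy: %.2f" % clf.score(X_test, y_test))

# Evaluate the class-1 probability on a mesh over the feature plane.
h = .02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1].reshape(xx.shape)

A filled contour of Z over (xx, yy) with matplotlib reproduces the decision surface drawn in one panel of the comparison grid.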
jblackburne/scikit-learn
doc/conf.py
12
9568
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

from __future__ import print_function

import sys
import os
from sklearn.externals.six import u

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))

from github_link import make_linkcode_resolve
import sphinx_gallery

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
    'numpy_ext.numpydoc',
    'sphinx.ext.linkcode', 'sphinx.ext.doctest',
    'sphinx_gallery.gen_gallery',
]

# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
    extensions.append('sphinx.ext.pngmath')
else:
    extensions.append('sphinx.ext.imgmath')

autodoc_default_flags = ['members', 'inherited-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# generate autosummary even if no references
autosummary_generate = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# Generate the plots for the gallery
plot_gallery = True

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2016, scikit-learn developers (BSD License)')

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
                      'google_analytics': True, 'surveybanner': False,
                      'sprintbanner': True}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'


# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
                    u('scikit-learn developers'), 'manual'), ]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
latex_domain_indices = False

trim_doctests_flags = True

sphinx_gallery_conf = {
    'doc_module': 'sklearn',
    'reference_url': {
        'sklearn': None,
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
        'nibabel': 'http://nipy.org/nibabel'}
}

# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
                   'sphx_glr_plot_outlier_detection_003.png': 372,
                   'sphx_glr_plot_gpr_co2_001.png': 350,
                   'sphx_glr_plot_adaboost_twoclass_001.png': 372,
                   'sphx_glr_plot_compare_methods_001.png': 349}


def make_carousel_thumbs(app, exception):
    """produces the final resized carousel images"""
    if exception is not None:
        return
    print('Preparing carousel images')

    image_dir = os.path.join(app.builder.outdir, '_images')
    for glr_plot, max_width in carousel_thumbs.items():
        image = os.path.join(image_dir, glr_plot)
        if os.path.exists(image):
            c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
            sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)


def setup(app):
    # to hide/show the prompt in code examples:
    app.add_javascript('js/copybutton.js')
    app.connect('build-finished', make_carousel_thumbs)


# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
                                         u'https://github.com/scikit-learn/'
                                         'scikit-learn/blob/{revision}/'
                                         '{package}/{path}#L{lineno}')
bsd-3-clause
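The conf.py record above wires custom behaviour into the Sphinx build through setup(app) and a 'build-finished' callback. The sketch below is a minimal, hypothetical illustration of that hook pattern, not the project's configuration; the report_build name is made up.

import os


def report_build(app, exception):
    # Sphinx passes the build exception (or None on success) to
    # 'build-finished' handlers.
    if exception is not None:
        return
    print("HTML written to %s" % os.path.abspath(app.builder.outdir))


def setup(app):
    # Sphinx calls setup(app) when it loads conf.py; register the callback
    # so it runs once the build has completed.
    app.connect('build-finished', report_build)

A conf.py containing such a hook is picked up automatically when the docs are built, e.g. with sphinx-build -b html <sourcedir> <outdir>; Sphinx imports the file at startup, calls setup(app), and fires the registered callback when the build finishes.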
theoryno3/pylearn2
pylearn2/scripts/datasets/browse_norb.py
44
15741
#!/usr/bin/env python """ A browser for the NORB and small NORB datasets. Navigate the images by choosing the values for the label vector. Note that for the 'big' NORB dataset, you can only set the first 5 label dimensions. You can then cycle through the 3-12 images that fit those labels. """ import sys import os import argparse import numpy import warnings try: import matplotlib from matplotlib import pyplot except ImportError as import_error: warnings.warn("Can't use this script without matplotlib.") matplotlib = None pyplot = None from pylearn2.datasets.new_norb import NORB from pylearn2.utils import safe_zip, serial def _parse_args(): parser = argparse.ArgumentParser( description="Browser for NORB dataset.") parser.add_argument('--which_norb', type=str, required=False, choices=('big', 'small'), help="'Selects the (big) NORB, or the Small NORB.") parser.add_argument('--which_set', type=str, required=False, choices=('train', 'test', 'both'), help="'train', or 'test'") parser.add_argument('--pkl', type=str, required=False, help=".pkl file of NORB dataset") parser.add_argument('--stereo_viewer', action='store_true', help="Swaps left and right stereo images, so you " "can see them in 3D by crossing your eyes.") parser.add_argument('--no_norm', action='store_true', help="Don't normalize pixel values") result = parser.parse_args() if (result.pkl is not None) == (result.which_norb is not None or result.which_set is not None): print("Must supply either --pkl, or both --which_norb and " "--which_set.") sys.exit(1) if (result.which_norb is None) != (result.which_set is None): print("When not supplying --pkl, you must supply both " "--which_norb and --which_set.") sys.exit(1) if result.pkl is not None: if not result.pkl.endswith('.pkl'): print("--pkl must be a filename that ends in .pkl") sys.exit(1) if not os.path.isfile(result.pkl): print("couldn't find --pkl file '%s'" % result.pkl) sys.exit(1) return result def _make_grid_to_short_label(dataset): """ Returns an array x such that x[a][b] gives label index a's b'th unique value. In other words, it maps label grid indices a, b to the corresponding label value. """ unique_values = [sorted(list(frozenset(column))) for column in dataset.y[:, :5].transpose()] # If dataset contains blank images, removes the '-1' labels # corresponding to blank images, since they aren't contained in the # label grid. category_index = dataset.label_name_to_index['category'] unique_categories = unique_values[category_index] category_to_name = dataset.label_to_value_funcs[category_index] if any(category_to_name(category) == 'blank' for category in unique_categories): for d in range(1, len(unique_values)): assert unique_values[d][0] == -1, ("unique_values: %s" % str(unique_values)) unique_values[d] = unique_values[d][1:] return unique_values def _get_blank_label(dataset): """ Returns the label vector associated with blank images. If dataset is a Small NORB (i.e. it has no blank images), this returns None. """ category_index = dataset.label_name_to_index['category'] category_to_name = dataset.label_to_value_funcs[category_index] blank_label = 5 try: blank_name = category_to_name(blank_label) except ValueError: # Returns None if there is no 'blank' category (e.g. if we're using # the small NORB dataset. 
return None assert blank_name == 'blank' blank_rowmask = dataset.y[:, category_index] == blank_label blank_labels = dataset.y[blank_rowmask, :] if not blank_rowmask.any(): return None if not numpy.all(blank_labels[0, :] == blank_labels[1:, :]): raise ValueError("Expected all labels of category 'blank' to have " "the same value, but they differed.") return blank_labels[0, :].copy() def _make_label_to_row_indices(labels): """ Returns a map from short labels (the first 5 elements of the label vector) to the list of row indices of rows in the dense design matrix with that label. For Small NORB, all unique short labels have exactly one row index. For big NORB, a short label can have 0-N row indices. """ result = {} for row_index, label in enumerate(labels): short_label = tuple(label[:5]) if result.get(short_label, None) is None: result[short_label] = [] result[short_label].append(row_index) return result def main(): """Top-level function.""" args = _parse_args() if args.pkl is not None: dataset = serial.load(args.pkl) else: dataset = NORB(args.which_norb, args.which_set) # Indexes into the first 5 labels, which live on a 5-D grid. grid_indices = [0, ] * 5 grid_to_short_label = _make_grid_to_short_label(dataset) # Maps 5-D label vector to a list of row indices for dataset.X, dataset.y # that have those labels. label_to_row_indices = _make_label_to_row_indices(dataset.y) # Indexes into the row index lists returned by label_to_row_indices. object_image_index = [0, ] blank_image_index = [0, ] blank_label = _get_blank_label(dataset) # Index into grid_indices currently being edited grid_dimension = [0, ] dataset_is_stereo = 's' in dataset.view_converter.axes figure, all_axes = pyplot.subplots(1, 3 if dataset_is_stereo else 2, squeeze=True, figsize=(10, 3.5)) set_name = (os.path.split(args.pkl)[1] if args.which_set is None else "%sing set" % args.which_set) figure.canvas.set_window_title("NORB dataset (%s)" % set_name) label_text = figure.suptitle('Up/down arrows choose label, ' 'left/right arrows change it', x=0.1, horizontalalignment="left") # Hides axes' tick marks for axes in all_axes: axes.get_xaxis().set_visible(False) axes.get_yaxis().set_visible(False) text_axes, image_axes = (all_axes[0], all_axes[1:]) image_captions = (('left', 'right') if dataset_is_stereo else ('mono image', )) if args.stereo_viewer: image_captions = tuple(reversed(image_captions)) for image_ax, caption in safe_zip(image_axes, image_captions): image_ax.set_title(caption) text_axes.set_frame_on(False) # Hides background of text_axes def is_blank(grid_indices): assert len(grid_indices) == 5 assert all(x >= 0 for x in grid_indices) ci = dataset.label_name_to_index['category'] # category index category = grid_to_short_label[ci][grid_indices[ci]] category_name = dataset.label_to_value_funcs[ci](category) return category_name == 'blank' def get_short_label(grid_indices): """ Returns the first 5 elements of the label vector pointed to by grid_indices. We use the first 5, since they're the labels used by both the 'big' and Small NORB datasets. """ # Need to special-case the 'blank' category, since it lies outside of # the grid. 
if is_blank(grid_indices): # won't happen with SmallNORB return tuple(blank_label[:5]) else: return tuple(grid_to_short_label[i][g] for i, g in enumerate(grid_indices)) def get_row_indices(grid_indices): short_label = get_short_label(grid_indices) return label_to_row_indices.get(short_label, None) axes_to_pixels = {} def redraw(redraw_text, redraw_images): row_indices = get_row_indices(grid_indices) if row_indices is None: row_index = None image_index = 0 num_images = 0 else: image_index = (blank_image_index if is_blank(grid_indices) else object_image_index)[0] row_index = row_indices[image_index] num_images = len(row_indices) def draw_text(): if row_indices is None: padding_length = dataset.y.shape[1] - len(grid_indices) current_label = (tuple(get_short_label(grid_indices)) + (0, ) * padding_length) else: current_label = dataset.y[row_index, :] label_names = dataset.label_index_to_name label_values = [label_to_value(label) for label_to_value, label in safe_zip(dataset.label_to_value_funcs, current_label)] lines = ['%s: %s' % (t, v) for t, v in safe_zip(label_names, label_values)] if dataset.y.shape[1] > 5: # Inserts image number & blank line between editable and # fixed labels. lines = (lines[:5] + ['No such image' if num_images == 0 else 'image: %d of %d' % (image_index + 1, num_images), '\n'] + lines[5:]) # prepends the current index's line with an arrow. lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]] text_axes.clear() # "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right. text_axes.text(0, 0.5, # coords '\n'.join(lines), verticalalignment='center', transform=text_axes.transAxes) def draw_images(): if row_indices is None: for axis in image_axes: axis.clear() else: data_row = dataset.X[row_index:row_index + 1, :] axes_names = dataset.view_converter.axes assert len(axes_names) in (4, 5) assert axes_names[0] == 'b' assert axes_names[-3] == 0 assert axes_names[-2] == 1 assert axes_names[-1] == 'c' def draw_image(image, axes): assert len(image.shape) == 2 norm = matplotlib.colors.NoNorm() if args.no_norm else None axes_to_pixels[axes] = image axes.imshow(image, norm=norm, cmap='gray') if 's' in axes_names: image_pair = \ dataset.get_topological_view(mat=data_row, single_tensor=True) # Shaves off the singleton dimensions # (batch # and channel #), leaving just 's', 0, and 1. 
image_pair = tuple(image_pair[0, :, :, :, 0]) if args.stereo_viewer: image_pair = tuple(reversed(image_pair)) for axis, image in safe_zip(image_axes, image_pair): draw_image(image, axis) else: image = dataset.get_topological_view(mat=data_row) image = image[0, :, :, 0] draw_image(image, image_axes[0]) if redraw_text: draw_text() if redraw_images: draw_images() figure.canvas.draw() default_status_text = ("mouseover image%s for pixel values" % ("" if len(image_axes) == 1 else "s")) status_text = figure.text(0.5, 0.1, default_status_text) def on_mouse_motion(event): original_text = status_text.get_text() if event.inaxes not in image_axes: status_text.set_text(default_status_text) else: pixels = axes_to_pixels[event.inaxes] row = int(event.ydata + .5) col = int(event.xdata + .5) status_text.set_text("Pixel value: %g" % pixels[row, col]) if status_text.get_text != original_text: figure.canvas.draw() def on_key_press(event): def add_mod(arg, step, size): return (arg + size + step) % size def incr_index_type(step): num_dimensions = len(grid_indices) if dataset.y.shape[1] > 5: # If dataset is big NORB, add one for the image index num_dimensions += 1 grid_dimension[0] = add_mod(grid_dimension[0], step, num_dimensions) def incr_index(step): assert step in (0, -1, 1), ("Step was %d" % step) image_index = (blank_image_index if is_blank(grid_indices) else object_image_index) if grid_dimension[0] == 5: # i.e. the image index row_indices = get_row_indices(grid_indices) if row_indices is None: image_index[0] = 0 else: # increment the image index image_index[0] = add_mod(image_index[0], step, len(row_indices)) else: # increment one of the grid indices gd = grid_dimension[0] grid_indices[gd] = add_mod(grid_indices[gd], step, len(grid_to_short_label[gd])) row_indices = get_row_indices(grid_indices) if row_indices is None: image_index[0] = 0 else: # some grid indices have 2 images instead of 3. image_index[0] = min(image_index[0], len(row_indices)) # Disables left/right key if we're currently showing a blank, # and the current index type is neither 'category' (0) nor # 'image number' (5) disable_left_right = (is_blank(grid_indices) and not (grid_dimension[0] in (0, 5))) if event.key == 'up': incr_index_type(-1) redraw(True, False) elif event.key == 'down': incr_index_type(1) redraw(True, False) elif event.key == 'q': sys.exit(0) elif not disable_left_right: if event.key == 'left': incr_index(-1) redraw(True, True) elif event.key == 'right': incr_index(1) redraw(True, True) figure.canvas.mpl_connect('key_press_event', on_key_press) figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion) redraw(True, True) pyplot.show() if __name__ == '__main__': main()
bsd-3-clause
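In the browse_norb.py record above, _parse_args() accepts either a pickled dataset (--pkl) or a (--which_norb, --which_set) pair, and rejects mixed or incomplete combinations. The sketch below is a simplified, hypothetical rendering of that validation pattern with argparse, not the script's actual code.

import argparse


def parse_args(argv=None):
    parser = argparse.ArgumentParser(description="NORB browser (sketch)")
    parser.add_argument('--pkl', type=str, help=".pkl file of a NORB dataset")
    parser.add_argument('--which_norb', choices=('big', 'small'))
    parser.add_argument('--which_set', choices=('train', 'test', 'both'))
    args = parser.parse_args(argv)

    # Either a pickle path or the (which_norb, which_set) pair must be
    # given, but not a mixture of the two and not neither.
    use_pair = args.which_norb is not None or args.which_set is not None
    if (args.pkl is not None) == use_pair:
        parser.error("supply either --pkl, or both --which_norb "
                     "and --which_set")
    if use_pair and (args.which_norb is None or args.which_set is None):
        parser.error("--which_norb and --which_set must be given together")
    return args


if __name__ == '__main__':
    print(parse_args())

The real script is then driven from the command line with the same options, e.g. python browse_norb.py --which_norb small --which_set train.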
rkmaddox/mne-python
mne/tests/test_source_estimate.py
3
76498
# -*- coding: utf-8 -*- # # License: BSD (3-clause) from contextlib import nullcontext from copy import deepcopy import os import os.path as op from shutil import copyfile import re import numpy as np from numpy.fft import fft from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal, assert_array_less) import pytest from scipy import sparse from scipy.optimize import fmin_cobyla from scipy.spatial.distance import cdist import mne from mne import (stats, SourceEstimate, VectorSourceEstimate, VolSourceEstimate, Label, read_source_spaces, read_evokeds, MixedSourceEstimate, find_events, Epochs, read_source_estimate, extract_label_time_course, spatio_temporal_tris_adjacency, stc_near_sensors, spatio_temporal_src_adjacency, read_cov, EvokedArray, spatial_inter_hemi_adjacency, read_forward_solution, spatial_src_adjacency, spatial_tris_adjacency, pick_info, SourceSpaces, VolVectorSourceEstimate, read_trans, pick_types, MixedVectorSourceEstimate, setup_volume_source_space, convert_forward_solution, pick_types_forward, compute_source_morph, labels_to_stc, scale_mri, write_source_spaces) from mne.datasets import testing from mne.externals.h5io import write_hdf5 from mne.fixes import _get_img_fdata from mne.io import read_info from mne.io.constants import FIFF from mne.morph_map import _make_morph_map_hemi from mne.source_estimate import grade_to_tris, _get_vol_mask from mne.source_space import _get_src_nn from mne.transforms import apply_trans, invert_transform, transform_surface_to from mne.minimum_norm import (read_inverse_operator, apply_inverse, apply_inverse_epochs, make_inverse_operator) from mne.label import read_labels_from_annot, label_sign_flip from mne.utils import (requires_pandas, requires_sklearn, catch_logging, requires_h5py, requires_nibabel, requires_version) from mne.io import read_raw_fif data_path = testing.data_path(download=False) subjects_dir = op.join(data_path, 'subjects') fname_inv = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') fname_inv_fixed = op.join( data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-meg-fixed-inv.fif') fname_fwd = op.join( data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') fname_cov = op.join( data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif') fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz') fname_fs_t1 = op.join(data_path, 'subjects', 'fsaverage', 'mri', 'T1.mgz') fname_aseg = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz') fname_src = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') fname_src_fs = op.join(data_path, 'subjects', 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif') bem_path = op.join(data_path, 'subjects', 'sample', 'bem') fname_src_3 = op.join(bem_path, 'sample-oct-4-src.fif') fname_src_vol = op.join(bem_path, 'sample-volume-7mm-src.fif') fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg') fname_vol = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w') fname_vsrc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-vol-7-fwd.fif') fname_inv_vol = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-vol-7-meg-inv.fif') fname_nirx = op.join(data_path, 'NIRx', 'nirscout', 'nirx_15_0_recording') rng = 
np.random.RandomState(0) @testing.requires_testing_data def test_stc_baseline_correction(): """Test baseline correction for source estimate objects.""" # test on different source estimates stcs = [read_source_estimate(fname_stc), read_source_estimate(fname_vol, 'sample')] # test on different "baseline" intervals baselines = [(0., 0.1), (None, None)] for stc in stcs: times = stc.times for (start, stop) in baselines: # apply baseline correction, then check if it worked stc = stc.apply_baseline(baseline=(start, stop)) t0 = start or stc.times[0] t1 = stop or stc.times[-1] # index for baseline interval (include boundary latencies) imin = np.abs(times - t0).argmin() imax = np.abs(times - t1).argmin() + 1 # data matrix from baseline interval data_base = stc.data[:, imin:imax] mean_base = data_base.mean(axis=1) zero_array = np.zeros(mean_base.shape[0]) # test if baseline properly subtracted (mean=zero for all sources) assert_array_almost_equal(mean_base, zero_array) @testing.requires_testing_data def test_spatial_inter_hemi_adjacency(): """Test spatial adjacency between hemispheres.""" # trivial cases conn = spatial_inter_hemi_adjacency(fname_src_3, 5e-6) assert_equal(conn.data.size, 0) conn = spatial_inter_hemi_adjacency(fname_src_3, 5e6) assert_equal(conn.data.size, np.prod(conn.shape) // 2) # actually interesting case (1cm), should be between 2 and 10% of verts src = read_source_spaces(fname_src_3) conn = spatial_inter_hemi_adjacency(src, 10e-3) conn = conn.tocsr() n_src = conn.shape[0] assert (n_src * 0.02 < conn.data.size < n_src * 0.10) assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0) assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0) c = (conn.T + conn) / 2. - conn c.eliminate_zeros() assert_equal(c.data.size, 0) # check locations upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray() assert_equal(upper_right.sum(), conn.sum() // 2) good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post', 'G_cuneus'] for hi, hemi in enumerate(('lh', 'rh')): has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right, axis=1 - hi))[0]] labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi, subjects_dir=subjects_dir) use_labels = [label.name[:-3] for label in labels if np.in1d(label.vertices, has_neighbors).any()] assert (set(use_labels) - set(good_labels) == set()) @pytest.mark.slowtest @testing.requires_testing_data @requires_h5py def test_volume_stc(tmpdir): """Test volume STCs.""" N = 100 data = np.arange(N)[:, np.newaxis] datas = [data, data, np.arange(2)[:, np.newaxis], np.arange(6).reshape(2, 3, 1)] vertno = np.arange(N) vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis], np.arange(2)] vertno_reads = [vertno, vertno, np.arange(2), np.arange(2)] for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads): if data.ndim in (1, 2): stc = VolSourceEstimate(data, [vertno], 0, 1) ext = 'stc' klass = VolSourceEstimate else: assert data.ndim == 3 stc = VolVectorSourceEstimate(data, [vertno], 0, 1) ext = 'h5' klass = VolVectorSourceEstimate fname_temp = tmpdir.join('temp-vl.' 
+ ext) stc_new = stc n = 3 if ext == 'h5' else 2 for ii in range(n): if ii < 2: stc_new.save(fname_temp) else: # Pass stc.vertices[0], an ndarray, to ensure support for # the way we used to write volume STCs write_hdf5( str(fname_temp), dict( vertices=stc.vertices[0], data=stc.data, tmin=stc.tmin, tstep=stc.tstep, subject=stc.subject, src_type=stc._src_type), title='mnepython', overwrite=True) stc_new = read_source_estimate(fname_temp) assert isinstance(stc_new, klass) assert_array_equal(vertno_read, stc_new.vertices[0]) assert_array_almost_equal(stc.data, stc_new.data) # now let's actually read a MNE-C processed file stc = read_source_estimate(fname_vol, 'sample') assert isinstance(stc, VolSourceEstimate) assert 'sample' in repr(stc) assert ' kB' in repr(stc) stc_new = stc pytest.raises(ValueError, stc.save, fname_vol, ftype='whatever') for ftype in ['w', 'h5']: for _ in range(2): fname_temp = tmpdir.join('temp-vol.%s' % ftype) stc_new.save(fname_temp, ftype=ftype) stc_new = read_source_estimate(fname_temp) assert (isinstance(stc_new, VolSourceEstimate)) assert_array_equal(stc.vertices[0], stc_new.vertices[0]) assert_array_almost_equal(stc.data, stc_new.data) @requires_nibabel() @testing.requires_testing_data def test_stc_as_volume(): """Test previous volume source estimate morph.""" import nibabel as nib inverse_operator_vol = read_inverse_operator(fname_inv_vol) # Apply inverse operator stc_vol = read_source_estimate(fname_vol, 'sample') img = stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=True, dest='42') t1_img = nib.load(fname_t1) # always assure nifti and dimensionality assert isinstance(img, nib.Nifti1Image) assert img.header.get_zooms()[:3] == t1_img.header.get_zooms()[:3] img = stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=False) assert isinstance(img, nib.Nifti1Image) assert img.shape[:3] == inverse_operator_vol['src'][0]['shape'][:3] with pytest.raises(ValueError, match='Invalid value.*output.*'): stc_vol.as_volume(inverse_operator_vol['src'], format='42') @testing.requires_testing_data @requires_nibabel() def test_save_vol_stc_as_nifti(tmpdir): """Save the stc as a nifti file and export.""" import nibabel as nib src = read_source_spaces(fname_vsrc) vol_fname = tmpdir.join('stc.nii.gz') # now let's actually read a MNE-C processed file stc = read_source_estimate(fname_vol, 'sample') assert (isinstance(stc, VolSourceEstimate)) stc.save_as_volume(vol_fname, src, dest='surf', mri_resolution=False) with pytest.warns(None): # nib<->numpy img = nib.load(str(vol_fname)) assert (img.shape == src[0]['shape'] + (len(stc.times),)) with pytest.warns(None): # nib<->numpy t1_img = nib.load(fname_t1) stc.save_as_volume(tmpdir.join('stc.nii.gz'), src, dest='mri', mri_resolution=True) with pytest.warns(None): # nib<->numpy img = nib.load(str(vol_fname)) assert (img.shape == t1_img.shape + (len(stc.times),)) assert_allclose(img.affine, t1_img.affine, atol=1e-5) # export without saving img = stc.as_volume(src, dest='mri', mri_resolution=True) assert (img.shape == t1_img.shape + (len(stc.times),)) assert_allclose(img.affine, t1_img.affine, atol=1e-5) src = SourceSpaces([src[0], src[0]]) stc = VolSourceEstimate(np.r_[stc.data, stc.data], [stc.vertices[0], stc.vertices[0]], tmin=stc.tmin, tstep=stc.tstep, subject='sample') img = stc.as_volume(src, dest='mri', mri_resolution=False) assert (img.shape == src[0]['shape'] + (len(stc.times),)) @testing.requires_testing_data def test_expand(): """Test stc expansion.""" stc_ = read_source_estimate(fname_stc, 'sample') vec_stc_ 
= VectorSourceEstimate(np.zeros((stc_.data.shape[0], 3, stc_.data.shape[1])), stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject) for stc in [stc_, vec_stc_]: assert ('sample' in repr(stc)) labels_lh = read_labels_from_annot('sample', 'aparc', 'lh', subjects_dir=subjects_dir) new_label = labels_lh[0] + labels_lh[1] stc_limited = stc.in_label(new_label) stc_new = stc_limited.copy() stc_new.data.fill(0) for label in labels_lh[:2]: stc_new += stc.in_label(label).expand(stc_limited.vertices) pytest.raises(TypeError, stc_new.expand, stc_limited.vertices[0]) pytest.raises(ValueError, stc_new.expand, [stc_limited.vertices[0]]) # make sure we can't add unless vertno agree pytest.raises(ValueError, stc.__add__, stc.in_label(labels_lh[0])) def _fake_stc(n_time=10, is_complex=False): np.random.seed(7) verts = [np.arange(10), np.arange(90)] data = np.random.rand(100, n_time) if is_complex: data.astype(complex) return SourceEstimate(data, verts, 0, 1e-1, 'foo') def _fake_vec_stc(n_time=10, is_complex=False): np.random.seed(7) verts = [np.arange(10), np.arange(90)] data = np.random.rand(100, 3, n_time) if is_complex: data.astype(complex) return VectorSourceEstimate(data, verts, 0, 1e-1, 'foo') @testing.requires_testing_data def test_stc_snr(): """Test computing SNR from a STC.""" inv = read_inverse_operator(fname_inv_fixed) fwd = read_forward_solution(fname_fwd) cov = read_cov(fname_cov) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01) stc = apply_inverse(evoked, inv) assert (stc.data < 0).any() with pytest.warns(RuntimeWarning, match='nAm'): stc.estimate_snr(evoked.info, fwd, cov) # dSPM with pytest.warns(RuntimeWarning, match='free ori'): abs(stc).estimate_snr(evoked.info, fwd, cov) stc = apply_inverse(evoked, inv, method='MNE') snr = stc.estimate_snr(evoked.info, fwd, cov) assert_allclose(snr.times, evoked.times) snr = snr.data assert snr.max() < -10 assert snr.min() > -120 def test_stc_attributes(): """Test STC attributes.""" stc = _fake_stc(n_time=10) vec_stc = _fake_vec_stc(n_time=10) n_times = len(stc.times) assert_equal(stc._data.shape[-1], n_times) assert_array_equal(stc.times, stc.tmin + np.arange(n_times) * stc.tstep) assert_array_almost_equal( stc.times, [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) def attempt_times_mutation(stc): stc.times -= 1 def attempt_assignment(stc, attr, val): setattr(stc, attr, val) # .times is read-only pytest.raises(ValueError, attempt_times_mutation, stc) pytest.raises(ValueError, attempt_assignment, stc, 'times', [1]) # Changing .tmin or .tstep re-computes .times stc.tmin = 1 assert (type(stc.tmin) == float) assert_array_almost_equal( stc.times, [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]) stc.tstep = 1 assert (type(stc.tstep) == float) assert_array_almost_equal( stc.times, [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]) # tstep <= 0 is not allowed pytest.raises(ValueError, attempt_assignment, stc, 'tstep', 0) pytest.raises(ValueError, attempt_assignment, stc, 'tstep', -1) # Changing .data re-computes .times stc.data = np.random.rand(100, 5) assert_array_almost_equal( stc.times, [1., 2., 3., 4., 5.]) # .data must match the number of vertices pytest.raises(ValueError, attempt_assignment, stc, 'data', [[1]]) pytest.raises(ValueError, attempt_assignment, stc, 'data', None) # .data much match number of dimensions pytest.raises(ValueError, attempt_assignment, stc, 'data', np.arange(100)) pytest.raises(ValueError, attempt_assignment, vec_stc, 'data', [np.arange(100)]) pytest.raises(ValueError, attempt_assignment, vec_stc, 'data', 
[[[np.arange(100)]]]) # .shape attribute must also work when ._data is None stc._kernel = np.zeros((2, 2)) stc._sens_data = np.zeros((2, 3)) stc._data = None assert_equal(stc.shape, (2, 3)) # bad size of data stc = _fake_stc() data = stc.data[:, np.newaxis, :] with pytest.raises(ValueError, match='2 dimensions for SourceEstimate'): SourceEstimate(data, stc.vertices, 0, 1) stc = SourceEstimate(data[:, 0, 0], stc.vertices, 0, 1) assert stc.data.shape == (len(data), 1) def test_io_stc(tmpdir): """Test IO for STC files.""" stc = _fake_stc() stc.save(tmpdir.join("tmp.stc")) stc2 = read_source_estimate(tmpdir.join("tmp.stc")) assert_array_almost_equal(stc.data, stc2.data) assert_array_almost_equal(stc.tmin, stc2.tmin) assert_equal(len(stc.vertices), len(stc2.vertices)) for v1, v2 in zip(stc.vertices, stc2.vertices): assert_array_almost_equal(v1, v2) assert_array_almost_equal(stc.tstep, stc2.tstep) # test warning for complex data stc2.data = stc2.data.astype(np.complex128) with pytest.raises(ValueError, match='Cannot save complex-valued STC'): stc2.save(tmpdir.join('complex.stc')) @requires_h5py @pytest.mark.parametrize('is_complex', (True, False)) @pytest.mark.parametrize('vector', (True, False)) def test_io_stc_h5(tmpdir, is_complex, vector): """Test IO for STC files using HDF5.""" if vector: stc = _fake_vec_stc(is_complex=is_complex) else: stc = _fake_stc(is_complex=is_complex) pytest.raises(ValueError, stc.save, tmpdir.join('tmp'), ftype='foo') out_name = tmpdir.join('tmp') stc.save(out_name, ftype='h5') stc.save(out_name, ftype='h5') # test overwrite stc3 = read_source_estimate(out_name) stc4 = read_source_estimate(out_name + '-stc') stc5 = read_source_estimate(out_name + '-stc.h5') pytest.raises(RuntimeError, read_source_estimate, out_name, subject='bar') for stc_new in stc3, stc4, stc5: assert_equal(stc_new.subject, stc.subject) assert_array_equal(stc_new.data, stc.data) assert_array_equal(stc_new.tmin, stc.tmin) assert_array_equal(stc_new.tstep, stc.tstep) assert_equal(len(stc_new.vertices), len(stc.vertices)) for v1, v2 in zip(stc_new.vertices, stc.vertices): assert_array_equal(v1, v2) def test_io_w(tmpdir): """Test IO for w files.""" stc = _fake_stc(n_time=1) w_fname = tmpdir.join('fake') stc.save(w_fname, ftype='w') src = read_source_estimate(w_fname) src.save(tmpdir.join('tmp'), ftype='w') src2 = read_source_estimate(tmpdir.join('tmp-lh.w')) assert_array_almost_equal(src.data, src2.data) assert_array_almost_equal(src.lh_vertno, src2.lh_vertno) assert_array_almost_equal(src.rh_vertno, src2.rh_vertno) def test_stc_arithmetic(): """Test arithmetic for STC files.""" stc = _fake_stc() data = stc.data.copy() vec_stc = _fake_vec_stc() vec_data = vec_stc.data.copy() out = list() for a in [data, stc, vec_data, vec_stc]: a = a + a * 3 + 3 * a - a ** 2 / 2 a += a a -= a with np.errstate(invalid='ignore'): a /= 2 * a a *= -a a += 2 a -= 1 a *= -1 a /= 2 b = 2 + a b = 2 - a b = +a assert_array_equal(b.data, a.data) with np.errstate(invalid='ignore'): a **= 3 out.append(a) assert_array_equal(out[0], out[1].data) assert_array_equal(out[2], out[3].data) assert_array_equal(stc.sqrt().data, np.sqrt(stc.data)) assert_array_equal(vec_stc.sqrt().data, np.sqrt(vec_stc.data)) assert_array_equal(abs(stc).data, abs(stc.data)) assert_array_equal(abs(vec_stc).data, abs(vec_stc.data)) stc_sum = stc.sum() assert_array_equal(stc_sum.data, stc.data.sum(1, keepdims=True)) stc_mean = stc.mean() assert_array_equal(stc_mean.data, stc.data.mean(1, keepdims=True)) vec_stc_mean = vec_stc.mean() 
assert_array_equal(vec_stc_mean.data, vec_stc.data.mean(2, keepdims=True)) @pytest.mark.slowtest @testing.requires_testing_data def test_stc_methods(): """Test stc methods lh_data, rh_data, bin(), resample().""" stc_ = read_source_estimate(fname_stc) # Make a vector version of the above source estimate x = stc_.data[:, np.newaxis, :] yz = np.zeros((x.shape[0], 2, x.shape[2])) vec_stc_ = VectorSourceEstimate( np.concatenate((x, yz), 1), stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject ) for stc in [stc_, vec_stc_]: # lh_data / rh_data assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)]) assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):]) # bin binned = stc.bin(.12) a = np.mean(stc.data[..., :np.searchsorted(stc.times, .12)], axis=-1) assert_array_equal(a, binned.data[..., 0]) stc = read_source_estimate(fname_stc) stc.subject = 'sample' label_lh = read_labels_from_annot('sample', 'aparc', 'lh', subjects_dir=subjects_dir)[0] label_rh = read_labels_from_annot('sample', 'aparc', 'rh', subjects_dir=subjects_dir)[0] label_both = label_lh + label_rh for label in (label_lh, label_rh, label_both): assert (isinstance(stc.shape, tuple) and len(stc.shape) == 2) stc_label = stc.in_label(label) if label.hemi != 'both': if label.hemi == 'lh': verts = stc_label.vertices[0] else: # label.hemi == 'rh': verts = stc_label.vertices[1] n_vertices_used = len(label.get_vertices_used(verts)) assert_equal(len(stc_label.data), n_vertices_used) stc_lh = stc.in_label(label_lh) pytest.raises(ValueError, stc_lh.in_label, label_rh) label_lh.subject = 'foo' pytest.raises(RuntimeError, stc.in_label, label_lh) stc_new = deepcopy(stc) o_sfreq = 1.0 / stc.tstep # note that using no padding for this STC reduces edge ringing... stc_new.resample(2 * o_sfreq, npad=0) assert (stc_new.data.shape[1] == 2 * stc.data.shape[1]) assert (stc_new.tstep == stc.tstep / 2) stc_new.resample(o_sfreq, npad=0) assert (stc_new.data.shape[1] == stc.data.shape[1]) assert (stc_new.tstep == stc.tstep) assert_array_almost_equal(stc_new.data, stc.data, 5) @testing.requires_testing_data def test_center_of_mass(): """Test computing the center of mass on an stc.""" stc = read_source_estimate(fname_stc) pytest.raises(ValueError, stc.center_of_mass, 'sample') stc.lh_data[:] = 0 vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir) assert (hemi == 1) # XXX Should design a fool-proof test case, but here were the # results: assert_equal(vertex, 124791) assert_equal(np.round(t, 2), 0.12) @testing.requires_testing_data @pytest.mark.parametrize('kind', ('surface', 'mixed')) @pytest.mark.parametrize('vector', (False, True)) def test_extract_label_time_course(kind, vector): """Test extraction of label time courses from (Mixed)SourceEstimate.""" n_stcs = 3 n_times = 50 src = read_inverse_operator(fname_inv)['src'] if kind == 'mixed': pytest.importorskip('nibabel') label_names = ('Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex') src += setup_volume_source_space( 'sample', pos=20., volume_label=label_names, subjects_dir=subjects_dir, add_interpolator=False) klass = MixedVectorSourceEstimate else: klass = VectorSourceEstimate if not vector: klass = klass._scalar_class vertices = [s['vertno'] for s in src] n_verts = np.array([len(v) for v in vertices]) vol_means = np.arange(-1, 1 - len(src), -1) vol_means_t = np.repeat(vol_means[:, np.newaxis], n_times, axis=1) # get some labels labels_lh = read_labels_from_annot('sample', hemi='lh', subjects_dir=subjects_dir) labels_rh = read_labels_from_annot('sample', hemi='rh', 
subjects_dir=subjects_dir) labels = list() labels.extend(labels_lh[:5]) labels.extend(labels_rh[:4]) n_labels = len(labels) label_tcs = dict( mean=np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))) label_tcs['max'] = label_tcs['mean'] # compute the mean with sign flip label_tcs['mean_flip'] = np.zeros_like(label_tcs['mean']) for i, label in enumerate(labels): label_tcs['mean_flip'][i] = i * np.mean( label_sign_flip(label, src[:2])) # generate some stc's with known data stcs = list() pad = (((0, 0), (2, 0), (0, 0)), 'constant') for i in range(n_stcs): data = np.zeros((n_verts.sum(), n_times)) # set the value of the stc within each label for j, label in enumerate(labels): if label.hemi == 'lh': idx = np.intersect1d(vertices[0], label.vertices) idx = np.searchsorted(vertices[0], idx) elif label.hemi == 'rh': idx = np.intersect1d(vertices[1], label.vertices) idx = len(vertices[0]) + np.searchsorted(vertices[1], idx) data[idx] = label_tcs['mean'][j] for j in range(len(vol_means)): offset = n_verts[:2 + j].sum() data[offset:offset + n_verts[j]] = vol_means[j] if vector: # the values it on the Z axis data = np.pad(data[:, np.newaxis], *pad) this_stc = klass(data, vertices, 0, 1) stcs.append(this_stc) if vector: for key in label_tcs: label_tcs[key] = np.pad(label_tcs[key][:, np.newaxis], *pad) vol_means_t = np.pad(vol_means_t[:, np.newaxis], *pad) # test some invalid inputs with pytest.raises(ValueError, match="Invalid value for the 'mode'"): extract_label_time_course(stcs, labels, src, mode='notamode') # have an empty label empty_label = labels[0].copy() empty_label.vertices += 1000000 with pytest.raises(ValueError, match='does not contain any vertices'): extract_label_time_course(stcs, empty_label, src) # but this works: with pytest.warns(RuntimeWarning, match='does not contain any vertices'): tc = extract_label_time_course(stcs, empty_label, src, allow_empty=True) end_shape = (3, n_times) if vector else (n_times,) for arr in tc: assert arr.shape == (1 + len(vol_means),) + end_shape assert_array_equal(arr[:1], np.zeros((1,) + end_shape)) if len(vol_means): assert_array_equal(arr[1:], vol_means_t) # test the different modes modes = ['mean', 'mean_flip', 'pca_flip', 'max', 'auto'] for mode in modes: if vector and mode not in ('mean', 'max', 'auto'): with pytest.raises(ValueError, match='when using a vector'): extract_label_time_course(stcs, labels, src, mode=mode) continue label_tc = extract_label_time_course(stcs, labels, src, mode=mode) label_tc_method = [stc.extract_label_time_course(labels, src, mode=mode) for stc in stcs] assert (len(label_tc) == n_stcs) assert (len(label_tc_method) == n_stcs) for tc1, tc2 in zip(label_tc, label_tc_method): assert tc1.shape == (n_labels + len(vol_means),) + end_shape assert tc2.shape == (n_labels + len(vol_means),) + end_shape assert_allclose(tc1, tc2, rtol=1e-8, atol=1e-16) if mode == 'auto': use_mode = 'mean' if vector else 'mean_flip' else: use_mode = mode # XXX we don't check pca_flip, probably should someday... 
if use_mode in ('mean', 'max', 'mean_flip'): assert_array_almost_equal(tc1[:n_labels], label_tcs[use_mode]) assert_array_almost_equal(tc1[n_labels:], vol_means_t) # test label with very few vertices (check SVD conditionals) label = Label(vertices=src[0]['vertno'][:2], hemi='lh') x = label_sign_flip(label, src[:2]) assert (len(x) == 2) label = Label(vertices=[], hemi='lh') x = label_sign_flip(label, src[:2]) assert (x.size == 0) @testing.requires_testing_data @pytest.mark.parametrize('label_type, mri_res, vector, test_label, cf, call', [ (str, False, False, False, 'head', 'meth'), # head frame (str, False, False, str, 'mri', 'func'), # fastest, default for testing (str, False, True, int, 'mri', 'func'), # vector (str, True, False, False, 'mri', 'func'), # mri_resolution (list, True, False, False, 'mri', 'func'), # volume label as list (dict, True, False, False, 'mri', 'func'), # volume label as dict ]) def test_extract_label_time_course_volume( src_volume_labels, label_type, mri_res, vector, test_label, cf, call): """Test extraction of label time courses from Vol(Vector)SourceEstimate.""" src_labels, volume_labels, lut = src_volume_labels n_tot = 46 assert n_tot == len(src_labels) inv = read_inverse_operator(fname_inv_vol) if cf == 'head': src = inv['src'] assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD rr = apply_trans(invert_transform(inv['mri_head_t']), src[0]['rr']) else: assert cf == 'mri' src = read_source_spaces(fname_src_vol) assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI rr = src[0]['rr'] for s in src_labels: assert_allclose(s['rr'], rr, atol=1e-7) assert len(src) == 1 and src.kind == 'volume' klass = VolVectorSourceEstimate if not vector: klass = klass._scalar_class vertices = [src[0]['vertno']] n_verts = len(src[0]['vertno']) n_times = 50 data = vertex_values = np.arange(1, n_verts + 1) end_shape = (n_times,) if vector: end_shape = (3,) + end_shape data = np.pad(data[:, np.newaxis], ((0, 0), (2, 0)), 'constant') data = np.repeat(data[..., np.newaxis], n_times, -1) stcs = [klass(data.astype(float), vertices, 0, 1)] def eltc(*args, **kwargs): if call == 'func': return extract_label_time_course(stcs, *args, **kwargs) else: assert call == 'meth' return [stcs[0].extract_label_time_course(*args, **kwargs)] with pytest.raises(RuntimeError, match='atlas vox_mri_t does not match'): eltc(fname_fs_t1, src, mri_resolution=mri_res) assert len(src_labels) == 46 # includes unknown assert_array_equal( src[0]['vertno'], # src includes some in "unknown" space np.sort(np.concatenate([s['vertno'] for s in src_labels]))) # spot check assert src_labels[-1]['seg_name'] == 'CC_Anterior' assert src[0]['nuse'] == 4157 assert len(src[0]['vertno']) == 4157 assert sum(s['nuse'] for s in src_labels) == 4157 assert_array_equal(src_labels[-1]['vertno'], [8011, 8032, 8557]) assert_array_equal( np.where(np.in1d(src[0]['vertno'], [8011, 8032, 8557]))[0], [2672, 2688, 2995]) # triage "labels" argument if mri_res: # All should be there missing = [] else: # Nearest misses these missing = ['Left-vessel', 'Right-vessel', '5th-Ventricle', 'non-WM-hypointensities'] n_want = len(src_labels) if label_type is str: labels = fname_aseg elif label_type is list: labels = (fname_aseg, volume_labels) else: assert label_type is dict labels = (fname_aseg, {k: lut[k] for k in volume_labels}) assert mri_res assert len(missing) == 0 # we're going to add one that won't exist missing = ['intentionally_bad'] labels[1][missing[0]] = 10000 n_want += 1 n_tot += 1 n_want -= len(missing) # actually do the testing if cf == 'head' 
and not mri_res: # some missing with pytest.warns(RuntimeWarning, match='any vertices'): eltc(labels, src, allow_empty=True, mri_resolution=mri_res) for mode in ('mean', 'max'): with catch_logging() as log: label_tc = eltc(labels, src, mode=mode, allow_empty='ignore', mri_resolution=mri_res, verbose=True) log = log.getvalue() assert re.search('^Reading atlas.*aseg\\.mgz\n', log) is not None if len(missing): # assert that the missing ones get logged assert 'does not contain' in log assert repr(missing) in log else: assert 'does not contain' not in log assert '\n%d/%d atlas regions had at least' % (n_want, n_tot) in log assert len(label_tc) == 1 label_tc = label_tc[0] assert label_tc.shape == (n_tot,) + end_shape if vector: assert_array_equal(label_tc[:, :2], 0.) label_tc = label_tc[:, 2] assert label_tc.shape == (n_tot, n_times) # let's test some actual values by trusting the masks provided by # setup_volume_source_space. mri_resolution=True does some # interpolation so we should not expect equivalence, False does # nearest so we should. if mri_res: rtol = 0.2 if mode == 'mean' else 0.8 # max much more sensitive else: rtol = 0. for si, s in enumerate(src_labels): func = dict(mean=np.mean, max=np.max)[mode] these = vertex_values[np.in1d(src[0]['vertno'], s['vertno'])] assert len(these) == s['nuse'] if si == 0 and s['seg_name'] == 'Unknown': continue # unknown is crappy if s['nuse'] == 0: want = 0. if mri_res: # this one is totally due to interpolation, so no easy # test here continue else: want = func(these) assert_allclose(label_tc[si], want, atol=1e-6, rtol=rtol) # compare with in_label, only on every fourth for speed if test_label is not False and si % 4 == 0: label = s['seg_name'] if test_label is int: label = lut[label] in_label = stcs[0].in_label( label, fname_aseg, src).data assert in_label.shape == (s['nuse'],) + end_shape if vector: assert_array_equal(in_label[:, :2], 0.) in_label = in_label[:, 2] if want == 0: assert in_label.shape[0] == 0 else: in_label = func(in_label) assert_allclose(in_label, want, atol=1e-6, rtol=rtol) if mode == 'mean' and not vector: # check the reverse if label_type is dict: ctx = pytest.warns(RuntimeWarning, match='does not contain') else: ctx = nullcontext() with ctx: stc_back = labels_to_stc(labels, label_tc, src=src) assert stc_back.data.shape == stcs[0].data.shape corr = np.corrcoef(stc_back.data.ravel(), stcs[0].data.ravel())[0, 1] assert 0.6 < corr < 0.63 assert_allclose(_varexp(label_tc, label_tc), 1.) ve = _varexp(stc_back.data, stcs[0].data) assert 0.83 < ve < 0.85 with pytest.warns(None): # ignore warnings about no output label_tc_rt = extract_label_time_course( stc_back, labels, src=src, mri_resolution=mri_res, allow_empty=True) assert label_tc_rt.shape == label_tc.shape corr = np.corrcoef(label_tc.ravel(), label_tc_rt.ravel())[0, 1] lower, upper = (0.99, 0.999) if mri_res else (0.95, 0.97) assert lower < corr < upper ve = _varexp(label_tc_rt, label_tc) lower, upper = (0.99, 0.999) if mri_res else (0.97, 0.99) assert lower < ve < upper def _varexp(got, want): return max( 1 - np.linalg.norm(got.ravel() - want.ravel()) ** 2 / np.linalg.norm(want) ** 2, 0.) 
@testing.requires_testing_data def test_extract_label_time_course_equiv(): """Test extraction of label time courses from stc equivalences.""" label = read_labels_from_annot('sample', 'aparc', 'lh', regexp='transv', subjects_dir=subjects_dir) assert len(label) == 1 label = label[0] inv = read_inverse_operator(fname_inv) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01) stc = apply_inverse(evoked, inv, pick_ori='normal', label=label) stc_full = apply_inverse(evoked, inv, pick_ori='normal') stc_in_label = stc_full.in_label(label) mean = stc.extract_label_time_course(label, inv['src']) mean_2 = stc_in_label.extract_label_time_course(label, inv['src']) assert_allclose(mean, mean_2) inv['src'][0]['vertno'] = np.array([], int) assert len(stc_in_label.vertices[0]) == 22 with pytest.raises(ValueError, match='22/22 left hemisphere.*missing'): stc_in_label.extract_label_time_course(label, inv['src']) def _my_trans(data): """FFT that adds an additional dimension by repeating result.""" data_t = fft(data) data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2) return data_t, None def test_transform_data(): """Test applying linear (time) transform to data.""" # make up some data n_sensors, n_vertices, n_times = 10, 20, 4 kernel = rng.randn(n_vertices, n_sensors) sens_data = rng.randn(n_sensors, n_times) vertices = [np.arange(n_vertices)] data = np.dot(kernel, sens_data) for idx, tmin_idx, tmax_idx in\ zip([None, np.arange(n_vertices // 2, n_vertices)], [None, 1], [None, 3]): if idx is None: idx_use = slice(None, None) else: idx_use = idx data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx]) for stc_data in (data, (kernel, sens_data)): stc = VolSourceEstimate(stc_data, vertices=vertices, tmin=0., tstep=1.) stc_data_t = stc.transform_data(_my_trans, idx=idx, tmin_idx=tmin_idx, tmax_idx=tmax_idx) assert_allclose(data_f, stc_data_t) # bad sens_data sens_data = sens_data[..., np.newaxis] with pytest.raises(ValueError, match='sensor data must have 2'): VolSourceEstimate((kernel, sens_data), vertices, 0, 1) def test_transform(): """Test applying linear (time) transform to data.""" # make up some data n_verts_lh, n_verts_rh, n_times = 10, 10, 10 vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)] data = rng.randn(n_verts_lh + n_verts_rh, n_times) stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1) # data_t.ndim > 2 & copy is True stcs_t = stc.transform(_my_trans, copy=True) assert (isinstance(stcs_t, list)) assert_array_equal(stc.times, stcs_t[0].times) assert_equal(stc.vertices, stcs_t[0].vertices) data = np.concatenate((stcs_t[0].data[:, :, None], stcs_t[1].data[:, :, None]), axis=2) data_t = stc.transform_data(_my_trans) assert_array_equal(data, data_t) # check against stc.transform_data() # data_t.ndim > 2 & copy is False pytest.raises(ValueError, stc.transform, _my_trans, copy=False) # data_t.ndim = 2 & copy is True tmp = deepcopy(stc) stc_t = stc.transform(np.abs, copy=True) assert (isinstance(stc_t, SourceEstimate)) assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original? 
# data_t.ndim = 2 & copy is False times = np.round(1000 * stc.times) verts = np.arange(len(stc.lh_vertno), len(stc.lh_vertno) + len(stc.rh_vertno), 1) verts_rh = stc.rh_vertno tmin_idx = np.searchsorted(times, 0) tmax_idx = np.searchsorted(times, 501) # Include 500ms in the range data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx, tmax_idx=tmax_idx) stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False) assert (isinstance(stc, SourceEstimate)) assert_equal(stc.tmin, 0.) assert_equal(stc.times[-1], 0.5) assert_equal(len(stc.vertices[0]), 0) assert_equal(stc.vertices[1], verts_rh) assert_array_equal(stc.data, data_t) times = np.round(1000 * stc.times) tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250) data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx) stc.transform(np.abs, tmin=0, tmax=250, copy=False) assert_equal(stc.tmin, 0.) assert_equal(stc.times[-1], 0.2) assert_array_equal(stc.data, data_t) @requires_sklearn def test_spatio_temporal_tris_adjacency(): """Test spatio-temporal adjacency from triangles.""" tris = np.array([[0, 1, 2], [3, 4, 5]]) adjacency = spatio_temporal_tris_adjacency(tris, 2) x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1] components = stats.cluster_level._get_components(np.array(x), adjacency) # _get_components works differently now... old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1] new_fmt = np.array(old_fmt) new_fmt = [np.nonzero(new_fmt == v)[0] for v in np.unique(new_fmt[new_fmt >= 0])] assert len(new_fmt) == len(components) for c, n in zip(components, new_fmt): assert_array_equal(c, n) @testing.requires_testing_data def test_spatio_temporal_src_adjacency(): """Test spatio-temporal adjacency from source spaces.""" tris = np.array([[0, 1, 2], [3, 4, 5]]) src = [dict(), dict()] adjacency = spatio_temporal_tris_adjacency(tris, 2).todense() assert_allclose(np.diag(adjacency), 1.) 
src[0]['use_tris'] = np.array([[0, 1, 2]]) src[1]['use_tris'] = np.array([[0, 1, 2]]) src[0]['vertno'] = np.array([0, 1, 2]) src[1]['vertno'] = np.array([0, 1, 2]) src[0]['type'] = 'surf' src[1]['type'] = 'surf' adjacency2 = spatio_temporal_src_adjacency(src, 2) assert_array_equal(adjacency2.todense(), adjacency) # add test for dist adjacency src[0]['dist'] = np.ones((3, 3)) - np.eye(3) src[1]['dist'] = np.ones((3, 3)) - np.eye(3) src[0]['vertno'] = [0, 1, 2] src[1]['vertno'] = [0, 1, 2] src[0]['type'] = 'surf' src[1]['type'] = 'surf' adjacency3 = spatio_temporal_src_adjacency(src, 2, dist=2) assert_array_equal(adjacency3.todense(), adjacency) # add test for source space adjacency with omitted vertices inverse_operator = read_inverse_operator(fname_inv) src_ = inverse_operator['src'] with pytest.warns(RuntimeWarning, match='will have holes'): adjacency = spatio_temporal_src_adjacency(src_, n_times=2) a = adjacency.shape[0] / 2 b = sum([s['nuse'] for s in inverse_operator['src']]) assert (a == b) assert_equal(grade_to_tris(5).shape, [40960, 3]) @requires_pandas def test_to_data_frame(): """Test stc Pandas exporter.""" n_vert, n_times = 10, 5 vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') stc_vol = VolSourceEstimate(data, vertices=vertices[:1], tmin=0, tstep=1, subject='sample') for stc in [stc_surf, stc_vol]: df = stc.to_data_frame() # test data preservation (first 2 dataframe elements are subj & time) assert_array_equal(df.values.T[2:], stc.data) # test long format df_long = stc.to_data_frame(long_format=True) assert(len(df_long) == stc.data.size) expected = ('subject', 'time', 'source', 'value') assert set(expected) == set(df_long.columns) @requires_pandas @pytest.mark.parametrize('index', ('time', ['time', 'subject'], None)) def test_to_data_frame_index(index): """Test index creation in stc Pandas exporter.""" n_vert, n_times = 10, 5 vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') df = stc.to_data_frame(index=index) # test index setting if not isinstance(index, list): index = [index] assert (df.index.names == index) # test that non-indexed data were present as columns non_index = list(set(['time', 'subject']) - set(index)) if len(non_index): assert all(np.in1d(non_index, df.columns)) @pytest.mark.parametrize('kind', ('surface', 'mixed', 'volume')) @pytest.mark.parametrize('vector', (False, True)) @pytest.mark.parametrize('n_times', (5, 1)) def test_get_peak(kind, vector, n_times): """Test peak getter.""" n_vert = 10 vertices = [np.arange(n_vert)] if kind == 'surface': klass = VectorSourceEstimate vertices += [np.empty(0, int)] elif kind == 'mixed': klass = MixedVectorSourceEstimate vertices += [np.empty(0, int), np.empty(0, int)] else: assert kind == 'volume' klass = VolVectorSourceEstimate data = np.zeros((n_vert, n_times)) data[1, -1] = 1 if vector: data = np.repeat(data[:, np.newaxis], 3, 1) else: klass = klass._scalar_class stc = klass(data, vertices, 0, 1) with pytest.raises(ValueError, match='out of bounds'): stc.get_peak(tmin=-100) with pytest.raises(ValueError, match='out of bounds'): stc.get_peak(tmax=90) with pytest.raises(ValueError, match='smaller or equal' if n_times > 1 else 'out of'): stc.get_peak(tmin=0.002, tmax=0.001) vert_idx, time_idx = stc.get_peak() vertno = 
np.concatenate(stc.vertices) assert vert_idx in vertno assert time_idx in stc.times data_idx, time_idx = stc.get_peak(vert_as_index=True, time_as_index=True) if vector: use_data = stc.magnitude().data else: use_data = stc.data assert data_idx == 1 assert time_idx == n_times - 1 assert data_idx == np.argmax(np.abs(use_data[:, time_idx])) assert time_idx == np.argmax(np.abs(use_data[data_idx, :])) if kind == 'surface': data_idx_2, time_idx_2 = stc.get_peak( vert_as_index=True, time_as_index=True, hemi='lh') assert data_idx_2 == data_idx assert time_idx_2 == time_idx with pytest.raises(RuntimeError, match='no vertices'): stc.get_peak(hemi='rh') @requires_h5py @testing.requires_testing_data def test_mixed_stc(tmpdir): """Test source estimate from mixed source space.""" N = 90 # number of sources T = 2 # number of time points S = 3 # number of source spaces data = rng.randn(N, T) vertno = S * [np.arange(N // S)] # make sure error is raised if vertices are not a list of length >= 2 pytest.raises(ValueError, MixedSourceEstimate, data=data, vertices=[np.arange(N)]) stc = MixedSourceEstimate(data, vertno, 0, 1) # make sure error is raised for plotting surface with volume source fname = tmpdir.join('mixed-stc.h5') stc.save(fname) stc_out = read_source_estimate(fname) assert_array_equal(stc_out.vertices, vertno) assert_array_equal(stc_out.data, data) assert stc_out.tmin == 0 assert stc_out.tstep == 1 assert isinstance(stc_out, MixedSourceEstimate) @requires_h5py @pytest.mark.parametrize('klass, kind', [ (VectorSourceEstimate, 'surf'), (VolVectorSourceEstimate, 'vol'), (VolVectorSourceEstimate, 'discrete'), (MixedVectorSourceEstimate, 'mixed'), ]) @pytest.mark.parametrize('dtype', [ np.float32, np.float64, np.complex64, np.complex128]) def test_vec_stc_basic(tmpdir, klass, kind, dtype): """Test (vol)vector source estimate.""" nn = np.array([ [1, 0, 0], [0, 1, 0], [np.sqrt(1. / 2.), 0, np.sqrt(1. / 2.)], [np.sqrt(1 / 3.)] * 3 ], np.float32) data = np.array([ [1, 0, 0], [0, 2, 0], [-3, 0, 0], [1, 1, 1], ], dtype)[:, :, np.newaxis] amplitudes = np.array([1, 2, 3, np.sqrt(3)], dtype) magnitudes = amplitudes.copy() normals = np.array([1, 2, -3. / np.sqrt(2), np.sqrt(3)], dtype) if dtype in (np.complex64, np.complex128): data *= 1j amplitudes *= 1j normals *= 1j directions = np.array( [[1, 0, 0], [0, 1, 0], [-1, 0, 0], [1. 
/ np.sqrt(3)] * 3]) vol_kind = kind if kind in ('discrete', 'vol') else 'vol' vol_src = SourceSpaces([dict(nn=nn, type=vol_kind)]) assert vol_src.kind == dict(vol='volume').get(vol_kind, vol_kind) vol_verts = [np.arange(4)] surf_src = SourceSpaces([dict(nn=nn[:2], type='surf'), dict(nn=nn[2:], type='surf')]) assert surf_src.kind == 'surface' surf_verts = [np.array([0, 1]), np.array([0, 1])] if klass is VolVectorSourceEstimate: src = vol_src verts = vol_verts elif klass is VectorSourceEstimate: src = surf_src verts = surf_verts if klass is MixedVectorSourceEstimate: src = surf_src + vol_src verts = surf_verts + vol_verts assert src.kind == 'mixed' data = np.tile(data, (2, 1, 1)) amplitudes = np.tile(amplitudes, 2) magnitudes = np.tile(magnitudes, 2) normals = np.tile(normals, 2) directions = np.tile(directions, (2, 1)) stc = klass(data, verts, 0, 1, 'foo') amplitudes = amplitudes[:, np.newaxis] magnitudes = magnitudes[:, np.newaxis] # Magnitude of the vectors assert_array_equal(stc.magnitude().data, magnitudes) # Vector components projected onto the vertex normals if kind in ('vol', 'mixed'): with pytest.raises(RuntimeError, match='surface or discrete'): stc.project('normal', src)[0] else: normal = stc.project('normal', src)[0] assert_allclose(normal.data[:, 0], normals) # Maximal-variance component, either to keep amps pos or to align to src-nn projected, got_directions = stc.project('pca') assert_allclose(got_directions, directions) assert_allclose(projected.data, amplitudes) projected, got_directions = stc.project('pca', src) flips = np.array([[1], [1], [-1.], [1]]) if klass is MixedVectorSourceEstimate: flips = np.tile(flips, (2, 1)) assert_allclose(got_directions, directions * flips) assert_allclose(projected.data, amplitudes * flips) out_name = tmpdir.join('temp.h5') stc.save(out_name) stc_read = read_source_estimate(out_name) assert_allclose(stc.data, stc_read.data) assert len(stc.vertices) == len(stc_read.vertices) for v1, v2 in zip(stc.vertices, stc_read.vertices): assert_array_equal(v1, v2) stc = klass(data[:, :, 0], verts, 0, 1) # upbroadcast assert stc.data.shape == (len(data), 3, 1) # Bad data with pytest.raises(ValueError, match='must have shape.*3'): klass(data[:, :2], verts, 0, 1) data = data[:, :, np.newaxis] with pytest.raises(ValueError, match='3 dimensions for .*VectorSource'): klass(data, verts, 0, 1) @pytest.mark.parametrize('real', (True, False)) def test_source_estime_project(real): """Test projecting a source estimate onto direction of max power.""" n_src, n_times = 4, 100 rng = np.random.RandomState(0) data = rng.randn(n_src, 3, n_times) if not real: data = data + 1j * rng.randn(n_src, 3, n_times) assert data.dtype == np.complex128 else: assert data.dtype == np.float64 # Make sure that the normal we get maximizes the power # (i.e., minimizes the negative power) want_nn = np.empty((n_src, 3)) for ii in range(n_src): x0 = np.ones(3) def objective(x): x = x / np.linalg.norm(x) return -np.linalg.norm(np.dot(x, data[ii])) want_nn[ii] = fmin_cobyla(objective, x0, (), rhobeg=0.1, rhoend=1e-6) want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True) stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1) stc_max, directions = stc.project('pca') flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True)) directions *= flips assert_allclose(directions, want_nn, atol=2e-6) @testing.requires_testing_data def test_source_estime_project_label(): """Test projecting a source estimate onto direction of max power.""" fwd = read_forward_solution(fname_fwd) fwd = 
pick_types_forward(fwd, meg=True, eeg=False) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0] noise_cov = read_cov(fname_cov) free = make_inverse_operator( evoked.info, fwd, noise_cov, loose=1.) stc_free = apply_inverse(evoked, free, pick_ori='vector') stc_pca = stc_free.project('pca', fwd['src'])[0] labels_lh = read_labels_from_annot('sample', 'aparc', 'lh', subjects_dir=subjects_dir) new_label = labels_lh[0] + labels_lh[1] stc_in_label = stc_free.in_label(new_label) stc_pca_in_label = stc_pca.in_label(new_label) stc_in_label_pca = stc_in_label.project('pca', fwd['src'])[0] assert_array_equal(stc_pca_in_label.data, stc_in_label_pca.data) @pytest.fixture(scope='module', params=[testing._pytest_param()]) def invs(): """Inverses of various amounts of loose.""" fwd = read_forward_solution(fname_fwd) fwd = pick_types_forward(fwd, meg=True, eeg=False) fwd_surf = convert_forward_solution(fwd, surf_ori=True) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0] noise_cov = read_cov(fname_cov) free = make_inverse_operator( evoked.info, fwd, noise_cov, loose=1.) free_surf = make_inverse_operator( evoked.info, fwd_surf, noise_cov, loose=1.) freeish = make_inverse_operator( evoked.info, fwd, noise_cov, loose=0.9999) fixed = make_inverse_operator( evoked.info, fwd, noise_cov, loose=0.) fixedish = make_inverse_operator( evoked.info, fwd, noise_cov, loose=0.0001) assert_allclose(free['source_nn'], np.kron(np.ones(fwd['nsource']), np.eye(3)).T, atol=1e-7) # This is the one exception: assert not np.allclose(free['source_nn'], free_surf['source_nn']) assert_allclose(free['source_nn'], np.tile(np.eye(3), (free['nsource'], 1)), atol=1e-7) # All others are similar: for other in (freeish, fixedish): assert_allclose(free_surf['source_nn'], other['source_nn'], atol=1e-7) assert_allclose( free_surf['source_nn'][2::3], fixed['source_nn'], atol=1e-7) expected_nn = np.concatenate([_get_src_nn(s) for s in fwd['src']]) assert_allclose(fixed['source_nn'], expected_nn, atol=1e-7) return evoked, free, free_surf, freeish, fixed, fixedish bad_normal = pytest.param( 'normal', marks=pytest.mark.xfail(raises=AssertionError)) @pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector']) def test_vec_stc_inv_free(invs, pick_ori): """Test vector STC behavior with two free-orientation inverses.""" evoked, free, free_surf, _, _, _ = invs stc_free = apply_inverse(evoked, free, pick_ori=pick_ori) stc_free_surf = apply_inverse(evoked, free_surf, pick_ori=pick_ori) assert_allclose(stc_free.data, stc_free_surf.data, atol=1e-5) @pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector']) def test_vec_stc_inv_free_surf(invs, pick_ori): """Test vector STC behavior with free and free-ish orientation invs.""" evoked, _, free_surf, freeish, _, _ = invs stc_free = apply_inverse(evoked, free_surf, pick_ori=pick_ori) stc_freeish = apply_inverse(evoked, freeish, pick_ori=pick_ori) assert_allclose(stc_free.data, stc_freeish.data, atol=1e-3) @pytest.mark.parametrize('pick_ori', (None, 'normal', 'vector')) def test_vec_stc_inv_fixed(invs, pick_ori): """Test vector STC behavior with fixed-orientation inverses.""" evoked, _, _, _, fixed, fixedish = invs stc_fixed = apply_inverse(evoked, fixed) stc_fixed_vector = apply_inverse(evoked, fixed, pick_ori='vector') assert_allclose(stc_fixed.data, stc_fixed_vector.project('normal', fixed['src'])[0].data) stc_fixedish = apply_inverse(evoked, fixedish, pick_ori=pick_ori) if pick_ori == 'vector': assert_allclose(stc_fixed_vector.data, stc_fixedish.data, atol=1e-2) # two ways here: with 
magnitude... assert_allclose( abs(stc_fixed).data, stc_fixedish.magnitude().data, atol=1e-2) # ... and when picking the normal (signed) stc_fixedish = stc_fixedish.project('normal', fixedish['src'])[0] elif pick_ori is None: stc_fixed = abs(stc_fixed) else: assert pick_ori == 'normal' # no need to modify assert_allclose(stc_fixed.data, stc_fixedish.data, atol=1e-2) @testing.requires_testing_data def test_epochs_vector_inverse(): """Test vector inverse consistency between evoked and epochs.""" raw = read_raw_fif(fname_raw) events = find_events(raw, stim_channel='STI 014')[:2] reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6) epochs = Epochs(raw, events, None, 0, 0.01, baseline=None, reject=reject, preload=True) assert_equal(len(epochs), 2) evoked = epochs.average(picks=range(len(epochs.ch_names))) inv = read_inverse_operator(fname_inv) method = "MNE" snr = 3. lambda2 = 1. / snr ** 2 stcs_epo = apply_inverse_epochs(epochs, inv, lambda2, method=method, pick_ori='vector', return_generator=False) stc_epo = np.mean(stcs_epo) stc_evo = apply_inverse(evoked, inv, lambda2, method=method, pick_ori='vector') assert_allclose(stc_epo.data, stc_evo.data, rtol=1e-9, atol=0) @requires_sklearn @testing.requires_testing_data def test_vol_adjacency(): """Test volume adjacency.""" vol = read_source_spaces(fname_vsrc) pytest.raises(ValueError, spatial_src_adjacency, vol, dist=1.) adjacency = spatial_src_adjacency(vol) n_vertices = vol[0]['inuse'].sum() assert_equal(adjacency.shape, (n_vertices, n_vertices)) assert (np.all(adjacency.data == 1)) assert (isinstance(adjacency, sparse.coo_matrix)) adjacency2 = spatio_temporal_src_adjacency(vol, n_times=2) assert_equal(adjacency2.shape, (2 * n_vertices, 2 * n_vertices)) assert (np.all(adjacency2.data == 1)) @testing.requires_testing_data def test_spatial_src_adjacency(): """Test spatial adjacency functionality.""" # oct src = read_source_spaces(fname_src) assert src[0]['dist'] is not None # distance info with pytest.warns(RuntimeWarning, match='will have holes'): con = spatial_src_adjacency(src).toarray() con_dist = spatial_src_adjacency(src, dist=0.01).toarray() assert (con == con_dist).mean() > 0.75 # ico src = read_source_spaces(fname_src_fs) con = spatial_src_adjacency(src).tocsr() con_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr() assert con.shape == con_tris.shape assert_array_equal(con.data, con_tris.data) assert_array_equal(con.indptr, con_tris.indptr) assert_array_equal(con.indices, con_tris.indices) # one hemi con_lh = spatial_src_adjacency(src[:1]).tocsr() con_lh_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr() con_lh_tris = con_lh_tris[:10242, :10242].tocsr() assert_array_equal(con_lh.data, con_lh_tris.data) assert_array_equal(con_lh.indptr, con_lh_tris.indptr) assert_array_equal(con_lh.indices, con_lh_tris.indices) @requires_sklearn @requires_nibabel() @testing.requires_testing_data def test_vol_mask(): """Test extraction of volume mask.""" src = read_source_spaces(fname_vsrc) mask = _get_vol_mask(src) # Let's use an alternative way that should be equivalent vertices = [src[0]['vertno']] n_vertices = len(vertices[0]) data = (1 + np.arange(n_vertices))[:, np.newaxis] stc_tmp = VolSourceEstimate(data, vertices, tmin=0., tstep=1.) 
img = stc_tmp.as_volume(src, mri_resolution=False) img_data = _get_img_fdata(img)[:, :, :, 0].T mask_nib = (img_data != 0) assert_array_equal(img_data[mask_nib], data[:, 0]) assert_array_equal(np.where(mask_nib.ravel())[0], src[0]['vertno']) assert_array_equal(mask, mask_nib) assert_array_equal(img_data.shape, mask.shape) @testing.requires_testing_data def test_stc_near_sensors(tmpdir): """Test stc_near_sensors.""" info = read_info(fname_evoked) # pick the left EEG sensors picks = pick_types(info, meg=False, eeg=True, exclude=()) picks = [pick for pick in picks if info['chs'][pick]['loc'][0] < 0] pick_info(info, picks, copy=False) info['projs'] = [] info['bads'] = [] assert info['nchan'] == 33 evoked = EvokedArray(np.eye(info['nchan']), info) trans = read_trans(fname_fwd) assert trans['to'] == FIFF.FIFFV_COORD_HEAD this_dir = str(tmpdir) # testing does not have pial, so fake it os.makedirs(op.join(this_dir, 'sample', 'surf')) for hemi in ('lh', 'rh'): copyfile(op.join(subjects_dir, 'sample', 'surf', f'{hemi}.white'), op.join(this_dir, 'sample', 'surf', f'{hemi}.pial')) # here we use a distance is smaller than the inter-sensor distance kwargs = dict(subject='sample', trans=trans, subjects_dir=this_dir, verbose=True, distance=0.005) with pytest.raises(ValueError, match='No appropriate channels'): stc_near_sensors(evoked, **kwargs) evoked.set_channel_types({ch_name: 'ecog' for ch_name in evoked.ch_names}) with catch_logging() as log: stc = stc_near_sensors(evoked, **kwargs) log = log.getvalue() assert 'Minimum projected intra-sensor distance: 7.' in log # 7.4 # this should be left-hemisphere dominant assert 5000 > len(stc.vertices[0]) > 4000 assert 200 > len(stc.vertices[1]) > 100 # and at least one vertex should have the channel values dists = cdist(stc.data, evoked.data) assert np.isclose(dists, 0., atol=1e-6).any(0).all() src = read_source_spaces(fname_src) # uses "white" but should be okay for s in src: transform_surface_to(s, 'head', trans, copy=False) assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD stc_src = stc_near_sensors(evoked, src=src, **kwargs) assert len(stc_src.data) == 7928 with pytest.warns(RuntimeWarning, match='not included'): # some removed stc_src_full = compute_source_morph( stc_src, 'sample', 'sample', smooth=5, spacing=None, subjects_dir=subjects_dir).apply(stc_src) lh_idx = np.searchsorted(stc_src_full.vertices[0], stc.vertices[0]) rh_idx = np.searchsorted(stc_src_full.vertices[1], stc.vertices[1]) rh_idx += len(stc_src_full.vertices[0]) sub_data = stc_src_full.data[np.concatenate([lh_idx, rh_idx])] assert sub_data.shape == stc.data.shape corr = np.corrcoef(stc.data.ravel(), sub_data.ravel())[0, 1] assert 0.6 < corr < 0.7 # now single-weighting mode stc_w = stc_near_sensors(evoked, mode='single', **kwargs) assert_array_less(stc_w.data, stc.data + 1e-3) # some tol assert len(stc_w.data) == len(stc.data) # at least one for each sensor should have projected right on it dists = cdist(stc_w.data, evoked.data) assert np.isclose(dists, 0., atol=1e-6).any(0).all() # finally, nearest mode: all should match stc_n = stc_near_sensors(evoked, mode='nearest', **kwargs) assert len(stc_n.data) == len(stc.data) # at least one for each sensor should have projected right on it dists = cdist(stc_n.data, evoked.data) assert np.isclose(dists, 0., atol=1e-6).any(1).all() # all vert eq some ch # these are EEG electrodes, so the distance 0.01 is too small for the # scalp+skull. 
Even at a distance of 33 mm EEG 060 is too far: with pytest.warns(RuntimeWarning, match='Channel missing in STC: EEG 060'): stc = stc_near_sensors(evoked, trans, 'sample', subjects_dir=this_dir, project=False, distance=0.033) assert stc.data.any(0).sum() == len(evoked.ch_names) - 1 # and now with volumetric projection src = read_source_spaces(fname_vsrc) with catch_logging() as log: stc_vol = stc_near_sensors(evoked, trans, 'sample', src=src, subjects_dir=subjects_dir, verbose=True, distance=0.033) assert isinstance(stc_vol, VolSourceEstimate) log = log.getvalue() assert '4157 volume vertices' in log @testing.requires_testing_data def test_stc_near_sensors_picks(): """Test using picks with stc_near_sensors.""" info = mne.io.read_raw_nirx(fname_nirx).info evoked = mne.EvokedArray(np.ones((len(info['ch_names']), 1)), info) src = mne.read_source_spaces(fname_src_fs) kwargs = dict( evoked=evoked, subject='fsaverage', trans='fsaverage', subjects_dir=subjects_dir, src=src, project=True) with pytest.raises(ValueError, match='No appropriate channels'): stc_near_sensors(**kwargs) picks = np.arange(len(info['ch_names'])) data = stc_near_sensors(picks=picks, **kwargs).data assert len(data) == 20484 assert (data >= 0).all() data = data[data > 0] n_pts = len(data) assert 500 < n_pts < 600 lo, hi = np.percentile(data, (5, 95)) assert 0.01 < lo < 0.1 assert 1.3 < hi < 1.7 # > 1 data = stc_near_sensors(picks=picks, mode='weighted', **kwargs).data assert (data >= 0).all() data = data[data > 0] assert len(data) == n_pts assert_array_equal(data, 1.) # values preserved def _make_morph_map_hemi_same(subject_from, subject_to, subjects_dir, reg_from, reg_to): return _make_morph_map_hemi(subject_from, subject_from, subjects_dir, reg_from, reg_from) @requires_nibabel() @testing.requires_testing_data @pytest.mark.parametrize('kind', ( pytest.param('volume', marks=[requires_version('dipy')]), 'surface', )) @pytest.mark.parametrize('scale', ((1.0, 0.8, 1.2), 1., 0.9)) def test_scale_morph_labels(kind, scale, monkeypatch, tmpdir): """Test label extraction, morphing, and MRI scaling relationships.""" tempdir = str(tmpdir) subject_from = 'sample' subject_to = 'small' testing_dir = op.join(subjects_dir, subject_from) from_dir = op.join(tempdir, subject_from) for root in ('mri', 'surf', 'label', 'bem'): os.makedirs(op.join(from_dir, root), exist_ok=True) for hemi in ('lh', 'rh'): for root, fname in (('surf', 'sphere'), ('surf', 'white'), ('surf', 'sphere.reg'), ('label', 'aparc.annot')): use_fname = op.join(root, f'{hemi}.{fname}') copyfile(op.join(testing_dir, use_fname), op.join(from_dir, use_fname)) for root, fname in (('mri', 'aseg.mgz'), ('mri', 'brain.mgz')): use_fname = op.join(root, fname) copyfile(op.join(testing_dir, use_fname), op.join(from_dir, use_fname)) del testing_dir if kind == 'surface': src_from = read_source_spaces(fname_src_3) assert src_from[0]['dist'] is None assert src_from[0]['nearest'] is not None # avoid patch calc src_from[0]['nearest'] = src_from[1]['nearest'] = None assert len(src_from) == 2 assert src_from[0]['nuse'] == src_from[1]['nuse'] == 258 klass = SourceEstimate labels_from = read_labels_from_annot( subject_from, subjects_dir=tempdir) n_labels = len(labels_from) write_source_spaces(op.join(tempdir, subject_from, 'bem', f'{subject_from}-oct-4-src.fif'), src_from) else: assert kind == 'volume' pytest.importorskip('dipy') src_from = read_source_spaces(fname_src_vol) src_from[0]['subject_his_id'] = subject_from labels_from = op.join( tempdir, subject_from, 'mri', 'aseg.mgz') n_labels = 46 
assert op.isfile(labels_from) klass = VolSourceEstimate assert len(src_from) == 1 assert src_from[0]['nuse'] == 4157 write_source_spaces( op.join(from_dir, 'bem', 'sample-vol20-src.fif'), src_from) scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir, annot=True, skip_fiducials=True, verbose=True, overwrite=True) if kind == 'surface': src_to = read_source_spaces( op.join(tempdir, subject_to, 'bem', f'{subject_to}-oct-4-src.fif')) labels_to = read_labels_from_annot( subject_to, subjects_dir=tempdir) # Save time since we know these subjects are identical monkeypatch.setattr(mne.morph_map, '_make_morph_map_hemi', _make_morph_map_hemi_same) else: src_to = read_source_spaces( op.join(tempdir, subject_to, 'bem', f'{subject_to}-vol20-src.fif')) labels_to = op.join( tempdir, subject_to, 'mri', 'aseg.mgz') # 1. Label->STC->Label for the given subject should be identity # (for surfaces at least; for volumes it's not as clean as this # due to interpolation) n_times = 50 rng = np.random.RandomState(0) label_tc = rng.randn(n_labels, n_times) # check that a random permutation of our labels yields a terrible # correlation corr = np.corrcoef(label_tc.ravel(), rng.permutation(label_tc).ravel())[0, 1] assert -0.06 < corr < 0.06 # project label activations to full source space with pytest.raises(ValueError, match='subject'): labels_to_stc(labels_from, label_tc, src=src_from, subject='foo') stc = labels_to_stc(labels_from, label_tc, src=src_from) assert stc.subject == 'sample' assert isinstance(stc, klass) label_tc_from = extract_label_time_course( stc, labels_from, src_from, mode='mean') if kind == 'surface': assert_allclose(label_tc, label_tc_from, rtol=1e-12, atol=1e-12) else: corr = np.corrcoef(label_tc.ravel(), label_tc_from.ravel())[0, 1] assert 0.93 < corr < 0.95 # # 2. Changing STC subject to the surrogate and then extracting # stc.subject = subject_to label_tc_to = extract_label_time_course( stc, labels_to, src_to, mode='mean') assert_allclose(label_tc_from, label_tc_to, rtol=1e-12, atol=1e-12) stc.subject = subject_from # # 3. Morphing STC to new subject then extracting # if isinstance(scale, tuple) and kind == 'volume': ctx = nullcontext() test_morph = True elif kind == 'surface': ctx = pytest.warns(RuntimeWarning, match='not included') test_morph = True else: ctx = nullcontext() test_morph = True with ctx: # vertices not included morph = compute_source_morph( src_from, subject_to=subject_to, src_to=src_to, subjects_dir=tempdir, niter_sdr=(), smooth=1, zooms=14., verbose=True) # speed up with higher zooms if kind == 'volume': got_affine = morph.pre_affine.affine want_affine = np.eye(4) want_affine.ravel()[::5][:3] = 1. / np.array(scale, float) # just a scaling (to within 1% if zooms=None, 20% with zooms=10) assert_allclose(want_affine[:, :3], got_affine[:, :3], atol=2e-1) assert got_affine[3, 3] == 1. # little translation (to within `limit` mm) move = np.linalg.norm(got_affine[:3, 3]) limit = 2. if scale == 1. 
else 12 assert move < limit, scale if test_morph: stc_to = morph.apply(stc) label_tc_to_morph = extract_label_time_course( stc_to, labels_to, src_to, mode='mean') if kind == 'volume': corr = np.corrcoef( label_tc.ravel(), label_tc_to_morph.ravel())[0, 1] if isinstance(scale, tuple): # some other fixed constant # min_, max_ = 0.84, 0.855 # zooms='auto' values min_, max_ = 0.57, 0.67 elif scale == 1: # min_, max_ = 0.85, 0.875 # zooms='auto' values min_, max_ = 0.72, 0.75 else: # min_, max_ = 0.84, 0.855 # zooms='auto' values min_, max_ = 0.61, 0.62 assert min_ < corr <= max_, scale else: assert_allclose( label_tc, label_tc_to_morph, atol=1e-12, rtol=1e-12) # # 4. The same round trip from (1) but in the warped space # stc = labels_to_stc(labels_to, label_tc, src=src_to) assert isinstance(stc, klass) label_tc_to = extract_label_time_course( stc, labels_to, src_to, mode='mean') if kind == 'surface': assert_allclose(label_tc, label_tc_to, rtol=1e-12, atol=1e-12) else: corr = np.corrcoef(label_tc.ravel(), label_tc_to.ravel())[0, 1] assert 0.93 < corr < 0.96, scale @testing.requires_testing_data @pytest.mark.parametrize('kind', [ 'surface', pytest.param('volume', marks=[pytest.mark.slowtest, requires_version('nibabel')]), ]) def test_label_extraction_subject(kind): """Test that label extraction subject is treated properly.""" if kind == 'surface': inv = read_inverse_operator(fname_inv) labels = read_labels_from_annot( 'sample', subjects_dir=subjects_dir) labels_fs = read_labels_from_annot( 'fsaverage', subjects_dir=subjects_dir) labels_fs = [label for label in labels_fs if not label.name.startswith('unknown')] assert all(label.subject == 'sample' for label in labels) assert all(label.subject == 'fsaverage' for label in labels_fs) assert len(labels) == len(labels_fs) == 68 n_labels = 68 else: assert kind == 'volume' inv = read_inverse_operator(fname_inv_vol) inv['src'][0]['subject_his_id'] = 'sample' # modernize labels = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') labels_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz') n_labels = 46 src = inv['src'] assert src.kind == kind assert src._subject == 'sample' ave = read_evokeds(fname_evoked)[0].apply_baseline((None, 0)).crop(0, 0.01) assert len(ave.times) == 4 stc = apply_inverse(ave, inv) assert stc.subject == 'sample' ltc = extract_label_time_course(stc, labels, src) stc.subject = 'fsaverage' with pytest.raises(ValueError, match=r'source spac.*not match.* stc\.sub'): extract_label_time_course(stc, labels, src) stc.subject = 'sample' assert ltc.shape == (n_labels, 4) if kind == 'volume': with pytest.raises(RuntimeError, match='atlas.*not match.*source spa'): extract_label_time_course(stc, labels_fs, src) else: with pytest.raises(ValueError, match=r'label\.sub.*not match.* stc\.'): extract_label_time_course(stc, labels_fs, src) stc.subject = None with pytest.raises(ValueError, match=r'label\.sub.*not match.* sourc'): extract_label_time_course(stc, labels_fs, src)
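The tests above exercise the public SourceEstimate API directly (construction from an array, transform, get_peak). A minimal self-contained sketch of that usage, assuming only synthetic data and made-up vertex numbers rather than a real FreeSurfer subject:

import numpy as np
from mne import SourceEstimate

rng = np.random.RandomState(0)
# 10 left-hemisphere and 10 right-hemisphere sources, 5 time samples
vertices = [np.arange(10), np.arange(10)]  # hypothetical vertex numbers
data = rng.randn(20, 5)
stc = SourceEstimate(data, vertices=vertices, tmin=0., tstep=0.001,
                     subject='sample')

# Element-wise transform; copy=True returns a new SourceEstimate
stc_abs = stc.transform(np.abs, copy=True)

# Spatio-temporal peak, returned as (vertex index, time index)
vert_idx, time_idx = stc_abs.get_peak(vert_as_index=True, time_as_index=True)
print(vert_idx, time_idx)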
bsd-3-clause
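The adjacency helpers covered by the same test module can likewise be tried on toy triangles without any test data. A small sketch using the same two disconnected triangles as test_spatio_temporal_tris_adjacency (the top-level mne import path is assumed from recent MNE releases):

import numpy as np
from mne import spatio_temporal_tris_adjacency

# Two disconnected triangles -> 6 vertices in total
tris = np.array([[0, 1, 2], [3, 4, 5]])
adjacency = spatio_temporal_tris_adjacency(tris, 2)

# One node per (vertex, time point) pair: 6 * 2 = 12
print(adjacency.shape)  # (12, 12)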
wlamond/scikit-learn
sklearn/tests/test_random_projection.py
141
14040
from __future__ import division import numpy as np import scipy.sparse as sp from sklearn.metrics import euclidean_distances from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import gaussian_random_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.random_projection import SparseRandomProjection from sklearn.random_projection import GaussianRandomProjection from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.exceptions import DataDimensionalityWarning all_sparse_random_matrix = [sparse_random_matrix] all_dense_random_matrix = [gaussian_random_matrix] all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix) all_SparseRandomProjection = [SparseRandomProjection] all_DenseRandomProjection = [GaussianRandomProjection] all_RandomProjection = set(all_SparseRandomProjection + all_DenseRandomProjection) # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros): rng = np.random.RandomState(0) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def densify(matrix): if not sp.issparse(matrix): return matrix else: return matrix.toarray() n_samples, n_features = (10, 1000) n_nonzeros = int(n_samples * n_features / 100.) 
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros) ############################################################################### # test on JL lemma ############################################################################### def test_invalid_jl_domain(): assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5) def test_input_size_jl_min_dim(): assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100], 2 * [0.9]) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100], 2 * [0.9]) johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)), 0.5 * np.ones((10, 10))) ############################################################################### # tests random matrix generation ############################################################################### def check_input_size_random_matrix(random_matrix): assert_raises(ValueError, random_matrix, 0, 0) assert_raises(ValueError, random_matrix, -1, 1) assert_raises(ValueError, random_matrix, 1, -1) assert_raises(ValueError, random_matrix, 1, 0) assert_raises(ValueError, random_matrix, -1, 0) def check_size_generated(random_matrix): assert_equal(random_matrix(1, 5).shape, (1, 5)) assert_equal(random_matrix(5, 1).shape, (5, 1)) assert_equal(random_matrix(5, 5).shape, (5, 5)) assert_equal(random_matrix(1, 1).shape, (1, 1)) def check_zero_mean_and_unit_norm(random_matrix): # All random matrix should produce a transformation matrix # with zero mean and unit norm for each columns A = densify(random_matrix(10000, 1, random_state=0)) assert_array_almost_equal(0, np.mean(A), 3) assert_array_almost_equal(1.0, np.linalg.norm(A), 1) def check_input_with_sparse_random_matrix(random_matrix): n_components, n_features = 5, 10 for density in [-1., 0.0, 1.1]: assert_raises(ValueError, random_matrix, n_components, n_features, density=density) def test_basic_property_of_random_matrix(): # Check basic properties of random matrix generation for random_matrix in all_random_matrix: yield check_input_size_random_matrix, random_matrix yield check_size_generated, random_matrix yield check_zero_mean_and_unit_norm, random_matrix for random_matrix in all_sparse_random_matrix: yield check_input_with_sparse_random_matrix, random_matrix random_matrix_dense = \ lambda n_components, n_features, random_state: random_matrix( n_components, n_features, random_state=random_state, density=1.0) yield check_zero_mean_and_unit_norm, random_matrix_dense def test_gaussian_random_matrix(): # Check some statical properties of Gaussian random matrix # Check that the random matrix follow the proper distribution. # Let's say that each element of a_{ij} of A is taken from # a_ij ~ N(0.0, 1 / n_components). 
# n_components = 100 n_features = 1000 A = gaussian_random_matrix(n_components, n_features, random_state=0) assert_array_almost_equal(0.0, np.mean(A), 2) assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1) def test_sparse_random_matrix(): # Check some statical properties of sparse random matrix n_components = 100 n_features = 500 for density in [0.3, 1.]: s = 1 / density A = sparse_random_matrix(n_components, n_features, density=density, random_state=0) A = densify(A) # Check possible values values = np.unique(A) assert_in(np.sqrt(s) / np.sqrt(n_components), values) assert_in(- np.sqrt(s) / np.sqrt(n_components), values) if density == 1.0: assert_equal(np.size(values), 2) else: assert_in(0., values) assert_equal(np.size(values), 3) # Check that the random matrix follow the proper distribution. # Let's say that each element of a_{ij} of A is taken from # # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s # - 0 with probability 1 - 1 / s # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s # assert_almost_equal(np.mean(A == 0.0), 1 - 1 / s, decimal=2) assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2) assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2) assert_almost_equal(np.var(A == 0.0, ddof=1), (1 - 1 / s) * 1 / s, decimal=2) assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2) assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2) ############################################################################### # tests on random projection transformer ############################################################################### def test_sparse_random_projection_transformer_invalid_density(): for RandomProjection in all_SparseRandomProjection: assert_raises(ValueError, RandomProjection(density=1.1).fit, data) assert_raises(ValueError, RandomProjection(density=0).fit, data) assert_raises(ValueError, RandomProjection(density=-0.1).fit, data) def test_random_projection_transformer_invalid_input(): for RandomProjection in all_RandomProjection: assert_raises(ValueError, RandomProjection(n_components='auto').fit, [[0, 1, 2]]) assert_raises(ValueError, RandomProjection(n_components=-10).fit, data) def test_try_to_transform_before_fit(): for RandomProjection in all_RandomProjection: assert_raises(ValueError, RandomProjection(n_components='auto').transform, data) def test_too_many_samples_to_find_a_safe_embedding(): data, _ = make_sparse_random_data(1000, 100, 1000) for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', eps=0.1) expected_msg = ( 'eps=0.100000 and n_samples=1000 lead to a target dimension' ' of 5920 which is larger than the original space with' ' n_features=100') assert_raise_message(ValueError, expected_msg, rp.fit, data) def test_random_projection_embedding_quality(): data, _ = make_sparse_random_data(8, 5000, 15000) eps = 0.2 original_distances = euclidean_distances(data, squared=True) original_distances = original_distances.ravel() non_identical = original_distances != 0.0 # remove 0 distances to avoid division by 0 original_distances = original_distances[non_identical] for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', eps=eps, random_state=0) projected = rp.fit_transform(data) projected_distances = euclidean_distances(projected, squared=True) 
projected_distances = projected_distances.ravel() # remove 0 distances to avoid division by 0 projected_distances = projected_distances[non_identical] distances_ratio = projected_distances / original_distances # check that the automatically tuned values for the density respect the # contract for eps: pairwise distances are preserved according to the # Johnson-Lindenstrauss lemma assert_less(distances_ratio.max(), 1 + eps) assert_less(1 - eps, distances_ratio.min()) def test_SparseRandomProjection_output_representation(): for SparseRandomProjection in all_SparseRandomProjection: # when using sparse input, the projected data can be forced to be a # dense numpy array rp = SparseRandomProjection(n_components=10, dense_output=True, random_state=0) rp.fit(data) assert isinstance(rp.transform(data), np.ndarray) sparse_data = sp.csr_matrix(data) assert isinstance(rp.transform(sparse_data), np.ndarray) # the output can be left to a sparse matrix instead rp = SparseRandomProjection(n_components=10, dense_output=False, random_state=0) rp = rp.fit(data) # output for dense input will stay dense: assert isinstance(rp.transform(data), np.ndarray) # output for sparse output will be sparse: assert sp.issparse(rp.transform(sparse_data)) def test_correct_RandomProjection_dimensions_embedding(): for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', random_state=0, eps=0.5).fit(data) # the number of components is adjusted from the shape of the training # set assert_equal(rp.n_components, 'auto') assert_equal(rp.n_components_, 110) if RandomProjection in all_SparseRandomProjection: assert_equal(rp.density, 'auto') assert_almost_equal(rp.density_, 0.03, 2) assert_equal(rp.components_.shape, (110, n_features)) projected_1 = rp.transform(data) assert_equal(projected_1.shape, (n_samples, 110)) # once the RP is 'fitted' the projection is always the same projected_2 = rp.transform(data) assert_array_equal(projected_1, projected_2) # fit transform with same random seed will lead to the same results rp2 = RandomProjection(random_state=0, eps=0.5) projected_3 = rp2.fit_transform(data) assert_array_equal(projected_1, projected_3) # Try to transform with an input X of size different from fitted. assert_raises(ValueError, rp.transform, data[:, 1:5]) # it is also possible to fix the number of components and the density # level if RandomProjection in all_SparseRandomProjection: rp = RandomProjection(n_components=100, density=0.001, random_state=0) projected = rp.fit_transform(data) assert_equal(projected.shape, (n_samples, 100)) assert_equal(rp.components_.shape, (100, n_features)) assert_less(rp.components_.nnz, 115) # close to 1% density assert_less(85, rp.components_.nnz) # close to 1% density def test_warning_n_components_greater_than_n_features(): n_features = 20 data, _ = make_sparse_random_data(5, n_features, int(n_features / 4)) for RandomProjection in all_RandomProjection: assert_warns(DataDimensionalityWarning, RandomProjection(n_components=n_features + 1).fit, data) def test_works_with_sparse_data(): n_features = 20 data, _ = make_sparse_random_data(5, n_features, int(n_features / 4)) for RandomProjection in all_RandomProjection: rp_dense = RandomProjection(n_components=3, random_state=1).fit(data) rp_sparse = RandomProjection(n_components=3, random_state=1).fit(sp.csr_matrix(data)) assert_array_almost_equal(densify(rp_dense.components_), densify(rp_sparse.components_))
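As a usage-level counterpart to the tests above, a minimal sketch (synthetic dense data, arbitrary eps) of the two public entry points they cover, johnson_lindenstrauss_min_dim and SparseRandomProjection:

import numpy as np
from sklearn.random_projection import (SparseRandomProjection,
                                       johnson_lindenstrauss_min_dim)

rng = np.random.RandomState(42)
X = rng.randn(100, 10000)  # 100 samples in a 10000-dimensional space

# Smallest target dimension that preserves pairwise distances within eps
print(johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=0.5))

# n_components='auto' applies the same Johnson-Lindenstrauss bound internally
rp = SparseRandomProjection(n_components='auto', eps=0.5, random_state=0)
X_new = rp.fit_transform(X)
print(X_new.shape)   # (n_samples, rp.n_components_)
print(rp.density_)   # automatically chosen density of the projection matrix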
bsd-3-clause
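The three-valued distribution spelled out in the comments of test_sparse_random_matrix can also be checked by hand; a short sketch with an arbitrary density of 0.1, so s = 1 / density = 10:

import numpy as np
from sklearn.random_projection import sparse_random_matrix

n_components, n_features, density = 100, 500, 0.1
s = 1 / density
A = sparse_random_matrix(n_components, n_features, density=density,
                         random_state=0).toarray()

# Only three values occur: 0 and +/- sqrt(s) / sqrt(n_components)
print(np.unique(A))

# Empirical frequencies are close to 1 - 1/s and 1/(2s)
print(np.mean(A == 0.0))                                  # ~0.90
print(np.mean(A == np.sqrt(s) / np.sqrt(n_components)))   # ~0.05
print(np.mean(A == -np.sqrt(s) / np.sqrt(n_components)))  # ~0.05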
zorroblue/scikit-learn
sklearn/gaussian_process/gpr.py
9
20571
"""Gaussian processes regression. """ # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve_triangular from scipy.optimize import fmin_l_bfgs_b from sklearn.base import BaseEstimator, RegressorMixin, clone from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C from sklearn.utils import check_random_state from sklearn.utils.validation import check_X_y, check_array from sklearn.utils.deprecation import deprecated class GaussianProcessRegressor(BaseEstimator, RegressorMixin): """Gaussian process regression (GPR). The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. In addition to standard scikit-learn estimator API, GaussianProcessRegressor: * allows prediction without prior fitting (based on the GP prior) * provides an additional method sample_y(X), which evaluates samples drawn from the GPR (prior or posterior) at given inputs * exposes a method log_marginal_likelihood(theta), which can be used externally for other ways of selecting hyperparameters, e.g., via Markov chain Monte Carlo. Read more in the :ref:`User Guide <gaussian_process>`. .. versionadded:: 0.18 Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. alpha : float or array-like, optional (default: 1e-10) Value added to the diagonal of the kernel matrix during fitting. Larger values correspond to increased noise level in the observations. This can also prevent a potential numerical issue during fitting, by ensuring that the calculated values form a positive definite matrix. If an array is passed, it must have the same number of entries as the data used for fitting and is used as datapoint-dependent noise level. Note that this is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify the noise level directly as a parameter is mainly for convenience and for consistency with Ridge. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. 
The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer == 0 implies that one run is performed. normalize_y : boolean, optional (default: False) Whether the target values y are normalized, i.e., the mean of the observed target values become zero. This parameter should be set to True if the target values' mean is expected to differ considerable from zero. When enabled, the normalization effectively modifies the GP's prior based on the data, which contradicts the likelihood principle; normalization is thus disabled per default. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, optional (default: None) The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_ : array-like, shape = (n_samples, [n_output_dims]) Target values in training data (also required for prediction) kernel_ : kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_ : array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in ``X_train_`` alpha_ : array-like, shape = (n_samples,) Dual coefficients of training data points in kernel space log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` """ def __init__(self, kernel=None, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None): self.kernel = kernel self.alpha = alpha self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.normalize_y = normalize_y self.copy_X_train = copy_X_train self.random_state = random_state @property @deprecated("Attribute rng was deprecated in version 0.19 and " "will be removed in 0.21.") def rng(self): return self._rng @property @deprecated("Attribute y_train_mean was deprecated in version 0.19 and " "will be removed in 0.21.") def y_train_mean(self): return self._y_train_mean def fit(self, X, y): """Fit Gaussian process regression model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples, [n_output_dims]) Target values Returns ------- self : returns an instance of self. 
""" if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self._rng = check_random_state(self.random_state) X, y = check_X_y(X, y, multi_output=True, y_numeric=True) # Normalize target value if self.normalize_y: self._y_train_mean = np.mean(y, axis=0) # demean y y = y - self._y_train_mean else: self._y_train_mean = np.zeros(1) if np.iterable(self.alpha) \ and self.alpha.shape[0] != y.shape[0]: if self.alpha.shape[0] == 1: self.alpha = self.alpha[0] else: raise ValueError("alpha must be a scalar or an array" " with same number of entries as y.(%d != %d)" % (self.alpha.shape[0], y.shape[0])) self.X_train_ = np.copy(X) if self.copy_X_train else X self.y_train_ = np.copy(y) if self.copy_X_train else y if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [(self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds))] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = \ self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=True) # Line 2 # self.L_ changed, self._K_inv needs to be recomputed self._K_inv = None except np.linalg.LinAlgError as exc: exc.args = ("The kernel, %s, is not returning a " "positive definite matrix. Try gradually " "increasing the 'alpha' parameter of your " "GaussianProcessRegressor estimator." % self.kernel_,) + exc.args raise self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3 return self def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, also its standard deviation (return_std=True) or covariance (return_cov=True). Note that at most one of the two can be requested. Parameters ---------- X : array-like, shape = (n_samples, n_features) Query points where the GP is evaluated return_std : bool, default: False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. 
return_cov : bool, default: False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean Returns ------- y_mean : array, shape = (n_samples, [n_output_dims]) Mean of predictive distribution a query points y_std : array, shape = (n_samples,), optional Standard deviation of predictive distribution at query points. Only returned when return_std is True. y_cov : array, shape = (n_samples, n_samples), optional Covariance of joint predictive distribution a query points. Only returned when return_cov is True. """ if return_std and return_cov: raise RuntimeError( "Not returning standard deviation of predictions when " "returning full covariance.") X = check_array(X) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = (C(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")) else: kernel = self.kernel y_mean = np.zeros(X.shape[0]) if return_cov: y_cov = kernel(X) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star) y_mean = self._y_train_mean + y_mean # undo normal. if return_cov: v = cho_solve((self.L_, True), K_trans.T) # Line 5 y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6 return y_mean, y_cov elif return_std: # cache result of K_inv computation if self._K_inv is None: # compute inverse K_inv of K based on its Cholesky # decomposition L and its inverse L_inv L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0])) self._K_inv = L_inv.dot(L_inv.T) # Compute variance of predictive distribution y_var = self.kernel_.diag(X) y_var -= np.einsum("ij,ij->i", np.dot(K_trans, self._K_inv), K_trans) # Check if any of the variances is negative because of # numerical issues. If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn("Predicted variances smaller than 0. " "Setting those variances to 0.") y_var[y_var_negative] = 0.0 return y_mean, np.sqrt(y_var) else: return y_mean def sample_y(self, X, n_samples=1, random_state=0): """Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like, shape = (n_samples_X, n_features) Query points where the GP samples are evaluated n_samples : int, default: 1 The number of samples drawn from the Gaussian process random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points. """ rng = check_random_state(random_state) y_mean, y_cov = self.predict(X, return_cov=True) if y_mean.ndim == 1: y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T else: y_samples = \ [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_mean.shape[1])] y_samples = np.hstack(y_samples) return y_samples def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. 
Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: L = cholesky(K, lower=True) # Line 2 except np.linalg.LinAlgError: return (-np.inf, np.zeros_like(theta)) \ if eval_gradient else -np.inf # Support multi-dimensional output of self.y_train_ y_train = self.y_train_ if y_train.ndim == 1: y_train = y_train[:, np.newaxis] alpha = cho_solve((L, True), y_train) # Line 3 # Compute log-likelihood (compare line 7) log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) log_likelihood_dims -= np.log(np.diag(L)).sum() log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions if eval_gradient: # compare Equation 5.9 from GPML tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis] # Compute "0.5 * trace(tmp.dot(K_gradient))" without # constructing the full matrix tmp.dot(K_gradient) since only # its diagonal is required log_likelihood_gradient_dims = \ 0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient) log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1) if eval_gradient: return log_likelihood, log_likelihood_gradient else: return log_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min
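A minimal usage sketch for the estimator defined above, fitting on synthetic 1-D data and querying the posterior mean, standard deviation, and samples (the kernel choice and noise level here are arbitrary illustrations, not defaults):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = np.sin(X).ravel() + 0.1 * rng.randn(20)

kernel = C(1.0) * RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.1 ** 2,
                               n_restarts_optimizer=2, random_state=0)
gpr.fit(X, y)

X_test = np.linspace(0, 5, 100)[:, np.newaxis]
y_mean, y_std = gpr.predict(X_test, return_std=True)
y_samples = gpr.sample_y(X_test, n_samples=3, random_state=1)

print(gpr.kernel_)                         # kernel with optimized hyperparameters
print(gpr.log_marginal_likelihood_value_)  # log-marginal likelihood at the optimum
print(y_mean.shape, y_std.shape, y_samples.shape)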
bsd-3-clause
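The predict, sample_y and log_marginal_likelihood methods above belong to scikit-learn's GaussianProcessRegressor. A minimal usage sketch, assuming the public scikit-learn API (the class and kernel names are not shown in this fragment and are taken from that library); note that return_std and return_cov cannot be requested together, matching the RuntimeError above:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# Toy 1-D regression data.
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = np.sin(X).ravel() + 0.1 * rng.randn(20)

gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-2).fit(X, y)

X_test = np.linspace(0, 5, 50)[:, np.newaxis]
y_mean, y_std = gpr.predict(X_test, return_std=True)    # pointwise standard deviation
y_mean, y_cov = gpr.predict(X_test, return_cov=True)    # full covariance (separate call)
samples = gpr.sample_y(X_test, n_samples=3)             # draws from the posterior
lml = gpr.log_marginal_likelihood(gpr.kernel_.theta)    # log-marginal likelihood at the fitted theta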
MatthieuBizien/scikit-learn
examples/manifold/plot_manifold_sphere.py
16
5103
#!/usr/bin/python # -*- coding: utf-8 -*- """ ============================================= Manifold Learning methods on a severed sphere ============================================= An application of the different :ref:`manifold` techniques on a spherical data-set. Here one can see the use of dimensionality reduction in order to gain some intuition regarding the manifold learning methods. Regarding the dataset, the poles are cut from the sphere, as well as a thin slice down its side. This enables the manifold learning techniques to 'spread it open' whilst projecting it onto two dimensions. For a similar example, where the methods are applied to the S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py` Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. Here the manifold problem matches fairly that of representing a flat map of the Earth, as with `map projection <https://en.wikipedia.org/wiki/Map_projection>`_ """ # Author: Jaques Grobler <jaques.grobler@inria.fr> # License: BSD 3 clause print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter from sklearn import manifold from sklearn.utils import check_random_state # Next line to silence pyflakes. Axes3D # Variables for manifold learning. n_neighbors = 10 n_samples = 1000 # Create our sphere. random_state = check_random_state(0) p = random_state.rand(n_samples) * (2 * np.pi - 0.55) t = random_state.rand(n_samples) * np.pi # Sever the poles from the sphere. indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8)))) colors = p[indices] x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \ np.sin(t[indices]) * np.sin(p[indices]), \ np.cos(t[indices]) # Plot our dataset. fig = plt.figure(figsize=(15, 8)) plt.suptitle("Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14) ax = fig.add_subplot(251, projection='3d') ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow) try: # compatibility matplotlib < 1.0 ax.view_init(40, -10) except: pass sphere_data = np.array([x, y, z]).T # Perform Locally Linear Embedding Manifold learning methods = ['standard', 'ltsa', 'hessian', 'modified'] labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE'] for i, method in enumerate(methods): t0 = time() trans_data = manifold\ .LocallyLinearEmbedding(n_neighbors, 2, method=method).fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % (methods[i], t1 - t0)) ax = fig.add_subplot(252 + i) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Isomap Manifold learning. t0 = time() trans_data = manifold.Isomap(n_neighbors, n_components=2)\ .fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % ('ISO', t1 - t0)) ax = fig.add_subplot(257) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Multi-dimensional scaling. 
t0 = time() mds = manifold.MDS(2, max_iter=100, n_init=1) trans_data = mds.fit_transform(sphere_data).T t1 = time() print("MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(258) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Spectral Embedding. t0 = time() se = manifold.SpectralEmbedding(n_components=2, n_neighbors=n_neighbors) trans_data = se.fit_transform(sphere_data).T t1 = time() print("Spectral Embedding: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(259) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform t-distributed stochastic neighbor embedding. t0 = time() tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) trans_data = tsne.fit_transform(sphere_data).T t1 = time() print("t-SNE: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(2, 5, 10) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("t-SNE (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') plt.show()
bsd-3-clause
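The example above applies several sklearn.manifold estimators to a severed sphere. A condensed sketch of the same pattern (data generation plus one embedding); the parameter values mirror the script, and the choice of Isomap here is arbitrary:

import numpy as np
from sklearn import manifold
from sklearn.utils import check_random_state

# Rebuild the severed sphere used above (poles and a thin slice removed).
rs = check_random_state(0)
p = rs.rand(1000) * (2 * np.pi - 0.55)
t = rs.rand(1000) * np.pi
keep = (t < np.pi - np.pi / 8) & (t > np.pi / 8)
sphere = np.c_[np.sin(t[keep]) * np.cos(p[keep]),
               np.sin(t[keep]) * np.sin(p[keep]),
               np.cos(t[keep])]

# Every estimator in the example follows the same fit_transform pattern.
embedding = manifold.Isomap(n_neighbors=10, n_components=2).fit_transform(sphere)
print(embedding.shape)   # (n_kept_points, 2)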
lfairchild/PmagPy
programs/eqarea_magic.py
1
22546
#!/usr/bin/env python # -*- python-indent-offset: 4; -*- import sys import os import matplotlib if matplotlib.get_backend() != "TKAgg": matplotlib.use("TKAgg") import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib import pmagpy.contribution_builder as cb def plot_eq(in_file='sites.txt', dir_path=".", input_dir_path="", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", plot_by="all", crd="g", ignore_tilt=False, save_plots=True, fmt="svg", contour=False, color_map="coolwarm", plot_ell=""): """ makes equal area projections from declination/inclination data Parameters ---------- in_file : str, default "sites.txt" dir_path : str output directory, default "." input_dir_path : str input file directory (if different from dir_path), default "" spec_file : str input specimen file name, default "specimens.txt" samp_file: str input sample file name, default "samples.txt" site_file : str input site file name, default "sites.txt" loc_file : str input location file name, default "locations.txt" plot_by : str [spc, sam, sit, loc, all] (specimen, sample, site, location, all), default "all" crd : ['s','g','t'], coordinate system for plotting whereby: s : specimen coordinates, aniso_tile_correction = -1 g : geographic coordinates, aniso_tile_correction = 0 (default) t : tilt corrected coordinates, aniso_tile_correction = 100 ignore_tilt : bool default False. If True, data are unoriented (allows plotting of measurement dec/inc) save_plots : bool plot and save non-interactively, default True fmt : str ["png", "svg", "pdf", "jpg"], default "svg" contour : bool plot as color contour colormap : str color map for contour plotting, default "coolwarm" see cartopy documentation for more options plot_ell : str [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors default "" plots none Returns --------- type - Tuple : (True or False indicating if conversion was sucessful, file name(s) written) """ # parse out input/out directories dir_path = os.path.realpath(dir_path) if not input_dir_path: input_dir_path = dir_path input_dir_path = os.path.realpath(input_dir_path) # initialize some variables verbose = pmagplotlib.verbose FIG = {} # plot dictionary FIG['eqarea'] = 1 # eqarea is figure 1 pmagplotlib.plot_init(FIG['eqarea'], 5, 5) # get coordinate system if crd == "s": coord = "-1" elif crd == "t": coord = "100" else: coord = "0" # get item to plot by if plot_by == 'all': plot_key = 'all' elif plot_by == 'sit': plot_key = 'site' elif plot_by == 'sam': plot_key = 'sample' elif plot_by == 'spc': plot_key = 'specimen' else: plot_by = 'all' plot_key = 'all' # get distribution to plot ellipses/eigenvectors if desired if plot_ell: dist = plot_ell.upper() # if dist type is unrecognized, use Fisher if dist not in ['F', 'K', 'B', 'BE', 'BV']: dist = 'F' if dist == "BV": FIG['bdirs'] = 2 pmagplotlib.plot_init(FIG['bdirs'], 5, 5) if save_plots: do_plot = True verbose = False # set keys dec_key = 'dir_dec' inc_key = 'dir_inc' tilt_key = 'dir_tilt_correction' # create contribution fnames = {"specimens": spec_file, "samples": samp_file, 'sites': site_file, 'locations': loc_file} if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)): print('-E- Could not find {}'.format(in_file)) return False, [] contribution = cb.Contribution(input_dir_path, custom_filenames=fnames, single_file=in_file) try: contribution.propagate_location_to_samples() contribution.propagate_location_to_specimens() 
contribution.propagate_location_to_measurements() except KeyError as ex: pass # the object that contains the DataFrame + useful helper methods: table_name = list(contribution.tables.keys())[0] data_container = contribution.tables[table_name] # the actual DataFrame: data = data_container.df if plot_key != "all" and plot_key not in data.columns: print("-E- You can't plot by {} with the data provided".format(plot_key)) return False, [] # add tilt key into DataFrame columns if it isn't there already if tilt_key not in data.columns: data.loc[:, tilt_key] = None if verbose: print(len(data), ' records read from ', in_file) # find desired dec,inc data: dir_type_key = '' # # get plotlist if not plotting all records # plotlist = [] if plot_key != "all": # return all where plot_key is not blank if plot_key not in data.columns: print('-E- Can\'t plot by "{}". That header is not in infile: {}'.format( plot_key, in_file)) return False, [] plots = data[data[plot_key].notnull()] plotlist = plots[plot_key].unique() # grab unique values else: plotlist.append('All') for plot in plotlist: if verbose: print(plot) if plot == 'All': # plot everything at once plot_data = data else: # pull out only partial data plot_data = data[data[plot_key] == plot] DIblock = [] GCblock = [] # SLblock, SPblock = [], [] title = plot mode = 1 if dec_key not in plot_data.columns: print("-W- No dec/inc data") continue # get all records where dec & inc values exist plot_data = plot_data[plot_data[dec_key].notnull() & plot_data[inc_key].notnull()] if plot_data.empty: print("-W- No dec/inc data") continue # get metadata for naming the plot file locations = str(data_container.get_name('location', df_slice=plot_data)) site = str(data_container.get_name('site', df_slice=plot_data)) sample = str(data_container.get_name('sample', df_slice=plot_data)) specimen = str(data_container.get_name('specimen', df_slice=plot_data)) # make sure method_codes is in plot_data if 'method_codes' not in plot_data.columns: plot_data['method_codes'] = '' # get data blocks # would have to ignore tilt to use measurement level data DIblock = data_container.get_di_block(df_slice=plot_data, tilt_corr=coord, excl=['DE-BFP'], ignore_tilt=ignore_tilt) #SLblock = [[ind, row['method_codes']] for ind, row in plot_data.iterrows()] # get great circles great_circle_data = data_container.get_records_for_code('DE-BFP', incl=True, use_slice=True, sli=plot_data) if len(great_circle_data) > 0: gc_cond = great_circle_data[tilt_key] == coord GCblock = [[float(row[dec_key]), float(row[inc_key])] for ind, row in great_circle_data[gc_cond].iterrows()] #SPblock = [[ind, row['method_codes']] for ind, row in great_circle_data[gc_cond].iterrows()] if len(DIblock) > 0: if not contour: pmagplotlib.plot_eq(FIG['eqarea'], DIblock, title) else: pmagplotlib.plot_eq_cont( FIG['eqarea'], DIblock, color_map=color_map) else: pmagplotlib.plot_net(FIG['eqarea']) if len(GCblock) > 0: for rec in GCblock: pmagplotlib.plot_circ(FIG['eqarea'], rec, 90., 'g') if len(DIblock) == 0 and len(GCblock) == 0: if verbose: print("no records for plotting") continue # sys.exit() if plot_ell: ppars = pmag.doprinc(DIblock) # get principal directions nDIs, rDIs, npars, rpars = [], [], [], [] for rec in DIblock: angle = pmag.angle([rec[0], rec[1]], [ ppars['dec'], ppars['inc']]) if angle > 90.: rDIs.append(rec) else: nDIs.append(rec) if dist == 'B': # do on whole dataset etitle = "Bingham confidence ellipse" bpars = pmag.dobingham(DIblock) for key in list(bpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' 
% (bpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (bpars[key])) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist == 'F': etitle = "Fisher confidence cone" if len(nDIs) > 2: fpars = pmag.fisher_mean(nDIs) for key in list(fpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (fpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (fpars[key])) mode += 1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] npars.append(fpars['inc']-isign*90.) # Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec']+90.) # Beta dec npars.append(0.) # Beta inc if len(rDIs) > 2: fpars = pmag.fisher_mean(rDIs) if verbose: print("mode ", mode) for key in list(fpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (fpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (fpars[key])) mode += 1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] rpars.append(fpars['inc']-isign*90.) # Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec']+90.) # Beta dec rpars.append(0.) # Beta inc if dist == 'K': etitle = "Kent confidence ellipse" if len(nDIs) > 3: kpars = pmag.dokent(nDIs, len(nDIs)) if verbose: print("mode ", mode) for key in list(kpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (kpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (kpars[key])) mode += 1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs) > 3: kpars = pmag.dokent(rDIs, len(rDIs)) if verbose: print("mode ", mode) for key in list(kpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (kpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (kpars[key])) mode += 1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if dist == 'BE': if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) Bkpars = pmag.dokent(BnDIs, 1.) if verbose: print("mode ", mode) for key in list(Bkpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (Bkpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (Bkpars[key])) mode += 1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) Bkpars = pmag.dokent(BrDIs, 1.) 
if verbose: print("mode ", mode) for key in list(Bkpars.keys()): if key != 'n' and verbose: print(" ", key, '%7.1f' % (Bkpars[key])) if key == 'n' and verbose: print(" ", key, ' %i' % (Bkpars[key])) mode += 1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) etitle = "Bootstrapped confidence ellipse" elif dist == 'BV': sym = {'lower': ['o', 'c'], 'upper': [ 'o', 'g'], 'size': 3, 'edgecolor': 'face'} if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) pmagplotlib.plot_eq_sym( FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', sym) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) if len(nDIs) > 5: # plot on existing plots pmagplotlib.plot_di_sym(FIG['bdirs'], BrDIs, sym) else: pmagplotlib.plot_eq( FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors') if dist == 'B': if len(nDIs) > 3 or len(rDIs) > 3: pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0) elif len(nDIs) > 3 and dist != 'BV': pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0) if len(rDIs) > 3: pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0) elif len(rDIs) > 3 and dist != 'BV': pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0) for key in list(FIG.keys()): files = {} #if filename: # use provided filename # filename += '.' + fmt if pmagplotlib.isServer: # use server plot naming convention filename = 'LO:_'+locations+'_SI:_'+site+'_SA:_'+sample + \ '_SP:_'+str(specimen)+'_CO:_'+crd+'_TY:_'+key+'_.'+fmt elif plot_key == 'all': filename = 'all' if 'location' in plot_data.columns: locs = plot_data['location'].unique() loc_string = "_".join( [str(loc).replace(' ', '_') for loc in locs]) filename += "_" + loc_string filename += "_" + crd + "_" + key filename += ".{}".format(fmt) else: # use more readable naming convention filename = '' # fix this if plot_by is location , for example use_names = {'location': [locations], 'site': [locations, site], 'sample': [locations, site, sample], 'specimen': [locations, site, sample, specimen]} use = use_names[plot_key] use.extend([crd, key]) # [locations, site, sample, specimen, crd, key]: for item in use: if item: item = item.replace(' ', '_') filename += item + '_' if filename.endswith('_'): filename = filename[:-1] filename += ".{}".format(fmt) if not pmagplotlib.isServer: filename = os.path.join(dir_path, filename) files[key] = filename if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles = {} titles['eqarea'] = 'Equal Area Plot' FIG = pmagplotlib.add_borders(FIG, titles, black, purple) pmagplotlib.save_plots(FIG, files) elif do_plot: pmagplotlib.save_plots(FIG, files, incl_directory=True) continue if verbose: pmagplotlib.draw_figs(FIG) ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans == "q": return True, [] if ans == "a": pmagplotlib.save_plots(FIG, files, incl_directory=True) continue return True, [] def main(): """ NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted sites, samples, specimens, or measurements OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic, default='sites.txt' supported types=[measurements, specimens, samples, sites] -fsp FILE: specify specimen file name, (required if you want to plot measurements by sample) default='specimens.txt' -fsa FILE: specify sample file name, (required if you 
want to plot specimens by site) default='samples.txt' -fsi FILE: specify site file name, default='sites.txt' -flo FILE: specify location file name, default='locations.txt' -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic, unspecified assumed geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors -c plot as colour contour -cm CM use color map CM [default is coolwarm] -sav save plot and quit quietly -no-tilt data are unoriented, allows plotting of measurement dec/inc NOTE all: entire file; sit: site; sam: sample; spc: specimen """ # extract arguments from sys.argv if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", default_val=".") input_dir_path = pmag.get_named_arg('-ID', '') if not input_dir_path: input_dir_path = dir_path in_file = pmag.get_named_arg("-f", default_val="sites.txt") in_file = pmag.resolve_file_name(in_file, input_dir_path) if "-ID" not in sys.argv: input_dir_path = os.path.split(in_file)[0] plot_by = pmag.get_named_arg("-obj", default_val="all").lower() spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt") samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt") site_file = pmag.get_named_arg("-fsi", default_val="sites.txt") loc_file = pmag.get_named_arg("-flo", default_val="locations.txt") ignore_tilt = False if '-no-tilt' in sys.argv: ignore_tilt = True color_map = "coolwarm" if '-c' in sys.argv: contour = True if '-cm' in sys.argv: ind = sys.argv.index('-cm') color_map = sys.argv[ind+1] else: color_map = 'coolwarm' else: contour = False save_plots = False if '-sav' in sys.argv: save_plots = True plot_ell = False dist = "" if '-ell' in sys.argv: plot_ell = pmag.get_named_arg("-ell", "F") crd = pmag.get_named_arg("-crd", default_val="g") fmt = pmag.get_named_arg("-fmt", "svg") #filename = pmag.get_named_arg('-fname', '') plot_eq(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file, plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map, plot_ell) if __name__ == "__main__": main()
bsd-3-clause
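plot_eq above can also be called as a library function. The sketch below is based only on the signature and docstring shown; the import path mirrors this repository layout and is an assumption, as is the presence of a MagIC-format sites.txt in the working directory:

# Equivalent command line (options listed in main()'s docstring):
#   eqarea_magic.py -f sites.txt -obj sit -crd g -fmt svg -sav

from programs.eqarea_magic import plot_eq  # assumed import path

ok, saved = plot_eq(in_file="sites.txt", dir_path=".", plot_by="sit",
                    crd="g", save_plots=True, fmt="svg")
print(ok, saved)   # (success flag, file name(s) written) per the docstring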
ArcherSys/ArcherSys
Lib/site-packages/ipykernel/inprocess/ipkernel.py
3
6604
"""An in-process kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from contextlib import contextmanager import logging import sys from IPython.core.interactiveshell import InteractiveShellABC from ipykernel.jsonutil import json_clean from traitlets import Any, Enum, Instance, List, Type from ipykernel.ipkernel import IPythonKernel from ipykernel.zmqshell import ZMQInteractiveShell from .socket import DummySocket from ..iostream import OutStream, BackgroundSocket, IOPubThread #----------------------------------------------------------------------------- # Main kernel class #----------------------------------------------------------------------------- class InProcessKernel(IPythonKernel): #------------------------------------------------------------------------- # InProcessKernel interface #------------------------------------------------------------------------- # The frontends connected to this kernel. frontends = List( Instance('ipykernel.inprocess.client.InProcessKernelClient', allow_none=True) ) # The GUI environment that the kernel is running under. This need not be # specified for the normal operation for the kernel, but is required for # IPython's GUI support (including pylab). The default is 'inline' because # it is safe under all GUI toolkits. gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'), default_value='inline') raw_input_str = Any() stdout = Any() stderr = Any() #------------------------------------------------------------------------- # Kernel interface #------------------------------------------------------------------------- shell_class = Type(allow_none=True) shell_streams = List() control_stream = Any() _underlying_iopub_socket = Instance(DummySocket, ()) iopub_thread = Instance(IOPubThread) def _iopub_thread_default(self): thread = IOPubThread(self._underlying_iopub_socket) thread.start() return thread iopub_socket = Instance(BackgroundSocket) def _iopub_socket_default(self): return self.iopub_thread.background_socket stdin_socket = Instance(DummySocket, ()) def __init__(self, **traits): super(InProcessKernel, self).__init__(**traits) self._underlying_iopub_socket.on_trait_change(self._io_dispatch, 'message_sent') self.shell.kernel = self def execute_request(self, stream, ident, parent): """ Override for temporary IO redirection. """ with self._redirected_io(): super(InProcessKernel, self).execute_request(stream, ident, parent) def start(self): """ Override registration of dispatchers for streams. """ self.shell.exit_now = False def _abort_queue(self, stream): """ The in-process kernel doesn't abort requests. """ pass def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. self.raw_input_str = None sys.stderr.flush() sys.stdout.flush() # Send the input request. content = json_clean(dict(prompt=prompt, password=password)) msg = self.session.msg(u'input_request', content, parent) for frontend in self.frontends: if frontend.session.session == parent['header']['session']: frontend.stdin_channel.call_handlers(msg) break else: logging.error('No frontend found for raw_input request') return str() # Await a response. 
while self.raw_input_str is None: frontend.stdin_channel.process_events() return self.raw_input_str #------------------------------------------------------------------------- # Protected interface #------------------------------------------------------------------------- @contextmanager def _redirected_io(self): """ Temporarily redirect IO to the kernel. """ sys_stdout, sys_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = self.stdout, self.stderr yield sys.stdout, sys.stderr = sys_stdout, sys_stderr #------ Trait change handlers -------------------------------------------- def _io_dispatch(self): """ Called when a message is sent to the IO socket. """ ident, msg = self.session.recv(self.iopub_socket, copy=False) for frontend in self.frontends: frontend.iopub_channel.call_handlers(msg) #------ Trait initializers ----------------------------------------------- def _log_default(self): return logging.getLogger(__name__) def _session_default(self): from jupyter_client.session import Session return Session(parent=self, key=b'') def _shell_class_default(self): return InProcessInteractiveShell def _stdout_default(self): return OutStream(self.session, self.iopub_thread, u'stdout') def _stderr_default(self): return OutStream(self.session, self.iopub_thread, u'stderr') #----------------------------------------------------------------------------- # Interactive shell subclass #----------------------------------------------------------------------------- class InProcessInteractiveShell(ZMQInteractiveShell): kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) #------------------------------------------------------------------------- # InteractiveShell interface #------------------------------------------------------------------------- def enable_gui(self, gui=None): """Enable GUI integration for the kernel.""" from ipykernel.eventloops import enable_gui if not gui: gui = self.kernel.gui return enable_gui(gui, kernel=self.kernel) def enable_matplotlib(self, gui=None): """Enable matplotlib integration for the kernel.""" if not gui: gui = self.kernel.gui return super(InProcessInteractiveShell, self).enable_matplotlib(gui) def enable_pylab(self, gui=None, import_all=True, welcome_message=False): """Activate pylab support at runtime.""" if not gui: gui = self.kernel.gui return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all, welcome_message) InteractiveShellABC.register(InProcessInteractiveShell)
mit
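A minimal sketch of driving the in-process kernel defined above. It assumes the companion InProcessKernelManager and client classes that ship in the same ipykernel.inprocess package (they are not shown in this file):

from ipykernel.inprocess import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()                     # creates an InProcessKernel in this process
kernel = km.kernel

client = km.client()
client.start_channels()
client.execute("a = 40 + 2")          # dispatched synchronously, no ZMQ sockets involved
print(kernel.shell.user_ns.get("a"))  # -> 42; the code ran inside this same process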
huongttlan/statsmodels
statsmodels/stats/anova.py
25
13433
from statsmodels.compat.python import lrange, lmap import numpy as np from scipy import stats from pandas import DataFrame, Index from statsmodels.formula.formulatools import (_remove_intercept_patsy, _has_intercept, _intercept_idx) def _get_covariance(model, robust): if robust is None: return model.cov_params() elif robust == "hc0": se = model.HC0_se return model.cov_HC0 elif robust == "hc1": se = model.HC1_se return model.cov_HC1 elif robust == "hc2": se = model.HC2_se return model.cov_HC2 elif robust == "hc3": se = model.HC3_se return model.cov_HC3 else: # pragma: no cover raise ValueError("robust options %s not understood" % robust) #NOTE: these need to take into account weights ! def anova_single(model, **kwargs): """ ANOVA table for one fitted linear model. Parameters ---------- model : fitted linear model results instance A fitted linear model typ : int or str {1,2,3} or {"I","II","III"} Type of sum of squares to use. **kwargs** scale : float Estimate of variance, If None, will be estimated from the largest model. Default is None. test : str {"F", "Chisq", "Cp"} or None Test statistics to provide. Default is "F". Notes ----- Use of this function is discouraged. Use anova_lm instead. """ test = kwargs.get("test", "F") scale = kwargs.get("scale", None) typ = kwargs.get("typ", 1) robust = kwargs.get("robust", None) if robust: robust = robust.lower() endog = model.model.endog exog = model.model.exog nobs = exog.shape[0] response_name = model.model.endog_names design_info = model.model.data.design_info exog_names = model.model.exog_names # +1 for resids n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1) pr_test = "PR(>%s)" % test names = ['df', 'sum_sq', 'mean_sq', test, pr_test] table = DataFrame(np.zeros((n_rows, 5)), columns = names) if typ in [1,"I"]: return anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test, pr_test, robust) elif typ in [2, "II"]: return anova2_lm_single(model, design_info, n_rows, test, pr_test, robust) elif typ in [3, "III"]: return anova3_lm_single(model, design_info, n_rows, test, pr_test, robust) elif typ in [4, "IV"]: raise NotImplemented("Type IV not yet implemented") else: # pragma: no cover raise ValueError("Type %s not understood" % str(typ)) def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test, pr_test, robust): """ ANOVA table for one fitted linear model. Parameters ---------- model : fitted linear model results instance A fitted linear model **kwargs** scale : float Estimate of variance, If None, will be estimated from the largest model. Default is None. test : str {"F", "Chisq", "Cp"} or None Test statistics to provide. Default is "F". Notes ----- Use of this function is discouraged. Use anova_lm instead. """ #maybe we should rethink using pinv > qr in OLS/linear models? 
effects = getattr(model, 'effects', None) if effects is None: q,r = np.linalg.qr(exog) effects = np.dot(q.T, endog) arr = np.zeros((len(design_info.terms), len(design_info.column_names))) slices = [design_info.slice(name) for name in design_info.term_names] for i,slice_ in enumerate(slices): arr[i, slice_] = 1 sum_sq = np.dot(arr, effects**2) #NOTE: assumes intercept is first column idx = _intercept_idx(design_info) sum_sq = sum_sq[~idx] term_names = np.array(design_info.term_names) # want boolean indexing term_names = term_names[~idx] index = term_names.tolist() table.index = Index(index + ['Residual']) table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq] if test == 'F': table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/ (model.ssr/model.df_resid)) table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"], model.df_resid) # fill in residual table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr, model.df_resid, np.nan, np.nan) table['mean_sq'] = table['sum_sq'] / table['df'] return table #NOTE: the below is not agnostic about formula... def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust): """ ANOVA type II table for one fitted linear model. Parameters ---------- model : fitted linear model results instance A fitted linear model **kwargs** scale : float Estimate of variance, If None, will be estimated from the largest model. Default is None. test : str {"F", "Chisq", "Cp"} or None Test statistics to provide. Default is "F". Notes ----- Use of this function is discouraged. Use anova_lm instead. Type II Sum of Squares compares marginal contribution of terms. Thus, it is not particularly useful for models with significant interaction terms. """ terms_info = design_info.terms[:] # copy terms_info = _remove_intercept_patsy(terms_info) names = ['sum_sq', 'df', test, pr_test] table = DataFrame(np.zeros((n_rows, 4)), columns = names) cov = _get_covariance(model, None) robust_cov = _get_covariance(model, robust) col_order = [] index = [] for i, term in enumerate(terms_info): # grab all varaibles except interaction effects that contain term # need two hypotheses matrices L1 is most restrictive, ie., term==0 # L2 is everything except term==0 cols = design_info.slice(term) L1 = lrange(cols.start, cols.stop) L2 = [] term_set = set(term.factors) for t in terms_info: # for the term you have other_set = set(t.factors) if term_set.issubset(other_set) and not term_set == other_set: col = design_info.slice(t) # on a higher order term containing current `term` L1.extend(lrange(col.start, col.stop)) L2.extend(lrange(col.start, col.stop)) L1 = np.eye(model.model.exog.shape[1])[L1] L2 = np.eye(model.model.exog.shape[1])[L2] if L2.size: LVL = np.dot(np.dot(L1,robust_cov),L2.T) from scipy import linalg orth_compl,_ = linalg.qr(LVL) r = L1.shape[0] - L2.shape[0] # L1|2 # use the non-unique orthogonal completion since L12 is rank r L12 = np.dot(orth_compl[:,-r:].T, L1) else: L12 = L1 r = L1.shape[0] #from IPython.core.debugger import Pdb; Pdb().set_trace() if test == 'F': f = model.f_test(L12, cov_p=robust_cov) table.ix[i, test] = test_value = f.fvalue table.ix[i, pr_test] = f.pvalue # need to back out SSR from f_test table.ix[i, 'df'] = r col_order.append(cols.start) index.append(term.name()) table.index = Index(index + ['Residual']) table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])] # back out sum of squares from f_test ssr = table[test] * table['df'] * model.ssr/model.df_resid table['sum_sq'] = ssr # fill in residual 
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr, model.df_resid, np.nan, np.nan) return table def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust): n_rows += _has_intercept(design_info) terms_info = design_info.terms names = ['sum_sq', 'df', test, pr_test] table = DataFrame(np.zeros((n_rows, 4)), columns = names) cov = _get_covariance(model, robust) col_order = [] index = [] for i, term in enumerate(terms_info): # grab term, hypothesis is that term == 0 cols = design_info.slice(term) L1 = np.eye(model.model.exog.shape[1])[cols] L12 = L1 r = L1.shape[0] if test == 'F': f = model.f_test(L12, cov_p=cov) table.ix[i, test] = test_value = f.fvalue table.ix[i, pr_test] = f.pvalue # need to back out SSR from f_test table.ix[i, 'df'] = r #col_order.append(cols.start) index.append(term.name()) table.index = Index(index + ['Residual']) #NOTE: Don't need to sort because terms are an ordered dict now #table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])] # back out sum of squares from f_test ssr = table[test] * table['df'] * model.ssr/model.df_resid table['sum_sq'] = ssr # fill in residual table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr, model.df_resid, np.nan, np.nan) return table def anova_lm(*args, **kwargs): """ ANOVA table for one or more fitted linear models. Parameters ---------- args : fitted linear model results instance One or more fitted linear models scale : float Estimate of variance, If None, will be estimated from the largest model. Default is None. test : str {"F", "Chisq", "Cp"} or None Test statistics to provide. Default is "F". typ : str or int {"I","II","III"} or {1,2,3} The type of ANOVA test to perform. See notes. robust : {None, "hc0", "hc1", "hc2", "hc3"} Use heteroscedasticity-corrected coefficient covariance matrix. If robust covariance is desired, it is recommended to use `hc3`. Returns ------- anova : DataFrame A DataFrame containing. Notes ----- Model statistics are given in the order of args. Models must have been fit using the formula api. See Also -------- model_results.compare_f_test, model_results.compare_lm_test Examples -------- >>> import statsmodels.api as sm >>> from statsmodels.formula.api import ols >>> moore = sm.datasets.get_rdataset("Moore", "car", ... cache=True) # load data >>> data = moore.data >>> data = data.rename(columns={"partner.status" : ... "partner_status"}) # make name pythonic >>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)', ... data=data).fit() >>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame >>> print table """ typ = kwargs.get('typ', 1) ### Farm Out Single model ANOVA Type I, II, III, and IV ### if len(args) == 1: model = args[0] return anova_single(model, **kwargs) try: assert typ in [1,"I"] except: raise ValueError("Multiple models only supported for type I. 
" "Got type %s" % str(typ)) ### COMPUTE ANOVA TYPE I ### # if given a single model if len(args) == 1: return anova_single(*args, **kwargs) # received multiple fitted models test = kwargs.get("test", "F") scale = kwargs.get("scale", None) n_models = len(args) model_formula = [] pr_test = "Pr(>%s)" % test names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test] table = DataFrame(np.zeros((n_models, 6)), columns = names) if not scale: # assume biggest model is last scale = args[-1].scale table["ssr"] = lmap(getattr, args, ["ssr"]*n_models) table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models) table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values) table["ss_diff"] = -table["ssr"].diff() if test == "F": table["F"] = table["ss_diff"] / table["df_diff"] / scale table[pr_test] = stats.f.sf(table["F"], table["df_diff"], table["df_resid"]) # for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan table[pr_test][table['F'].isnull()] = np.nan return table if __name__ == "__main__": import pandas from statsmodels.formula.api import ols # in R #library(car) #write.csv(Moore, "moore.csv", row.names=FALSE) moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1, names=['partner_status','conformity', 'fcategory','fscore']) moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)', data=moore).fit() mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit() # for each term you just want to test vs the model without its # higher-order terms # using Monette-Fox slides and Marden class notes for linear algebra / # orthogonal complement # https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/ table = anova_lm(moore_lm, typ=2)
bsd-3-clause
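Besides the single-model tables, the tail of the module handles a sequence of fitted models (the sequential, type I comparison). A usage sketch built from the same Moore dataset used in the module's docstring example:

import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm

moore = sm.datasets.get_rdataset("Moore", "car", cache=True).data
moore = moore.rename(columns={"partner.status": "partner_status"})

restricted = ols("conformity ~ C(partner_status, Sum)", data=moore).fit()
full = ols("conformity ~ C(fcategory, Sum)*C(partner_status, Sum)", data=moore).fit()

# With several models, anova_lm builds the df_resid / ssr / ss_diff / F table
# computed in the multi-model branch above.
print(anova_lm(restricted, full))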
honahursey/pyFDA
pyfda/plot_widgets/plot_utils.py
1
17195
# -*- coding: utf-8 -*- """ Common plotting utilities Author: Christian Muenker 2015 http://matplotlib.1069221.n5.nabble.com/Figure-with-pyQt-td19095.html http://stackoverflow.com/questions/17973177/matplotlib-and-pyqt-dynamic-figure-runs-slow-after-several-loads-or-looks-messy """ from __future__ import print_function, division, unicode_literals from PyQt4 import QtGui, QtCore from PyQt4.QtGui import QSizePolicy, QLabel, QInputDialog import os, sys import six # do not import matplotlib.pyplot - pyplot brings its own GUI, event loop etc!!! from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar #from matplotlib.backend_bases import cursors as mplCursors from matplotlib.figure import Figure from matplotlib.transforms import Bbox #from mpl_toolkits.mplot3d.axes3d import Axes3D from matplotlib import rcParams try: import matplotlib.backends.qt_editor.figureoptions as figureoptions except ImportError: figureoptions = None from pyfda import pyfda_rc # read user settings for linewidth, font size etc. and apply them to matplotlib for key in pyfda_rc.mpl_rc: rcParams[key] = pyfda_rc.mpl_rc[key] DEBUG = True ####USED TO GET THE USERS HOME DIRECTORY FOR USE OF A TEMP FILE # taken from # http://matplotlib.1069221.n5.nabble.com/Figure-with-pyQt-td19095.html def valid(path): if path and os.path.isdir(path): return True return False def env(name): return os.environ.get( name, '' ) def getHomeDir(): if sys.platform != 'win32': return os.path.expanduser( '~' ) homeDir = env( 'USERPROFILE' ) if not valid(homeDir): homeDir = env( 'HOME' ) if not valid(homeDir) : homeDir = '%s%s' % (env('HOMEDRIVE'),env('HOMEPATH')) if not valid(homeDir) : homeDir = env( 'SYSTEMDRIVE' ) if homeDir and (not homeDir.endswith('\\')) : homeDir += '\\' if not valid(homeDir) : homeDir = 'C:\\' return homeDir #------------------------------------------------------------------------------ class MplWidget(QtGui.QWidget): """ Construct a subwidget with Matplotlib canvas and NavigationToolbar """ def __init__(self, parent = None): super(MplWidget, self).__init__() # initialize QWidget Base Class # Create the mpl figure and subplot (white bg, 100 dots-per-inch). 
# Construct the canvas with the figure # self.plt_lim = [] # x,y plot limits self.fig = Figure() # self.mpl = self.fig.add_subplot(111) # self.fig.add_axes([.1,.1,.9,.9])# # self.mpl21 = self.fig.add_subplot(211) self.pltCanv = FigureCanvas(self.fig) self.pltCanv.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) # Needed for mouse modifiers (x,y, <CTRL>, ...): # Key press events in general are not processed unless you # "activate the focus of Qt onto your mpl canvas" # http://stackoverflow.com/questions/22043549/matplotlib-and-qt-mouse-press-event-key-is-always-none self.pltCanv.setFocusPolicy( QtCore.Qt.ClickFocus ) #self.pltCanv.setFocusPolicy(QtCore.Qt.WheelFocus) self.pltCanv.setFocus() self.pltCanv.updateGeometry() # Create a custom navigation toolbar, tied to the canvas # #self.mplToolbar = NavigationToolbar(self.pltCanv, self) # original self.mplToolbar = MyMplToolbar(self.pltCanv, self) self.mplToolbar.grid = True self.mplToolbar.enable_update = True #============================================= # Widget layout with QHBox / QVBox #============================================= # self.hbox = QtGui.QHBoxLayout() # # for w in [self.mpl_toolbar, self.butDraw, self.cboxGrid]: # self.hbox.addWidget(w) # self.hbox.setAlignment(w, QtCore.Qt.AlignVCenter) # self.hbox.setSizeConstraint(QtGui.QLayout.SetFixedSize) self.layVMainMpl = QtGui.QVBoxLayout() # self.layVMainMpl.addLayout(self.hbox) self.layVMainMpl.addWidget(self.mplToolbar) self.layVMainMpl.addWidget(self.pltCanv) self.setLayout(self.layVMainMpl) def redraw(self): """ Redraw the figure with new properties (grid, linewidth) """ # self.ax.grid(self.mplToolbar.grid) for ax in self.fig.axes: ax.grid(self.mplToolbar.grid) # collect axes objects and toggle grid # plt.artist.setp(self.pltPlt, linewidth = self.sldLw.value()/5.) self.fig.tight_layout(pad = 0.2) # self.pltCanv.updateGeometry() # self.pltCanv.adjustSize() # resize the parent widget to fit its content self.pltCanv.draw() # now (re-)draw the figure # def pltFullView(self): """ Zoom to full extent of data if axes is set to "navigationable" by the navigation toolbar """ #Add current view limits to view history to enable "back to previous view" self.mplToolbar.push_current() for ax in self.fig.axes: if ax.get_navigate(): ax.autoscale() self.redraw() def full_extent(self, ax, pad=0.0): """Get the full extent of an axes, including axes labels, tick labels, and titles.""" #http://stackoverflow.com/questions/14712665/matplotlib-subplot-background-axes-face-labels-colour-or-figure-axes-coor # For text objects, we need to draw the figure first, otherwise the extents # are undefined. self.pltCanv.draw() items = ax.get_xticklabels() + ax.get_yticklabels() items += [ax, ax.title, ax.xaxis.label, ax.yaxis.label] # items += [ax, ax.title] bbox = Bbox.union([item.get_window_extent() for item in items]) return bbox.expanded(1.0 + pad, 1.0 + pad) #------------------------------------------------------------------------------ class MyMplToolbar(NavigationToolbar): """ Custom Matplotlib Navigationtoolbar, derived (sublassed) from Navigationtoolbar with the following changes: - new icon set - new functions and icons grid, full view - removed buttons for configuring subplots and editing curves - added an x,y location widget and icon derived from http://www.python-forum.de/viewtopic.php?f=24&t=26437 http://pydoc.net/Python/pyQPCR/0.7/pyQPCR.widgets.matplotlibWidget/ !! http://matplotlib.org/users/navigation_toolbar.html !! 
see also http://stackoverflow.com/questions/17711099/programmatically-change-matplotlib-toolbar-mode-in-qt4 http://matplotlib-users.narkive.com/C8XwIXah/need-help-with-darren-dale-qt-example-of-extending-toolbar https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/ Changing the info: http://stackoverflow.com/questions/15876011/add-information-to-matplotlib-navigation-toolbar-status-bar """ # toolitems = ( # ('Home', 'Reset original view', 'home', 'home'), # ('Back', 'Back to previous view', 'action-undo', 'back'), # ('Forward', 'Forward to next view', 'action-redo', 'forward'), # (None, None, None, None), # ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'), # ('Zoom', 'Zoom to rectangle', 'magnifying-glass', 'zoom'), # (None, None, None, None), # ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'), # ('Save', 'Save the figure', 'file', 'save_figure'), # ) # subclass NavigationToolbar, passing through arguments: #def __init__(self, canvas, parent, coordinates=True): def __init__(self, *args, **kwargs): NavigationToolbar.__init__(self, *args, **kwargs) # QtWidgets.QToolBar.__init__(self, parent) # def _icon(self, name): # return QtGui.QIcon(os.path.join(self.basedir, name)) # def _init_toolbar(self): # self.basedir = os.path.join(rcParams[ 'datapath' ], 'images/icons') iconDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','images','icons', '') # org self.basedir = os.path.join(rcParams['datapath'], 'images') self.basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','images', 'icons', '') #---------------- Construct Toolbar --------------------------------------- # ENABLE: a = self.addAction(QtGui.QIcon(iconDir + 'circle-check.svg'), \ 'Enable Plot', self.enable_update) a.setToolTip('Enable plot update') a.setCheckable(True) a.setChecked(True) # a.setEnabled(False) self.addSeparator() #--------------------------------------------- # HOME: self.a_ho = self.addAction(QtGui.QIcon(iconDir + 'home.svg'), \ 'Home', self.home) self.a_ho.setToolTip('Reset original view') # BACK: self.a_ba = self.addAction(QtGui.QIcon(iconDir + 'action-undo.svg'), \ 'Back', self.back) self.a_ba.setToolTip('Back to previous view') # FORWARD: self.a_fw = self.addAction(QtGui.QIcon(iconDir + 'action-redo.svg'), \ 'Forward', self.forward) self.a_fw.setToolTip('Forward to next view') self.addSeparator() #--------------------------------------------- # PAN: self.a_pa = self.addAction(QtGui.QIcon(iconDir + 'move.svg'), \ 'Pan', self.pan) self.a_pa.setToolTip("Pan axes with left mouse button, zoom with right,\n" "pressing x / y / CTRL yields horizontal / vertical / diagonal constraints.") self._actions['pan'] = self.a_pa self.a_pa.setCheckable(True) # ZOOM RECTANGLE: self.a_zo = self.addAction(QtGui.QIcon(iconDir + 'magnifying-glass.svg'), \ 'Zoom', self.zoom) self.a_zo.setToolTip("Zoom in / out to rectangle with left / right mouse button,\n" "pressing x / y / CTRL yields horizontal / vertical / diagonal constraints.") self._actions['zoom'] = self.a_zo self.a_zo.setCheckable(True) # FULL VIEW: self.a_fv = self.addAction(QtGui.QIcon(iconDir + 'fullscreen-enter.svg'), \ 'Zoom full extent', self.parent.pltFullView) self.a_fv.setToolTip('Zoom to full extent') # -------------------------------------- self.addSeparator() # -------------------------------------- # GRID: self.a_gr = self.addAction(QtGui.QIcon(iconDir + 'grid-four-up.svg'), \ 'Grid', self.toggle_grid) self.a_gr.setToolTip('Toggle Grid') 
self.a_gr.setCheckable(True) self.a_gr.setChecked(True) # REDRAW: self.a_rd = self.addAction(QtGui.QIcon(iconDir + 'brush.svg'), \ 'Redraw', self.parent.redraw) self.a_rd.setToolTip('Redraw Plot') # SAVE: self.a_sv = self.addAction(QtGui.QIcon(iconDir + 'file.svg'), \ 'Save', self.save_figure) self.a_sv.setToolTip('Save the figure') self.cb = None #will be used for the clipboard self.tempPath = getHomeDir() self.tempPath = os.path.join(self.tempPath,'tempMPL.png') self.a_cb = self.addAction(QtGui.QIcon(iconDir + 'camera-slr.svg'), \ 'Save', self.mpl2Clip) self.a_cb.setToolTip('Copy to clipboard') self.a_cb.setShortcut("Ctrl+C") # -------------------------------------- self.addSeparator() # -------------------------------------- if figureoptions is not None: self.a_op = self.addAction(QtGui.QIcon(iconDir + 'cog.svg'), 'Customize', self.edit_parameters) self.a_op.setToolTip('Edit curves line and axes parameters') self.buttons = {} # Add the x,y location widget at the right side of the toolbar # The stretch factor is 1 which means any resizing of the toolbar # will resize this label instead of the buttons. if self.coordinates: self.locLabel = QLabel("", self) self.locLabel.setAlignment( QtCore.Qt.AlignRight | QtCore.Qt.AlignTop) self.locLabel.setSizePolicy( QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Ignored)) labelAction = self.addWidget(self.locLabel) labelAction.setVisible(True) # reference holder for subplots_adjust window self.adj_window = None if figureoptions is not None: def edit_parameters(self): allaxes = self.canvas.figure.get_axes() if len(allaxes) == 1: axes = allaxes[0] else: titles = [] for axes in allaxes: title = axes.get_title() ylabel = axes.get_ylabel() label = axes.get_label() if title: fmt = "%(title)s" if ylabel: fmt += ": %(ylabel)s" fmt += " (%(axes_repr)s)" elif ylabel: fmt = "%(axes_repr)s (%(ylabel)s)" elif label: fmt = "%(axes_repr)s (%(label)s)" else: fmt = "%(axes_repr)s" titles.append(fmt % dict(title=title, ylabel=ylabel, label=label, axes_repr=repr(axes))) item, ok = QInputDialog.getItem( self.parent, 'Customize', 'Select axes:', titles, 0, False) if ok: axes = allaxes[titles.index(six.text_type(item))] else: return figureoptions.figure_edit(axes, self) # def mouse_move(self, event): # if not event.inaxes or not self._active: # if self._lastCursor != mplCursors.POINTER: # self.set_cursor(mplCursors.POINTER) # self._lastCursor = mplCursors.POINTER # else: # if self._active == 'ZOOM': # if self._lastCursor != mplCursors.SELECT_REGION: # self.set_cursor(mplCursors.SELECT_REGION) # self._lastCursor = mplCursors.SELECT_REGION # if self._xypress: # x, y = event.x, event.y # lastx, lasty, _, _, _, _ = self._xypress[0] # self.draw_rubberband(event, x, y, lastx, lasty) # elif (self._active == 'PAN' and # self._lastCursor != mplCursors.MOVE): # self.set_cursor(mplCursors.MOVE) # # self._lastCursor = mplCursors.MOVE # # if event.inaxes and event.inaxes.get_navigate(): # # try: s = event.inaxes.format_coord(event.xdata, event.ydata) # except ValueError: pass # except OverflowError: pass # else: # if len(self.mode): # self.set_message('%s : %s' % (self.mode, s)) # else: # self.set_message(s) # else: self.set_message(self.mode) def toggle_grid(self): """Toggle the grid and redraw the figure.""" self.grid = not self.grid self.parent.redraw() def enable_update(self): """ Toggle the enable button and setting and enable / disable all buttons accordingly. 
""" self.enable_update = not self.enable_update self.a_gr.setEnabled(self.enable_update) self.a_ho.setEnabled(self.enable_update) self.a_ba.setEnabled(self.enable_update) self.a_fw.setEnabled(self.enable_update) self.a_pa.setEnabled(self.enable_update) self.a_zo.setEnabled(self.enable_update) self.a_fv.setEnabled(self.enable_update) self.a_rd.setEnabled(self.enable_update) self.a_sv.setEnabled(self.enable_update) self.a_cb.setEnabled(self.enable_update) self.a_op.setEnabled(self.enable_update) def mpl2Clip(self): """ Save current figure to temporary file and copy it to the clipboard. """ try: self.canvas.figure.savefig(self.tempPath) # savefig(fname, dpi=None, facecolor='w', edgecolor='w', # orientation='portrait', papertype=None, format=None, # transparent=False): tempImg = QtGui.QImage(self.tempPath) self.cb = QtGui.QApplication.clipboard() self.cb.setImage(tempImg) except: print('Error copying figure to clipboard') errorMsg = "Sorry: %s\n\n:%s\n"%(sys.exc_type, sys.exc_value) print(errorMsg)
apache-2.0
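A minimal sketch of embedding the MplWidget / MyMplToolbar pair above in a PyQt4 application; the import path mirrors this repository and is otherwise an assumption:

import sys
import numpy as np
from PyQt4 import QtGui
from pyfda.plot_widgets.plot_utils import MplWidget  # assumed import path

app = QtGui.QApplication(sys.argv)

w = MplWidget()                      # canvas plus custom toolbar in one QWidget
ax = w.fig.add_subplot(111)          # the Matplotlib Figure is exposed as .fig
x = np.linspace(0, 2 * np.pi, 200)
ax.plot(x, np.sin(x))
w.redraw()                           # applies the grid setting and tight_layout

w.show()
sys.exit(app.exec_())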
kangwonlee/ECA
lab_01_intro/06_barh_demo.py
1
1535
# -*- coding: utf8 -*-
"""
Simple demo of a horizontal bar chart.
"""
# "lines_bars_and_markers example code: barh_demo.py" — Matplotlib 1.5.1 documentation. [Online]. Available:
# http://matplotlib.org/examples/lines_bars_and_markers/barh_demo.html. [Accessed: 21-Aug-2016].

# The matplotlib.pyplot module contains the plotting functionality.
# It is imported under the name plt so its functions can be used;
# the related functions then start with plt.
import matplotlib.pyplot as plt
# Reset the rc settings to their defaults.
plt.rcdefaults()

# Import the numpy module, which provides array and matrix functionality.
import numpy as np  # the related functions start with np.

# Example data to plot
# y axis: people's names
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
# positions of the names along the y axis
y_pos = np.arange(len(people))
# x axis: performance (one random number per person)
performance = 3 + 10 * np.random.rand(len(people))
# x axis error bars: errors (one random number per person)
error = np.random.rand(len(people))

# Start preparing the plot
# create the horizontal bar chart
plt.barh(y_pos, performance, xerr=error, align='center', alpha=0.4)
# show the names along the y axis
plt.yticks(y_pos, people)
# x axis label
plt.xlabel('Performance')
# plot title
plt.title('How fast do you want to go today?')
# Finished preparing the plot

# Display the prepared plot on screen
plt.show()
apache-2.0
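For comparison, a sketch of the same chart written with Matplotlib's object-oriented interface (fig/ax) instead of the pyplot state machine; the data are identical to the demo above:

import numpy as np
import matplotlib.pyplot as plt

people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))

fig, ax = plt.subplots()
ax.barh(y_pos, performance, xerr=error, align='center', alpha=0.4)
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')
plt.show()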
mrcouts/Bootstrap-Paradox
Experimental/Aquisicoes/CTCx_circulo/aquisicoes.py
1
8604
# -*- coding: utf-8 -*- import matplotlib.pyplot as plt import numpy as np from math import pi as Pi from txt2py import * from matplotlib.ticker import MultipleLocator A = txt2py("aquisicao_circulo_CTCx_lambda60_1.txt") #aquisicao_circulo_PIDSMCx_lambda70_phi6_k70_0_3: e_quad = 0.912252649215 | tau_quad = 0.238929403956 | s1_quad = 4.83123738371 | s2_quad = 4.92973723999 | ex_quad = 0.693586588682 | ey_quad = 0.592572814091 | tau1_quad = 0.163618207999 | tau2_quad = 0.174115886943 #aquisicao_SMCt_lambda60_phi20_k363_3: e_quad = 1.0865600936 | tau_quad = 0.237820339378 | s1_quad = 55.7256386861 | s2_quad = 52.1114451708 | ex_quad = 0.89867749944 | ey_quad = 0.610730373405 | tau1_quad = 0.162677093788 | tau2_quad = 0.173478174358 #aquisicao_circulo_SMCx_lambda60_phi6_k103_0_3: e_quad = 1.27647821995 | tau_quad = 0.260967898131 | s1_quad = 8.35898181713 | s2_quad = 4.9935905567 | ex_quad = 0.971027682922 | ey_quad = 0.828554092984 | tau1_quad = 0.169215497632 | tau2_quad = 0.198671485664 #aquisicao_circulo_PIDx_lambda70_4: e_quad = 1.28260640104 | tau_quad = 0.219666166256 | s1_quad = 6.09596142926 | s2_quad = 4.45005347346 | ex_quad = 1.14463260263 | ey_quad = 0.578701464488 | tau1_quad = 0.149533211028 | tau2_quad = 0.16091315483 #aquisicao_circulo_CTCt_lambda60_3: e_quad = 1.61909535266 | tau_quad = 0.202338133235 | s1_quad = 45.0666924646 | s2_quad = 53.5093564972 | ex_quad = 1.31948403439 | ey_quad = 0.938313190784 | tau1_quad = 0.132682753043 | tau2_quad = 0.152761275217 #aquisicao_circulo_CTCx_lambda60_1: e_quad = 1.63913254193 | tau_quad = 0.212001438004 | s1_quad = 5.99433632576 | s2_quad = 4.31327608202 | ex_quad = 1.358628447 | ey_quad = 0.91699740076 | tau1_quad = 0.140617598848 | tau2_quad = 0.158654658331 #aquisicao_circulo_PIDSMCt_lambda35_phi20_k200_6: e_quad = 3.34857383374 | tau_quad = 0.198843595669 | s1_quad = 38.0594077422 | s2_quad = 36.1432633763 | ex_quad = 1.6688967167 | ey_quad = 2.90305536788 | tau1_quad = 0.135978269066 | tau2_quad = 0.14508165246 #aquisicao_circulo_PIDt_lambda35_11: e_quad = 4.45725019704 | tau_quad = 0.186064672078 | s1_quad = 34.1700477306 | s2_quad = 32.2174188766 | ex_quad = 2.3747188143 | ey_quad = 3.77197426714 | tau1_quad = 0.120771293755 | tau2_quad = 0.141542773748 #aquisicao_triangulo_SMCx_lambda60_phi6_k103_0_2: e_quad = 1.930624, i_quad = 4.677893, ex_quad = 1.499759, s1_quad = 8.5627540, s2_quad = 15.5908030, ey_quad = 1.215743, i1_quad = 3.047811, i2_quad = 3.548737 #aquisicao_triangulo_SMCt_lambda60_phi20_k363_2: e_quad = 2.315927, i_quad = 4.790667, ex_quad = 2.073283, s1_quad = 90.622643, s2_quad = 96.8548660, ey_quad = 1.031995, i1_quad = 3.142629, i2_quad = 3.615850 #aquisicao_triangulo_PIDSMCx_lambda60_phi6_k103_0_3: e_quad = 2.534649, i_quad = 4.721774, ex_quad = 0.985656, s1_quad = 9.2306470, s2_quad = 23.4585950, ey_quad = 2.335151, i1_quad = 2.900244, i2_quad = 3.726088 #aquisicao_triangulo_CTCx_lambda60_4: e_quad = 2.593759, i_quad = 4.709301, ex_quad = 2.198307, s1_quad = 10.335380, s2_quad = 15.6801630, ey_quad = 1.376601, i1_quad = 3.014247, i2_quad = 3.618262 #aquisicao_triangulo_CTCt_lambda60_5: e_quad = 2.607846, i_quad = 4.541315, ex_quad = 2.244732, s1_quad = 85.878242, s2_quad = 106.841187, ey_quad = 1.327417, i1_quad = 2.874919, i2_quad = 3.515448 #aquisicao_triangulo_PIDx_lambda70_6: e_quad = 2.803854, i_quad = 4.803226, ex_quad = 1.434845, s1_quad = 10.004310, s2_quad = 23.8964730, ey_quad = 2.408903, i1_quad = 2.968096, i2_quad = 3.776425 #aquisicao_triangulo_PIDSMCt_lambda25_phi90_k200_2: e_quad = 7.304744, 
i_quad = 5.331062, ex_quad = 4.019952, s1_quad = 81.700752, s2_quad = 70.6879430, ey_quad = 6.099121, i1_quad = 3.646859, i2_quad = 3.888527 #aquisicao_triangulo_PIDt_lambda25_2: e_quad = 7.615739, i_quad = 4.938209, ex_quad = 3.926870, s1_quad = 70.306961, s2_quad = 66.9731520, ey_quad = 6.525272, i1_quad = 3.150745, i2_quad = 3.802461 end = 3000 t_np = np.array([A[i][0] for i in range(end)]) x_np = np.array([A[i][1] for i in range(end)]) y_np = np.array([A[i][2] for i in range(end)]) xref_np= np.array([A[i][3] for i in range(end)]) yref_np= np.array([A[i][4] for i in range(end)]) ex_np = np.array([A[i][5] for i in range(end)]) ey_np = np.array([A[i][6] for i in range(end)]) i1_np = np.array([A[i][7] for i in range(end)]) i2_np = np.array([A[i][8] for i in range(end)]) u1_np = np.array([A[i][9] for i in range(end)]) u2_np = np.array([A[i][10] for i in range(end)]) s1_np = np.array([A[i][11] for i in range(end)]) s2_np = np.array([A[i][12] for i in range(end)]) x_np = 1000.0*x_np y_np = 1000.0*y_np xref_np = 1000.0*xref_np yref_np = 1000.0*yref_np ex_np = 1000.0*ex_np ey_np = 1000.0*ey_np t_np = 0.001*t_np tau1_np = 0.055984*i1_np tau2_np = 0.0566596*i2_np ex_f = 0 ey_f = 0 for i in range(end-33,end): ex_f += ex_np[i] ey_f += ey_np[i] ex_f = ex_f/33.0 ey_f = ey_f/33.0 e_f = (ex_f**2 + ey_f**2)**0.5 print "ex_f =", ex_f, "| ey_f =", ey_f, "| e_f =", e_f """T = 1000 Ta = 3 n2 = (2-1)*T/3 n8 = (8-1)*T/3 print n2 print n8 ex_quad = 0.0 ey_quad = 0.0 tau1_quad = 0.0 tau2_quad = 0.0 s1_quad = 0.0 s2_quad = 0.0 for i in range(n2,n8): ex_quad += ex_np[i]**2 ey_quad += ey_np[i]**2 tau1_quad += tau1_np[i]**2 tau2_quad += tau2_np[i]**2 s1_quad += s1_np[i]**2 s2_quad += s2_np[i]**2 ex_quad = 1000*(2*ex_quad/(n8-n2))**0.5 ey_quad = 1000*(2*ey_quad/(n8-n2))**0.5 e_quad = (ex_quad**2 + ey_quad**2)**0.5 tau1_quad = (2*tau1_quad/(n8-n2))**0.5 tau2_quad = (2*tau2_quad/(n8-n2))**0.5 tau_quad = (tau1_quad**2 + tau2_quad**2)**0.5 s1_quad = (2*s1_quad/(n8-n2))**0.5 s2_quad = (2*s2_quad/(n8-n2))**0.5 print "e_quad =", e_quad, "| tau_quad =", tau_quad, "| s1_quad =", s1_quad, "| s2_quad =", s2_quad, "| ex_quad =", ex_quad, "| ey_quad =", ey_quad, "| tau1_quad =", tau1_quad, "| tau2_quad =", tau2_quad """ ml_minor = MultipleLocator(1) ml_major = MultipleLocator(4) fig, ax = plt.subplots() ax.plot(xref_np, yref_np, 'b', linewidth=1, label= 'Refer' + u'ê' 'ncia') ax.plot(x_np, y_np, 'r', linewidth=1, label='Trajet' + u'ó' + 'ria real') plt.xlabel(r'$x[mm]$', fontsize=18) plt.ylabel(r'$y[mm]$', fontsize=18) plt.axis('equal') plt.title('Trajet' + u'ó' + 'ria realizada') ax.legend(loc=4, ncol=1, prop={'size': 10}) plt.savefig('xy.png') plt.figure() plt.plot(t_np, ex_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$', fontsize=18) plt.ylabel(r'$e_x[mm]$', fontsize=18) plt.ylim(-9.5, 9.5) plt.axes().yaxis.set_minor_locator(ml_minor) plt.axes().yaxis.set_major_locator(ml_major) plt.title('Erro de posi' + u'ç' + u'ã' + 'o em fun' + u'ç' + u'ã' 'o do tempo (coordenada x)') plt.savefig('ex.png') plt.figure() plt.plot(t_np, ey_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$', fontsize=18) plt.ylabel(r'$e_y[mm]$', fontsize=18) plt.ylim(-9.5, 9.5) plt.axes().yaxis.set_minor_locator(ml_minor) plt.axes().yaxis.set_major_locator(ml_major) plt.title('Erro de posi' + u'ç' + u'ã' + 'o em fun' + u'ç' + u'ã' 'o do tempo (coordenada y)') plt.savefig('ey.png') plt.figure() plt.plot(t_np, tau1_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$', fontsize=18) plt.ylabel(r'$\tau_1[Nm]$', fontsize=18) plt.ylim(-0.7, 0.7) plt.title('Torque aplicado ' + 'em 
fun' + u'ç' + u'ã' 'o do tempo (atuador 1)') plt.savefig('tau1.png') plt.figure() plt.plot(t_np, tau2_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$', fontsize=18) plt.ylabel(r'$\tau_2[Nm]$', fontsize=18) plt.ylim(-0.7, 0.7) plt.title('Torque aplicado ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)') plt.savefig('tau2.png') plt.figure() plt.plot(t_np, u1_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$') plt.ylabel(r'$u_1[V]$') plt.title('Tens' + u'ã' + 'o aplicada ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 1)') plt.savefig('u1.png') plt.figure() plt.plot(t_np, u2_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$') plt.ylabel(r'$u_2[V]$') plt.title('Tens' + u'ã' + 'o aplicada ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)') plt.savefig('u2.png') plt.figure() plt.plot(t_np, s1_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$') plt.ylabel(r'$s_1[rad/s^2]$') plt.title('Vari' + u'á' + 'vel de escorregamento ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 1)') plt.savefig('s1.png') plt.figure() plt.plot(t_np, s2_np, 'r', linewidth=1.01) plt.xlabel(r'$t[s]$') plt.ylabel(r'$s_2[rad/s^2]$') plt.title('Vari' + u'á' + 'vel de escorregamento ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)') plt.savefig('s2.png')
gpl-3.0
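The trajectory-tracking script above averages the last 33 error samples for its steady-state figures and, in the commented-out block, reports RMS ("quad") metrics over a window of the run. A minimal NumPy sketch of those two computations on made-up error arrays standing in for ex_np/ey_np (the script's extra factor of 2 and unit scaling are omitted here):

```python
import numpy as np

rng = np.random.RandomState(0)
ex = rng.normal(scale=4.0, size=3000)   # stand-in for ex_np, x position error [mm]
ey = rng.normal(scale=6.0, size=3000)   # stand-in for ey_np, y position error [mm]

# Steady-state error: mean of the last 33 samples, as in the script.
ex_f = ex[-33:].mean()
ey_f = ey[-33:].mean()
e_f = np.hypot(ex_f, ey_f)

# RMS ("quad") error over the whole window.
ex_quad = np.sqrt(np.mean(ex ** 2))
ey_quad = np.sqrt(np.mean(ey ** 2))
e_quad = np.hypot(ex_quad, ey_quad)

print("ex_f =", ex_f, "| ey_f =", ey_f, "| e_f =", e_f)
print("e_quad =", e_quad)
```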
depet/scikit-learn
examples/linear_model/plot_omp.py
8
2229
""" =========================== Orthogonal Matching Pursuit =========================== Using orthogonal matching pursuit for recovering a sparse signal from a noisy measurement encoded with a dictionary """ print(__doc__) import pylab as pl import numpy as np from sklearn.linear_model import OrthogonalMatchingPursuit from sklearn.linear_model import OrthogonalMatchingPursuitCV from sklearn.datasets import make_sparse_coded_signal n_components, n_features = 512, 100 n_nonzero_coefs = 17 # generate the data ################### # y = Xw # |x|_0 = n_nonzero_coefs y, X, w = make_sparse_coded_signal(n_samples=1, n_components=n_components, n_features=n_features, n_nonzero_coefs=n_nonzero_coefs, random_state=0) idx, = w.nonzero() # distort the clean signal ########################## y_noisy = y + 0.05 * np.random.randn(len(y)) # plot the sparse signal ######################## pl.figure(figsize=(7, 7)) pl.subplot(4, 1, 1) pl.xlim(0, 512) pl.title("Sparse signal") pl.stem(idx, w[idx]) # plot the noise-free reconstruction #################################### omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) omp.fit(X, y) coef = omp.coef_ idx_r, = coef.nonzero() pl.subplot(4, 1, 2) pl.xlim(0, 512) pl.title("Recovered signal from noise-free measurements") pl.stem(idx_r, coef[idx_r]) # plot the noisy reconstruction ############################### omp.fit(X, y_noisy) coef = omp.coef_ idx_r, = coef.nonzero() pl.subplot(4, 1, 3) pl.xlim(0, 512) pl.title("Recovered signal from noisy measurements") pl.stem(idx_r, coef[idx_r]) # plot the noisy reconstruction with number of non-zeros set by CV ################################################################## omp_cv = OrthogonalMatchingPursuitCV() omp_cv.fit(X, y_noisy) coef = omp_cv.coef_ idx_r, = coef.nonzero() pl.subplot(4, 1, 4) pl.xlim(0, 512) pl.title("Recovered signal from noisy measurements with CV") pl.stem(idx_r, coef[idx_r]) pl.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38) pl.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit', fontsize=16) pl.show()
bsd-3-clause
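For reference, a minimal sketch of the recovery task the example above plots, built directly with NumPy so it does not depend on any particular make_sparse_coded_signal signature; the dictionary, the sparse coefficient vector, and the noise level are all made up here:

```python
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.RandomState(0)
n_samples, n_features, n_nonzero = 100, 512, 17

# Random dictionary and a sparse coefficient vector (hypothetical data).
X = rng.randn(n_samples, n_features)
w = np.zeros(n_features)
support = rng.choice(n_features, n_nonzero, replace=False)
w[support] = rng.randn(n_nonzero)

y = X @ w + 0.05 * rng.randn(n_samples)   # noisy measurements

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero)
omp.fit(X, y)

print("true support:     ", np.sort(support))
print("recovered support:", np.sort(omp.coef_.nonzero()[0]))
```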
johnmgregoire/NanoCalorimetry
plot_heatrate_singlecell.py
1
5180
import time, copy import os import sys import numpy import h5py #from PnSC_ui import * #from PnSC_dataimport import * from PnSC_SCui import * #from PnSC_math import * from PnSC_h5io import * from PnSC_main import * from matplotlib.ticker import FuncFormatter import scipy.integrate selectcell=11 p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5' def myexpformat(x, pos): for ndigs in range(2): lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-') if eval(lab)==x: return lab return lab ExpTickLabels=FuncFormatter(myexpformat) def make_ticklabels_invisible(ax, x=True, y=True): if x: for tl in ax.get_xticklabels(): tl.set_visible(False) if y: for tl in ax.get_yticklabels(): tl.set_visible(False) def paramave_T(d, T, Twin=10., hpsdkey='sampleheatrate'): #i=numpy.argmin((T-d['sampletemperature'])**2) Ta=d['sampletemperature'][cycleindex] x=numpy.where((Ta>=T-Twin)&(Ta<=T+Twin))[0] prev=numpy.array([not (t-1 in x) for t in x]) previ=numpy.where(prev)[0] if len(previ)==0: return 0. stopi=numpy.append(previ[1:],len(x)) longestbunchind=numpy.argmax(stopi-previ) inds=x[previ[longestbunchind]:stopi[longestbunchind]] return d[hpsdkey][cycleindex][inds].mean() def findenthalpyandpinacles(segdict, critenth=1.e-5, dTmin=.4, Tmeanmin=100.): T=segdict['sampletemperature'][cycleindex] C=segdict['sampleheatcapacity'][cycleindex] nci=numpy.where((C[:-1]>0.)&(C[1:]<=0.))[0]#neg crossings pci=numpy.where((C[1:]>0.)&(C[:-1]<=0.))[0]#pos crossings ci=numpy.sort(numpy.concatenate([nci, pci])) ans=[] for i, j in zip(ci[:-1], ci[1:]): enth=scipy.integrate.trapz(C[i:j], T[i:j]) if numpy.abs(enth)>critenth and (T[j]-T[i])>dTmin: itemp=numpy.argmax(numpy.abs(C[i:j])) Tmean=scipy.integrate.trapz(C[i:j]*T[i:j], T[i:j])/scipy.integrate.trapz(C[i:j], T[i:j]) if Tmean<Tmeanmin: continue ans+=[dict([('enthalpy', enth), ('T_Cmax', T[i:j][itemp]), ('Cmax', C[i:j][itemp]), ('Tweightedmean', Tmean), ('cycindstart', i), ('cycindstop', j)])] return ans nskip=100 cycleindex=0 #p=mm.h5path #f=h5py.File(p, mode='r+') #f=h5py.File(p, mode='r') savef='C:/Users/JohnnyG/Documents/HarvardWork/MG/PnSCplots/batchplotbycell_June2' plotTlim=(50., 700.) 
metadictlist=[] allsegdict=[] f=h5py.File(p, mode='r') cg=f['calbycellmetadata'][`selectcell`] for mg in cg.itervalues(): if isinstance(mg, h5py.Group) and 'name' in mg.attrs.keys(): d={} for k, v in mg.attrs.iteritems(): d[k]=v # if selectcell==1 and d['name'].startswith('heat1'):#heat1a was botched and heat1b we don't know cooling rate and the XRd for heat0 was questionable anyway # continue metadictlist+=[d] allsegdict+=[CreateHeatProgSegDictList(p, d['name'], d['h5hpname'])] #xrddictlist=[] #if 'xrdbycell' in f and `selectcell` in f['xrdbycell']: # cg=f['xrdbycell'][`selectcell`] # for mg in cg.itervalues(): # if isinstance(mg, h5py.Group): # d={} # for k, v in mg.attrs.iteritems(): # d[k]=v # xrddictlist+=[d] f.close() orderarray=numpy.abs(numpy.array([metadict['heatrate_170C500C'] for metadict in metadictlist])) sortinds=numpy.argsort(orderarray) cols=['b', (160./256.,160./256.,0), 'r', 'g', 'c', 'm', 'k'] ## plotting series of heat ramps mult=1.e6 nplots=len(orderarray) pylab.figure(figsize=(8, 8)) axl=[pylab.subplot(nplots, 1, nplots)] for i in range(1, nplots): #ax=pylab.subplot2grid((n, 3), (n-1-i, 0), colspan=2, sharex=axl[0], sharey=axl[0]) ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0], sharey=axl[0]) pylab.setp(ax.get_xticklabels(), visible=False) axl+=[ax] f=h5py.File(p, mode='r') x=[] y=[] x2=[] y2=[] for count, i in enumerate(sortinds): hpsdl=allsegdict[i] metadict=metadictlist[i] pave=paramave_T(hpsdl[metadict['heatseg']], 400., Twin=100., hpsdkey='samplepower') dtave=paramave_T(hpsdl[metadict['heatseg']], 400., Twin=100., hpsdkey='sampleheatrate') cdtave=numpy.abs(paramave_T(hpsdl[metadict['coolseg']], 400., Twin=100., hpsdkey='sampleheatrate')) if dtave>0: y+=[dtave/pave] s=f['Calorimetry'][metadict['name']]['measurement/HeatProgram'][metadict['h5hpname']].attrs['ambient_atmosphere'] press=(s.endswith('mT') and (eval(s.strip().partition('mT')[0]), ) or (0., ))[0] x+=[press] print metadict['name'], pave, dtave, press x2+=[press] y2+=[cdtave] if metadict['name']=='heat4a': break f.close() pylab.clf() pylab.plot(x, y, 'ro') pylab.xlabel('He pressure (mT)', fontsize=14) pylab.ylabel('heat rate per power, dT/dt / P (K/s/W), at 300-500C', fontsize=14) pylab.gca().yaxis.set_major_formatter(ExpTickLabels) pylab.twinx() pylab.plot(x2, y2, 'bo') pylab.gca().yaxis.set_major_formatter(ExpTickLabels) pylab.ylabel('cool rate (K/s), at 300-500C', fontsize=14) pylab.show()
bsd-3-clause
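The analysis script above collects each heat program's metadata by walking an HDF5 group and copying its attributes into dicts. A self-contained sketch of that pattern with h5py, writing a small throwaway file (demo_meta.h5 and its attribute values are hypothetical) and using Python 3 style values()/items() instead of the script's itervalues()/iteritems():

```python
import h5py
import numpy as np

# Build a tiny HDF5 file with the same group/attribute layout (hypothetical data).
with h5py.File("demo_meta.h5", "w") as f:
    cg = f.create_group("calbycellmetadata/11")
    for i, rate in enumerate([100.0, 1000.0, 10000.0]):
        mg = cg.create_group("heat%d" % i)
        mg.attrs["name"] = "heat%d" % i
        mg.attrs["heatrate_170C500C"] = rate

# Collect each subgroup's attributes into a list of dicts, as in the script.
metadictlist = []
with h5py.File("demo_meta.h5", "r") as f:
    cg = f["calbycellmetadata/11"]
    for mg in cg.values():
        if isinstance(mg, h5py.Group) and "name" in mg.attrs:
            metadictlist.append(dict(mg.attrs.items()))

# Order the records by heating rate, mirroring the numpy.argsort step above.
order = np.argsort([abs(d["heatrate_170C500C"]) for d in metadictlist])
print([metadictlist[i]["name"] for i in order])
```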
evan-magnusson/dynamic
Data/Calibration/Firm Calibration Python/parameters/depreciation/depreciation_calibration.py
2
2016
""" Depreciation Rate Calibration (depreciation_calibration.py): ------------------------------------------------------------------------------- Last updated: 6/26/2015. This module calibrates the firm economic and tax depreciation parameters. """ # Packages: import os.path import sys import numpy as np import pandas as pd # Directories: _CUR_DIR = os.path.dirname(__file__) _DATA_DIR = os.path.join(_CUR_DIR, "data") _PROC_DIR = os.path.join(_CUR_DIR, "processing") _OUT_DIR = os.path.join(_CUR_DIR, "output") # Importing custom modules: import naics_processing as naics import constants as cst # Importing depreciation helper custom modules: sys.path.append(_PROC_DIR) import calc_rates as calc_rates import read_bea as read_bea import read_inventories as read_inv import read_land as read_land # Dataframe names: _CODE_DF_NM = cst.CODE_DF_NM # Dataframe column names: _CORP_TAX_SECTORS_NMS_DICT = cst.CORP_TAX_SECTORS_NMS_DICT _CORP_NMS = _CORP_TAX_SECTORS_NMS_DICT.values() _NON_CORP_TAX_SECTORS_NMS_DICT = cst.NON_CORP_TAX_SECTORS_NMS_DICT _NCORP_NMS = _NON_CORP_TAX_SECTORS_NMS_DICT.values() def init_depr_rates(asset_tree=naics.generate_tree(), get_econ=False, get_tax_est=False, get_tax_150=False, get_tax_200=False, get_tax_sl=False, get_tax_ads=False, soi_from_out=False, output_data=False): """ This fun """ # Calculating the fixed asset data: fixed_asset_tree = read_bea.read_bea(asset_tree) # Calculating the inventory data: inv_tree = read_inv.read_inventories(asset_tree) # Calculating the land data: land_tree = read_land.read_land(asset_tree) # Calculating the depreciation rates: econ_depr_tree = calc_rates.calc_depr_rates(fixed_asset_tree, inv_tree, land_tree) tax_depr_tree = calc_rates.calc_tax_depr_rates(fixed_asset_tree, inv_tree, land_tree) #naics.pop_rates(tax_depr_tree) return {"Econ": econ_depr_tree, "Tax": tax_depr_tree}
mit
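A small sketch of the directory and sys.path setup the calibration module above relies on: paths are resolved relative to the module file, and the processing folder is appended to sys.path so the helper modules can be imported without packaging (the commented import name is hypothetical):

```python
import os.path
import sys

_CUR_DIR = os.path.dirname(__file__)               # directory containing this module
_DATA_DIR = os.path.join(_CUR_DIR, "data")         # raw inputs
_PROC_DIR = os.path.join(_CUR_DIR, "processing")   # helper modules live here
_OUT_DIR = os.path.join(_CUR_DIR, "output")        # calibrated output

# Make the processing helpers importable without installing them as a package.
if _PROC_DIR not in sys.path:
    sys.path.append(_PROC_DIR)

# import calc_rates  # hypothetical helper, resolved from _PROC_DIR
```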
belltailjp/scikit-learn
examples/model_selection/grid_search_text_feature_extraction.py
253
4158
""" ========================================================== Sample pipeline for text feature extraction and evaluation ========================================================== The dataset used in this example is the 20 newsgroups dataset which will be automatically downloaded and then cached and reused for the document classification example. You can adjust the number of categories by giving their names to the dataset loader or setting them to None to get the 20 of them. Here is a sample output of a run on a quad-core machine:: Loading 20 newsgroups dataset for categories: ['alt.atheism', 'talk.religion.misc'] 1427 documents 2 categories Performing grid search... pipeline: ['vect', 'tfidf', 'clf'] parameters: {'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07), 'clf__n_iter': (10, 50, 80), 'clf__penalty': ('l2', 'elasticnet'), 'tfidf__use_idf': (True, False), 'vect__max_n': (1, 2), 'vect__max_df': (0.5, 0.75, 1.0), 'vect__max_features': (None, 5000, 10000, 50000)} done in 1737.030s Best score: 0.940 Best parameters set: clf__alpha: 9.9999999999999995e-07 clf__n_iter: 50 clf__penalty: 'elasticnet' tfidf__use_idf: True vect__max_n: 2 vect__max_df: 0.75 vect__max_features: 50000 """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause from __future__ import print_function from pprint import pprint from time import time import logging from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.linear_model import SGDClassifier from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) data = fetch_20newsgroups(subset='train', categories=categories) print("%d documents" % len(data.filenames)) print("%d categories" % len(data.target_names)) print() ############################################################################### # define a pipeline combining a text feature extractor with a simple # classifier pipeline = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier()), ]) # uncommenting more parameters will give better exploring power but will # increase processing time in a combinatorial way parameters = { 'vect__max_df': (0.5, 0.75, 1.0), #'vect__max_features': (None, 5000, 10000, 50000), 'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams #'tfidf__use_idf': (True, False), #'tfidf__norm': ('l1', 'l2'), 'clf__alpha': (0.00001, 0.000001), 'clf__penalty': ('l2', 'elasticnet'), #'clf__n_iter': (10, 50, 80), } if __name__ == "__main__": # multiprocessing requires the fork to happen in a __main__ protected # block # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1) print("Performing grid search...") print("pipeline:", [name for name, _ in pipeline.steps]) print("parameters:") pprint(parameters) t0 = time() 
grid_search.fit(data.data, data.target) print("done in %0.3fs" % (time() - t0)) print() print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name]))
bsd-3-clause
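A smaller, quick-running variant of the grid search above: the same pipeline, but imported from sklearn.model_selection (the example uses the older sklearn.grid_search module) and with only two hyperparameters so the search finishes after a handful of candidate fits:

```python
from pprint import pprint

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

categories = ["alt.atheism", "talk.religion.misc"]
data = fetch_20newsgroups(subset="train", categories=categories)

pipeline = Pipeline([
    ("vect", CountVectorizer()),
    ("tfidf", TfidfTransformer()),
    ("clf", SGDClassifier()),
])

# A deliberately tiny grid: 2 x 2 = 4 candidate settings.
parameters = {
    "vect__ngram_range": [(1, 1), (1, 2)],
    "clf__alpha": [1e-5, 1e-6],
}

grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
grid_search.fit(data.data, data.target)

print("Best score: %0.3f" % grid_search.best_score_)
pprint(grid_search.best_params_)
```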
sanketloke/scikit-learn
benchmarks/bench_random_projections.py
397
8900
""" =========================== Random projection benchmark =========================== Benchmarks for random projections. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import collections import numpy as np import scipy.sparse as sp from sklearn import clone from sklearn.externals.six.moves import xrange from sklearn.random_projection import (SparseRandomProjection, GaussianRandomProjection, johnson_lindenstrauss_min_dim) def type_auto_or_float(val): if val == "auto": return "auto" else: return float(val) def type_auto_or_int(val): if val == "auto": return "auto" else: return int(val) def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_scikit_transformer(X, transfomer): gc.collect() clf = clone(transfomer) # start time t_start = datetime.now() clf.fit(X) delta = (datetime.now() - t_start) # stop time time_to_fit = compute_time(t_start, delta) # start time t_start = datetime.now() clf.transform(X) delta = (datetime.now() - t_start) # stop time time_to_transform = compute_time(t_start, delta) return time_to_fit, time_to_transform # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None): rng = np.random.RandomState(random_state) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def print_row(clf_type, time_fit, time_transform): print("%s | %s | %s" % (clf_type.ljust(30), ("%.4fs" % time_fit).center(12), ("%.4fs" % time_transform).center(12))) if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-features", dest="n_features", default=10 ** 4, type=int, help="Number of features in the benchmarks") op.add_option("--n-components", dest="n_components", default="auto", help="Size of the random subspace." " ('auto' or int > 0)") op.add_option("--ratio-nonzeros", dest="ratio_nonzeros", default=10 ** -3, type=float, help="Number of features in the benchmarks") op.add_option("--n-samples", dest="n_samples", default=500, type=int, help="Number of samples in the benchmarks") op.add_option("--random-seed", dest="random_seed", default=13, type=int, help="Seed used by the random number generators.") op.add_option("--density", dest="density", default=1 / 3, help="Density used by the sparse random projection." " ('auto' or float (0.0, 1.0]") op.add_option("--eps", dest="eps", default=0.5, type=float, help="See the documentation of the underlying transformers.") op.add_option("--transformers", dest="selected_transformers", default='GaussianRandomProjection,SparseRandomProjection', type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
Available: " "GaussianRandomProjection,SparseRandomProjection") op.add_option("--dense", dest="dense", default=False, action="store_true", help="Set input space as a dense matrix.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) opts.n_components = type_auto_or_int(opts.n_components) opts.density = type_auto_or_float(opts.density) selected_transformers = opts.selected_transformers.split(',') ########################################################################### # Generate dataset ########################################################################### n_nonzeros = int(opts.ratio_nonzeros * opts.n_features) print('Dataset statics') print("===========================") print('n_samples \t= %s' % opts.n_samples) print('n_features \t= %s' % opts.n_features) if opts.n_components == "auto": print('n_components \t= %s (auto)' % johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)) else: print('n_components \t= %s' % opts.n_components) print('n_elements \t= %s' % (opts.n_features * opts.n_samples)) print('n_nonzeros \t= %s per feature' % n_nonzeros) print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros) print('') ########################################################################### # Set transformer input ########################################################################### transformers = {} ########################################################################### # Set GaussianRandomProjection input gaussian_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed } transformers["GaussianRandomProjection"] = \ GaussianRandomProjection(**gaussian_matrix_params) ########################################################################### # Set SparseRandomProjection input sparse_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed, "density": opts.density, "eps": opts.eps, } transformers["SparseRandomProjection"] = \ SparseRandomProjection(**sparse_matrix_params) ########################################################################### # Perform benchmark ########################################################################### time_fit = collections.defaultdict(list) time_transform = collections.defaultdict(list) print('Benchmarks') print("===========================") print("Generate dataset benchmarks... ", end="") X_dense, X_sparse = make_sparse_random_data(opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed) X = X_dense if opts.dense else X_sparse print("done") for name in selected_transformers: print("Perform benchmarks for %s..." % name) for iteration in xrange(opts.n_times): print("\titer %s..." % iteration, end="") time_to_fit, time_to_transform = bench_scikit_transformer(X_dense, transformers[name]) time_fit[name].append(time_to_fit) time_transform[name].append(time_to_transform) print("done") print("") ########################################################################### # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Transformer performance:") print("===========================") print("Results are averaged over %s repetition(s)." 
% opts.n_times) print("") print("%s | %s | %s" % ("Transformer".ljust(30), "fit".center(12), "transform".center(12))) print(31 * "-" + ("|" + "-" * 14) * 2) for name in sorted(selected_transformers): print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name])) print("") print("")
bsd-3-clause
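A minimal usage sketch of the two transformers the benchmark above times, with johnson_lindenstrauss_min_dim choosing the target dimensionality; the input matrix is random, just like the benchmark's generated data, and kept dense for brevity:

```python
import numpy as np
from sklearn.random_projection import (GaussianRandomProjection,
                                       SparseRandomProjection,
                                       johnson_lindenstrauss_min_dim)

rng = np.random.RandomState(13)
X = rng.randn(500, 10000)  # 500 samples, 10k features

# Minimum dimension that preserves pairwise distances within eps=0.5.
n_components = johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=0.5)
print("JL bound:", n_components)

for transformer in (GaussianRandomProjection(n_components=n_components,
                                             random_state=13),
                    SparseRandomProjection(n_components=n_components,
                                           random_state=13)):
    X_new = transformer.fit_transform(X)
    print(type(transformer).__name__, "->", X_new.shape)
```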
IntelLabs/hpat
examples/series_getitem/series_getitem_slice.py
1
1840
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#
#     Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************

"""
Expected Series:
3    7
4    6
5    5
6    4
dtype: int64
"""

import numpy as np
import pandas as pd
from numba import njit


@njit
def series_getitem_slice():
    series = pd.Series(np.arange(10, 0, -1))  # Series of 10, 9, ..., 1
    return series[3:7]  # Accessing series by slice index


print(series_getitem_slice())
bsd-2-clause
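The compiled function above returns exactly what plain pandas would; a short sketch without numba showing the positional slice that yields the expected output quoted in the docstring:

```python
import numpy as np
import pandas as pd

series = pd.Series(np.arange(10, 0, -1))  # values 10, 9, ..., 1 with index 0..9
sliced = series[3:7]                       # positional slice: rows 3..6
print(sliced)
# 3    7
# 4    6
# 5    5
# 6    4
# dtype: int64
```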
karstenw/nodebox-pyobjc
examples/Extended Application/matplotlib/examples/axes_grid1/demo_axes_grid.py
1
4817
""" ============== Demo Axes Grid ============== """ import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid # nodebox section if __name__ == '__builtin__': # were in nodebox import os import tempfile W = 800 inset = 20 size(W, 600) plt.cla() plt.clf() plt.close('all') def tempimage(): fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False) fname = fob.name fob.close() return fname imgx = 20 imgy = 0 def pltshow(plt, dpi=150): global imgx, imgy temppath = tempimage() plt.savefig(temppath, dpi=dpi) dx,dy = imagesize(temppath) w = min(W,dx) image(temppath,imgx,imgy,width=w) imgy = imgy + dy + 20 os.remove(temppath) size(W, HEIGHT+dy+40) else: def pltshow(mplpyplot): mplpyplot.show() # nodebox section end def get_demo_image(): import numpy as np from matplotlib.cbook import get_sample_data f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False) z = np.load(f) # z is a numpy array of 15x15 return z, (-3, 4, -4, 3) def demo_simple_grid(fig): """ A grid of 2x2 images with 0.05 inch pad between images and only the lower-left axes is labeled. """ grid = ImageGrid(fig, 141, # similar to subplot(141) nrows_ncols=(2, 2), axes_pad=0.05, label_mode="1", ) Z, extent = get_demo_image() for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest") # This only affects axes in first column and second row as share_all = # False. grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) def demo_grid_with_single_cbar(fig): """ A grid of 2x2 images with a single colorbar """ grid = ImageGrid(fig, 142, # similar to subplot(142) nrows_ncols=(2, 2), axes_pad=0.0, share_all=True, label_mode="L", cbar_location="top", cbar_mode="single", ) Z, extent = get_demo_image() for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest") grid.cbar_axes[0].colorbar(im) for cax in grid.cbar_axes: cax.toggle_label(False) # This affects all axes as share_all = True. grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) def demo_grid_with_each_cbar(fig): """ A grid of 2x2 images. Each image has its own colorbar. """ grid = ImageGrid(fig, 143, # similar to subplot(143) nrows_ncols=(2, 2), axes_pad=0.1, label_mode="1", share_all=True, cbar_location="top", cbar_mode="each", cbar_size="7%", cbar_pad="2%", ) Z, extent = get_demo_image() for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest") grid.cbar_axes[i].colorbar(im) for cax in grid.cbar_axes: cax.toggle_label(False) # This affects all axes because we set share_all = True. grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) def demo_grid_with_each_cbar_labelled(fig): """ A grid of 2x2 images. Each image has its own colorbar. """ grid = ImageGrid(fig, 144, # similar to subplot(144) nrows_ncols=(2, 2), axes_pad=(0.45, 0.15), label_mode="1", share_all=True, cbar_location="right", cbar_mode="each", cbar_size="7%", cbar_pad="2%", ) Z, extent = get_demo_image() # Use a different colorbar range every time limits = ((0, 1), (-2, 2), (-1.7, 1.4), (-1.5, 1)) for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest", vmin=limits[i][0], vmax=limits[i][1]) grid.cbar_axes[i].colorbar(im) for i, cax in enumerate(grid.cbar_axes): cax.set_yticks((limits[i][0], limits[i][1])) # This affects all axes because we set share_all = True. 
grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) if 1: F = plt.figure(1, (10.5, 2.5)) F.subplots_adjust(left=0.05, right=0.95) demo_simple_grid(F) demo_grid_with_single_cbar(F) demo_grid_with_each_cbar(F) demo_grid_with_each_cbar_labelled(F) plt.draw() pltshow(plt)
mit
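A minimal ImageGrid sketch covering just the single-shared-colorbar case from the demo above, with random data standing in for the bivariate_normal.npy sample and a plain plt.show() in place of the NodeBox wrapper:

```python
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid

Z = np.random.RandomState(0).rand(15, 15)   # stand-in for the sample image

fig = plt.figure(figsize=(4, 4))
grid = ImageGrid(fig, 111,                  # one grid filling the whole figure
                 nrows_ncols=(2, 2),
                 axes_pad=0.0,
                 share_all=True,
                 label_mode="L",
                 cbar_location="top",
                 cbar_mode="single")

for ax in grid:
    im = ax.imshow(Z, interpolation="nearest")
grid.cbar_axes[0].colorbar(im)              # one colorbar shared by all panels

plt.show()
```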
fujy/ROS-Project
src/rbx1/rbx1_vision/nodes/common.py
2
3499
import numpy as np import cv2 import os image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm'] def splitfn(fn): path, fn = os.path.split(fn) name, ext = os.path.splitext(fn) return path, name, ext def anorm2(a): return (a*a).sum(-1) def anorm(a): return np.sqrt( anorm2(a) ) def homotrans(H, x, y): xs = H[0, 0]*x + H[0, 1]*y + H[0, 2] ys = H[1, 0]*x + H[1, 1]*y + H[1, 2] s = H[2, 0]*x + H[2, 1]*y + H[2, 2] return xs/s, ys/s def to_rect(a): a = np.ravel(a) if len(a) == 2: a = (0, 0, a[0], a[1]) return np.array(a, np.float64).reshape(2, 2) def rect2rect_mtx(src, dst): src, dst = to_rect(src), to_rect(dst) cx, cy = (dst[1] - dst[0]) / (src[1] - src[0]) tx, ty = dst[0] - src[0] * (cx, cy) M = np.float64([[ cx, 0, tx], [ 0, cy, ty], [ 0, 0, 1]]) return M def lookat(eye, target, up = (0, 0, 1)): fwd = np.asarray(target, np.float64) - eye fwd /= anorm(fwd) right = np.cross(fwd, up) right /= anorm(right) down = np.cross(fwd, right) R = np.float64([right, down, fwd]) tvec = -np.dot(R, eye) return R, tvec def mtx2rvec(R): w, u, vt = cv2.SVDecomp(R - np.eye(3)) p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0]) c = np.dot(vt[0], p) s = np.dot(vt[1], p) axis = np.cross(vt[0], vt[1]) return axis * np.arctan2(s, c) def draw_str(dst, (x, y), s): cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, linetype=cv2.CV_AA) cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), linetype=cv2.CV_AA) class Sketcher: def __init__(self, windowname, dests, colors_func): self.prev_pt = None self.windowname = windowname self.dests = dests self.colors_func = colors_func self.dirty = False self.show() cv2.setMouseCallback(self.windowname, self.on_mouse) def show(self): cv2.imshow(self.windowname, self.dests[0]) def on_mouse(self, event, x, y, flags, param): pt = (x, y) if event == cv2.EVENT_LBUTTONDOWN: self.prev_pt = pt if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON: for dst, color in zip(self.dests, self.colors_func()): cv2.line(dst, self.prev_pt, pt, color, 5) self.dirty = True self.prev_pt = pt self.show() else: self.prev_pt = None # palette data from matplotlib/_cm.py _jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1), (1, 0.5, 0.5)), 'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1), (0.91,0,0), (1, 0, 0)), 'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0), (1, 0, 0))} cmap_data = { 'jet' : _jet_data } def make_cmap(name, n=256): data = cmap_data[name] xs = np.linspace(0.0, 1.0, n) channels = [] eps = 1e-6 for ch_name in ['blue', 'green', 'red']: ch_data = data[ch_name] xp, yp = [], [] for x, y1, y2 in ch_data: xp += [x, x+eps] yp += [y1, y2] ch = np.interp(xs, xp, yp) channels.append(ch) return np.uint8(np.array(channels).T*255) def nothing(*arg, **kw): pass def clock(): return cv2.getTickCount() / cv2.getTickFrequency()
mit
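The make_cmap helper above builds a 256-entry lookup table by interpolating matplotlib's piecewise colormap data with np.interp. A short sketch of that interpolation for one channel, and of applying a full table to a grayscale image by NumPy indexing (the control points are the red entries of _jet_data; the image is random):

```python
import numpy as np

# Piecewise channel definition: (x, y_below, y_above) control points,
# in the same format as the _jet_data table above.
ch_data = ((0.0, 0.0, 0.0), (0.35, 0.0, 0.0), (0.66, 1.0, 1.0),
           (0.89, 1.0, 1.0), (1.0, 0.5, 0.5))

xs = np.linspace(0.0, 1.0, 256)
xp, yp = [], []
eps = 1e-6
for x, y1, y2 in ch_data:
    xp += [x, x + eps]        # duplicate x so step discontinuities survive interp
    yp += [y1, y2]
red = np.interp(xs, xp, yp)   # 256 interpolated red values in [0, 1]

# Apply a 256x3 lookup table to an 8-bit grayscale image by fancy indexing.
lut = np.uint8(np.stack([red, red, red], axis=1) * 255)  # grey ramp for brevity
gray = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
colored = lut[gray]           # shape (64, 64, 3)
print(colored.shape, colored.dtype)
```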
jasonbunk/NBodyGalaxySimulation
run_two_plummer_collision_MPI_and_OMP.py
1
3655
import os, sys, time import math import numpy as np from PlummerGalaxy import PlummerGalaxy from InitialConditions import InitialConditions '''from plot_or_make_video import MakeVideo import imp try: imp.find_module('matplotlib') matplotlibAvailable = True import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D except ImportError: matplotlibAvailable = False''' if len(sys.argv) < 6: print("usage: {prefix} {total num pts} {numMPIprocs} {numCacheGrps} {Nburst}") if len(sys.argv) >= 6: preffix=sys.argv[1] TotalNumPts = int(sys.argv[2]) numMPIprocs = sys.argv[3] numMPImemgrps = sys.argv[4] nburstburst = sys.argv[5] print("using user-supplied arguments") else: preffix="default" TotalNumPts = 600 numMPIprocs = str( 6 ) numMPImemgrps = str( 2 ) #will be forced to be 2 nburstburst = str( 3 ) print("QUITTING: PLEASE PROVIDE PROPER ARGS") quit() GravitationalConst = 1.0 # Settings: InitialConditionsFolder = "data/initialconditions/" OutputResultsFolder = "data/results/" createNewInitialConditions = True MakePositionsVideo = False UseImageMagickForFancierVideo = False #================================================================================= starttime = time.time() try: os.system('rm NBodySim_CPU_MPI/nbodycpumpi') #os.system('rm data/initialconditions/*') except: print("executable already deleted") print("compiling C++ code...") os.system("(cd NBodySim_CPU_MPI && make)") if createNewInitialConditions: assert(numMPImemgrps == '2' or numMPImemgrps == '1') timeStep = 0.1 timeMax = 10.0 epssqd = 0.05 AarsethHeader = str(TotalNumPts)+" 0.01 "+str(timeStep)+" "+str(timeMax)+" "+str(epssqd)+" "+str(GravitationalConst)+"\n" galaxy1 = PlummerGalaxy() galaxy1.npts = (TotalNumPts/2) galaxy1.R = 1.0 galaxy1.ZeroVelocities_Bool = False galaxy1.GenerateInitialConditions(-4, -4, 0) galaxy2 = PlummerGalaxy() galaxy2.npts = (TotalNumPts/2) galaxy2.R = 1.0 galaxy2.ZeroVelocities_Bool = False galaxy2.GenerateInitialConditions(4, 4, 0) if numMPImemgrps == '2': galaxy1.WriteInitialConditionsToFile(InitialConditionsFolder+preffix+"0.data", AarsethHeader) galaxy2.WriteInitialConditionsToFile(InitialConditionsFolder+preffix+"1.data", AarsethHeader) else: bothGalaxies = InitialConditions() bothGalaxies.extend(galaxy1) bothGalaxies.extend(galaxy2) AarsethHeader = str(TotalNumPts)+" 0.01 "+str(timeStep)+" "+str(timeMax)+" "+str(epssqd)+" "+str(GravitationalConst)+"\n" bothGalaxies.WriteInitialConditionsToFile(InitialConditionsFolder+preffix+"0.data", AarsethHeader) quit() #args: {NumDataSplits} {nburst-between-disksaves} {InitialConditionsFilename-Prefix} {OutputFilename-Prefix} print("Running compiled MPI/OpenMP C++ nbody code on two-Plummer-collision initial conditions files") os.system('mpirun -np '+numMPIprocs+' ./NBodySim_CPU_MPI/nbodycpumpi '+numMPImemgrps+' '+nburstburst+' '+InitialConditionsFolder+preffix+' '+OutputResultsFolder+preffix) endtime = time.time() print("that took "+ (endtime - starttime)+" seconds") ''' if True: print("launching renderer...") os.system("Renderer3D/Renderer3D "+OutputResultsFolder+"out_mpi_twocollisions_.data "+str(TotalNumPts/2)+" 0 1 1") if matplotlibAvailable and (MakePositionsVideo or MakeDistributionsVideo): print("beginning to make plots/video...") if MakePositionsVideo: MakeVideo(TotalNumPts, OutputResultsFolder+"out_MPI.data", "video_two_plummer_MPI_collision.avi", True, 8, UseImageMagickForFancierVideo) '''
gpl-3.0
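One note on the script above: its final report concatenates a float onto a string ("that took " + (endtime - starttime) + " seconds"), which raises a TypeError; formatting the value explicitly avoids that. A minimal sketch of the timing pattern (the sleep stands in for the compile and mpirun calls):

```python
import time

starttime = time.time()
time.sleep(0.1)          # stand-in for the real simulation run
endtime = time.time()

# Format the float rather than concatenating it onto a string.
print("that took %.2f seconds" % (endtime - starttime))
```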
rbalda/neural_ocr
env/lib/python2.7/site-packages/matplotlib/tests/test_skew.py
7
6965
""" Testing that skewed axes properly work """ from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools from matplotlib.externals import six from nose.tools import assert_true import numpy as np import matplotlib.pyplot as plt from matplotlib.testing.decorators import cleanup, image_comparison from matplotlib.axes import Axes import matplotlib.transforms as transforms import matplotlib.axis as maxis import matplotlib.spines as mspines import matplotlib.path as mpath import matplotlib.patches as mpatch from matplotlib.projections import register_projection # The sole purpose of this class is to look at the upper, lower, or total # interval as appropriate and see what parts of the tick to draw, if any. class SkewXTick(maxis.XTick): def draw(self, renderer): if not self.get_visible(): return renderer.open_group(self.__name__) lower_interval = self.axes.xaxis.lower_interval upper_interval = self.axes.xaxis.upper_interval if self.gridOn and transforms.interval_contains( self.axes.xaxis.get_view_interval(), self.get_loc()): self.gridline.draw(renderer) if transforms.interval_contains(lower_interval, self.get_loc()): if self.tick1On: self.tick1line.draw(renderer) if self.label1On: self.label1.draw(renderer) if transforms.interval_contains(upper_interval, self.get_loc()): if self.tick2On: self.tick2line.draw(renderer) if self.label2On: self.label2.draw(renderer) renderer.close_group(self.__name__) # This class exists to provide two separate sets of intervals to the tick, # as well as create instances of the custom tick class SkewXAxis(maxis.XAxis): def __init__(self, *args, **kwargs): maxis.XAxis.__init__(self, *args, **kwargs) self.upper_interval = 0.0, 1.0 def _get_tick(self, major): return SkewXTick(self.axes, 0, '', major=major) @property def lower_interval(self): return self.axes.viewLim.intervalx def get_view_interval(self): return self.upper_interval[0], self.axes.viewLim.intervalx[1] # This class exists to calculate the separate data range of the # upper X-axis and draw the spine there. It also provides this range # to the X-axis artist for ticking and gridlines class SkewSpine(mspines.Spine): def __init__(self, axes, spine_type): if spine_type == 'bottom': loc = 0.0 else: loc = 1.0 mspines.Spine.__init__(self, axes, spine_type, mpath.Path([(13, loc), (13, loc)])) def _adjust_location(self): trans = self.axes.transDataToAxes.inverted() if self.spine_type == 'top': yloc = 1.0 else: yloc = 0.0 left = trans.transform_point((0.0, yloc))[0] right = trans.transform_point((1.0, yloc))[0] pts = self._path.vertices pts[0, 0] = left pts[1, 0] = right self.axis.upper_interval = (left, right) # This class handles registration of the skew-xaxes as a projection as well # as setting up the appropriate transformations. It also overrides standard # spines and axes instances as appropriate. class SkewXAxes(Axes): # The projection must specify a name. This will be used be the # user to select the projection, i.e. ``subplot(111, # projection='skewx')``. 
name = 'skewx' def _init_axis(self): #Taken from Axes and modified to use our modified X-axis self.xaxis = SkewXAxis(self) self.spines['top'].register_axis(self.xaxis) self.spines['bottom'].register_axis(self.xaxis) self.yaxis = maxis.YAxis(self) self.spines['left'].register_axis(self.yaxis) self.spines['right'].register_axis(self.yaxis) def _gen_axes_spines(self): spines = {'top': SkewSpine(self, 'top'), 'bottom': mspines.Spine.linear_spine(self, 'bottom'), 'left': mspines.Spine.linear_spine(self, 'left'), 'right': mspines.Spine.linear_spine(self, 'right')} return spines def _set_lim_and_transforms(self): """ This is called once when the plot is created to set up all the transforms for the data, text and grids. """ rot = 30 #Get the standard transform setup from the Axes base class Axes._set_lim_and_transforms(self) # Need to put the skew in the middle, after the scale and limits, # but before the transAxes. This way, the skew is done in Axes # coordinates thus performing the transform around the proper origin # We keep the pre-transAxes transform around for other users, like the # spines for finding bounds self.transDataToAxes = (self.transScale + (self.transLimits + transforms.Affine2D().skew_deg(rot, 0))) # Create the full transform from Data to Pixels self.transData = self.transDataToAxes + self.transAxes # Blended transforms like this need to have the skewing applied using # both axes, in axes coords like before. self._xaxis_transform = (transforms.blended_transform_factory( self.transScale + self.transLimits, transforms.IdentityTransform()) + transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes # Now register the projection with matplotlib so the user can select # it. register_projection(SkewXAxes) @image_comparison(baseline_images=['skew_axes'], remove_text=True) def test_set_line_coll_dash_image(): fig = plt.figure() ax = fig.add_subplot(1, 1, 1, projection='skewx') ax.set_xlim(-50, 50) ax.set_ylim(50, -50) ax.grid(True) # An example of a slanted line at constant X l = ax.axvline(0, color='b') @image_comparison(baseline_images=['skew_rects'], remove_text=True) def test_skew_rectange(): fix, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(16, 12)) axes = axes.flat rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2)) axes[0].set_xlim([-4, 4]) axes[0].set_ylim([-4, 4]) axes[0].set_aspect('equal') for ax, (xrots, yrots) in zip(axes, rotations): xdeg, ydeg = 45 * xrots, 45 * yrots t = transforms.Affine2D().skew_deg(xdeg, ydeg) ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg)) ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2, transform=t + ax.transData, alpha=0.5, facecolor='coral')) plt.subplots_adjust(wspace=0, left=0, right=1, bottom=0) if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
mit
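A minimal sketch of the skew transform the tests above exercise: an Affine2D().skew_deg shear composed with the data transform and applied to a rectangle patch, as in test_skew_rectange (the angles here are arbitrary, chosen so the shear stays non-degenerate):

```python
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms

fig, ax = plt.subplots()
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
ax.set_aspect("equal")

# Skew the unit square by 45 degrees in x and -45 degrees in y.
t = transforms.Affine2D().skew_deg(45, -45)
ax.add_patch(mpatch.Rectangle((-1, -1), 2, 2,
                              transform=t + ax.transData,
                              alpha=0.5, facecolor="coral"))

plt.show()
```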
openfisca/openfisca-qt
openfisca_qt/scripts/validation/check_consistency_tests.py
1
4125
# -*- coding:utf-8 -*- # Created on 17 févr. 2013 # This file is part of OpenFisca. # OpenFisca is a socio-fiscal microsimulation software # Copyright © #2013 Clément Schaff, Mahdi Ben Jelloul # Licensed under the terms of the GVPLv3 or later license # (see openfisca/__init__.py for details) from openfisca_core import model from openfisca_core.columns import EnumCol from openfisca_core.simulations import SurveySimulation from pandas import concat def check_entities(simulation): is_ok = True message = None survey = simulation.survey for entity in model.ENTITIES_INDEX: id = survey.table['id' + entity] head = survey.table['qui' + entity] df = concat([id, head],axis=1) grouped_by_id = df.groupby(id) def is_there_head(group): dummy = (group == 0).sum() return dummy headcount = grouped_by_id["qui"+entity].aggregate({entity + " heads" : is_there_head}) result = headcount[headcount[entity + " heads"] != 1] if len(result) != 0: is_ok = False return is_ok, message def check_inputs_enumcols(simulation): """ Check that the enumcols are consistent with data in the survey dataframe Parameters ---------- simulation : SurveySimulation The simulation to check Returns ------- is_ok : bool True or False according to tests message : string """ # TODO: eventually should be a method of SurveySimulation specific for france is_ok = True message = None survey = simulation.input_table for var, varcol in survey.column_by_name.iteritems(): if isinstance(varcol, EnumCol): try: x = sorted(varcol.enum._nums.values()) if set(survey.table[var].unique()) > set(varcol.enum._nums.values()): print "Wrong nums for %s" %var print varcol.enum._nums print sorted(survey.table[var].unique()) is_ok = False except: is_ok = False print var print "Wrong nums" print varcol.enum print sorted(survey.table[var].unique()) print "\n" try: x = varcol.enum._vars except: is_ok = False print var print "wrong vars" print varcol.enum print sorted(survey.table[var].unique()) print "\n" return is_ok, message def check_weights(simulation): """ Check weights positiveness Parameters ---------- simulation : SurveySimulation The simulation to check Returns ------- is_ok : boolean True or False according to tests message : string, None if is_ok is True error message """ is_ok = True message = None survey = simulation.survey WEIGHT = model.WEIGHT weight = survey.get_value(WEIGHT) nb = sum(weight<=0) if nb != 0: is_ok = False message = "%i weights are less than or equal to zero" % nb return is_ok, message def toto(simulation): survey = simulation.survey # verifying the age of childrens quifam = survey.get_value('quifam') age = survey.get_value('age') if sum((quifam >= 2) & (age >= 21)) != 0: print "they are kids that are of age >= 21" # Problemes # enfants de plus de 21 ans et parents à charge dans les familles avec quifam=0 # idmen = survey.get_value('idmen') # from numpy import max as max_ # print max_(idmen) if __name__ == '__main__': year = 2006 simulation = SurveySimulation() simulation.set_config(year = year) simulation.set_param() simulation.set_survey() ok, message = check_inputs_enumcols(simulation) if not ok: print message ok, message = check_entities(simulation) if not ok: print message ok, message = check_weights(simulation) if not ok: print message
agpl-3.0
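The check_entities test above verifies that each entity id has exactly one head row (qui == 0). A self-contained pandas sketch of that check on made-up data, using groupby/apply rather than the script's dict-style aggregate:

```python
import pandas as pd

# Hypothetical survey extract: two families, one of them missing a head row.
df = pd.DataFrame({
    "idfam":  [1, 1, 1, 2, 2],
    "quifam": [0, 1, 2, 1, 2],   # family 2 has no row with quifam == 0
})

# Count the number of head rows (qui == 0) per entity id.
headcount = df.groupby("idfam")["quifam"].apply(lambda g: (g == 0).sum())

bad = headcount[headcount != 1]
is_ok = bad.empty
print(is_ok)   # False: family 2 has zero heads
print(bad)
```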
jkarnows/scikit-learn
sklearn/preprocessing/data.py
113
56747
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils.extmath import row_norms from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, min_max_axis, inplace_row_scale) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] def _mean_and_std(X, axis=0, with_mean=True, with_std=True): """Compute mean and std deviation for centering, scaling. Zero valued std components are reset to 1.0 to avoid NaNs when scaling. """ X = np.asarray(X) Xr = np.rollaxis(X, axis) if with_mean: mean_ = Xr.mean(axis=0) else: mean_ = None if with_std: std_ = Xr.std(axis=0) std_ = _handle_zeros_in_scale(std_) else: std_ = None return mean_, std_ def _handle_zeros_in_scale(scale): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0: scale = 1. elif isinstance(scale, np.ndarray): scale[scale == 0.0] = 1.0 scale[~np.isfinite(scale)] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like or CSR matrix. The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) mean_, std_ = _mean_and_std( X, axis, with_mean=with_mean, with_std=with_std) if copy: X = X.copy() # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: Xr /= std_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # std_ is very small so that mean_2 = mean_1/std_ > 0, even if # mean_1 was close to zero. The problem is thus essentially due # to the lack of precision of mean_. A solution is then to # substract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. 
""" X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min data_range = _handle_zeros_in_scale(data_range) self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. Read more in the :ref:`User Guide <preprocessing_scaler>`. 
Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. std_ : array of floats with shape [n_features] The standard deviation for each feature in the training set. Set to one if the standard deviation is zero for a given feature. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : array-like or CSR matrix with shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. """ X = check_array(X, accept_sparse='csr', copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") self.mean_ = None if self.with_std: var = mean_variance_axis(X, axis=0)[1] self.std_ = np.sqrt(var) self.std_ = _handle_zeros_in_scale(self.std_) else: self.std_ = None return self else: self.mean_, self.std_ = _mean_and_std( X, axis=0, with_mean=self.with_mean, with_std=self.with_std) return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.std_ is not None: inplace_column_scale(X, 1 / self.std_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.std_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. 
""" check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.std_ is not None: inplace_column_scale(X, self.std_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.std_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, copy=True): self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) scales = np.maximum(np.abs(mins), np.abs(maxs)) else: scales = np.abs(X).max(axis=0) scales = np.array(scales) scales = scales.reshape(-1) self.scale_ = _handle_zeros_in_scale(scales) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : array-like or CSR matrix. The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) else: inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
""" s = MaxAbsScaler(copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the Interquartile Range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. 
""" if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) elif self.axis == 0: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like. The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.RobustScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. 
For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0, 0, 1], [ 1, 2, 3, 4, 6, 9], [ 1, 4, 5, 16, 20, 25]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0], [ 1, 2, 3, 6], [ 1, 4, 5, 20]]) Attributes ---------- powers_ : array, shape (n_input_features, n_output_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array with shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. 
""" check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Normalizer` to perform normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms) X /= norms[:, np.newaxis] if axis == 0: X = X.T return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). 
Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. 
This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K) if copy: K = K.copy() K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : array or scipy.sparse matrix with shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo']) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. 
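            # In CSC layout the dummy column becomes column 0 and its
            # n_samples entries are stored first in data/indices, so every
            # existing column pointer moves up by n_samples and a fresh 0 is
            # prepended as the start pointer of the new first column.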
indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) X = check_array(X, accept_sparse='csc', copy=copy) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : maximum value for all features. - array : maximum value per feature. categorical_features: "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. 
Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'float'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. 
""" return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those catgorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
bsd-3-clause
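# --- Illustrative usage sketch (not part of the original sources above) ----
# A minimal, hedged example of the transformers implemented in the
# preprocessing module above, assuming they are importable as
# sklearn.preprocessing.{RobustScaler, MaxAbsScaler, OneHotEncoder, normalize};
# the toy data below is invented purely for illustration.
import numpy as np
from sklearn.preprocessing import (MaxAbsScaler, OneHotEncoder, RobustScaler,
                                   normalize)

X = np.array([[1.0, -2.0],
              [2.0, 0.5],
              [100.0, 3.0]])  # the 100.0 acts as an outlier in column 0

# RobustScaler centers each column on its median and divides by its IQR, so
# the single outlier has little influence on how the other rows are scaled.
print(RobustScaler().fit_transform(X))

# MaxAbsScaler only divides each column by its maximum absolute value,
# mapping the data into [-1, 1] without shifting it.
print(MaxAbsScaler().fit_transform(X))

# normalize rescales each row to unit L2 norm: [3, 4] becomes [0.6, 0.8].
print(normalize([[3.0, 4.0]]))

# OneHotEncoder expands non-negative integer codes into indicator columns;
# with n_values='auto' the number of columns per feature is learned from X.
codes = np.array([[0, 1],
                  [1, 0],
                  [1, 2]])
print(OneHotEncoder().fit_transform(codes).toarray())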
geminy/aidear
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/webrtc/video/full_stack_plot.py
10
13183
#!/usr/bin/env python # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license # that can be found in the LICENSE file in the root of the source # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. """Generate graphs for data generated by loopback tests. Usage examples: Show end to end time for a single full stack test. ./full_stack_plot.py -df end_to_end -o 600 --frames 1000 vp9_data.txt Show simultaneously PSNR and encoded frame size for two different runs of full stack test. Averaged over a cycle of 200 frames. Used e.g. for screenshare slide test. ./full_stack_plot.py -c 200 -df psnr -drf encoded_frame_size \\ before.txt after.txt Similar to the previous test, but multiple graphs. ./full_stack_plot.py -c 200 -df psnr vp8.txt vp9.txt --next \\ -c 200 -df sender_time vp8.txt vp9.txt --next \\ -c 200 -df end_to_end vp8.txt vp9.txt """ import argparse from collections import defaultdict import itertools import sys import matplotlib.pyplot as plt import numpy # Fields DROPPED = 0 INPUT_TIME = 1 # ms (timestamp) SEND_TIME = 2 # ms (timestamp) RECV_TIME = 3 # ms (timestamp) RENDER_TIME = 4 # ms (timestamp) ENCODED_FRAME_SIZE = 5 # bytes PSNR = 6 SSIM = 7 ENCODE_TIME = 8 # ms (time interval) TOTAL_RAW_FIELDS = 9 SENDER_TIME = TOTAL_RAW_FIELDS + 0 RECEIVER_TIME = TOTAL_RAW_FIELDS + 1 END_TO_END = TOTAL_RAW_FIELDS + 2 RENDERED_DELTA = TOTAL_RAW_FIELDS + 3 FIELD_MASK = 255 # Options HIDE_DROPPED = 256 RIGHT_Y_AXIS = 512 # internal field id, field name, title _fields = [ # Raw (DROPPED, "dropped", "dropped"), (INPUT_TIME, "input_time_ms", "input time"), (SEND_TIME, "send_time_ms", "send time"), (RECV_TIME, "recv_time_ms", "recv time"), (ENCODED_FRAME_SIZE, "encoded_frame_size", "encoded frame size"), (PSNR, "psnr", "PSNR"), (SSIM, "ssim", "SSIM"), (RENDER_TIME, "render_time_ms", "render time"), (ENCODE_TIME, "encode_time_ms", "encode time"), # Auto-generated (SENDER_TIME, "sender_time", "sender time"), (RECEIVER_TIME, "receiver_time", "receiver time"), (END_TO_END, "end_to_end", "end to end"), (RENDERED_DELTA, "rendered_delta", "rendered delta"), ] name_to_id = {field[1]: field[0] for field in _fields} id_to_title = {field[0]: field[2] for field in _fields} def field_arg_to_id(arg): if arg == "none": return None if arg in name_to_id: return name_to_id[arg] if arg + "_ms" in name_to_id: return name_to_id[arg + "_ms"] raise Exception("Unrecognized field name \"{}\"".format(arg)) class PlotLine(object): """Data for a single graph line.""" def __init__(self, label, values, flags): self.label = label self.values = values self.flags = flags class Data(object): """Object representing one full stack test.""" def __init__(self, filename): self.title = "" self.length = 0 self.samples = defaultdict(list) self._read_samples(filename) def _read_samples(self, filename): """Reads graph data from the given file.""" f = open(filename) it = iter(f) self.title = it.next().strip() self.length = int(it.next()) field_names = [name.strip() for name in it.next().split()] field_ids = [name_to_id[name] for name in field_names] for field_id in field_ids: self.samples[field_id] = [0.0] * self.length for sample_id in xrange(self.length): for col, value in enumerate(it.next().split()): self.samples[field_ids[col]][sample_id] = float(value) self._subtract_first_input_time() self._generate_additional_data() f.close() 
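  # Layout of the text files parsed by _read_samples above:
  #   line 1: plot title
  #   line 2: number of samples (integer)
  #   line 3: whitespace-separated raw field names, e.g.
  #           "dropped input_time_ms send_time_ms recv_time_ms ..."
  #   lines 4..: one whitespace-separated float per listed field,
  #              one line per sample.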
def _subtract_first_input_time(self): offset = self.samples[INPUT_TIME][0] for field in [INPUT_TIME, SEND_TIME, RECV_TIME, RENDER_TIME]: if field in self.samples: self.samples[field] = [x - offset for x in self.samples[field]] def _generate_additional_data(self): """Calculates sender time, receiver time etc. from the raw data.""" s = self.samples last_render_time = 0 for field_id in [SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA]: s[field_id] = [0] * self.length for k in range(self.length): s[SENDER_TIME][k] = s[SEND_TIME][k] - s[INPUT_TIME][k] decoded_time = s[RENDER_TIME][k] s[RECEIVER_TIME][k] = decoded_time - s[RECV_TIME][k] s[END_TO_END][k] = decoded_time - s[INPUT_TIME][k] if not s[DROPPED][k]: if k > 0: s[RENDERED_DELTA][k] = decoded_time - last_render_time last_render_time = decoded_time def _hide(self, values): """ Replaces values for dropped frames with None. These values are then skipped by the plot() method. """ return [None if self.samples[DROPPED][k] else values[k] for k in range(len(values))] def add_samples(self, config, target_lines_list): """Creates graph lines from the current data set with given config.""" for field in config.fields: # field is None means the user wants just to skip the color. if field is None: target_lines_list.append(None) continue field_id = field & FIELD_MASK values = self.samples[field_id] if field & HIDE_DROPPED: values = self._hide(values) target_lines_list.append(PlotLine( self.title + " " + id_to_title[field_id], values, field & ~FIELD_MASK)) def average_over_cycle(values, length): """ Returns the list: [ avg(values[0], values[length], ...), avg(values[1], values[length + 1], ...), ... avg(values[length - 1], values[2 * length - 1], ...), ] Skips None values when calculating the average value. """ total = [0.0] * length count = [0] * length for k in range(len(values)): if values[k] is not None: total[k % length] += values[k] count[k % length] += 1 result = [0.0] * length for k in range(length): result[k] = total[k] / count[k] if count[k] else None return result class PlotConfig(object): """Object representing a single graph.""" def __init__(self, fields, data_list, cycle_length=None, frames=None, offset=0, output_filename=None, title="Graph"): self.fields = fields self.data_list = data_list self.cycle_length = cycle_length self.frames = frames self.offset = offset self.output_filename = output_filename self.title = title def plot(self, ax1): lines = [] for data in self.data_list: if not data: # Add None lines to skip the colors. lines.extend([None] * len(self.fields)) else: data.add_samples(self, lines) def _slice_values(values): if self.offset: values = values[self.offset:] if self.frames: values = values[:self.frames] return values length = None for line in lines: if line is None: continue line.values = _slice_values(line.values) if self.cycle_length: line.values = average_over_cycle(line.values, self.cycle_length) if length is None: length = len(line.values) elif length != len(line.values): raise Exception("All arrays should have the same length!") ax1.set_xlabel("Frame", fontsize="large") if any(line.flags & RIGHT_Y_AXIS for line in lines if line): ax2 = ax1.twinx() ax2.set_xlabel("Frame", fontsize="large") else: ax2 = None # Have to implement color_cycle manually, due to two scales in a graph. 
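    # (ax1 and the twinx() ax2 each keep their own automatic color cycle, so
    # colors would repeat across the two scales without one shared iterator.)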
color_cycle = ["b", "r", "g", "c", "m", "y", "k"] color_iter = itertools.cycle(color_cycle) for line in lines: if not line: color_iter.next() continue if self.cycle_length: x = numpy.array(range(self.cycle_length)) else: x = numpy.array(range(self.offset, self.offset + len(line.values))) y = numpy.array(line.values) ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1 ax.plot(x, y, "o-", label=line.label, markersize=3.0, linewidth=1.0, color=color_iter.next()) ax1.grid(True) if ax2: ax1.legend(loc="upper left", shadow=True, fontsize="large") ax2.legend(loc="upper right", shadow=True, fontsize="large") else: ax1.legend(loc="best", shadow=True, fontsize="large") def load_files(filenames): result = [] for filename in filenames: if filename in load_files.cache: result.append(load_files.cache[filename]) else: data = Data(filename) load_files.cache[filename] = data result.append(data) return result load_files.cache = {} def get_parser(): class CustomAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): if "ordered_args" not in namespace: namespace.ordered_args = [] namespace.ordered_args.append((self.dest, values)) parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument( "-c", "--cycle_length", nargs=1, action=CustomAction, type=int, help="Cycle length over which to average the values.") parser.add_argument( "-f", "--field", nargs=1, action=CustomAction, help="Name of the field to show. Use 'none' to skip a color.") parser.add_argument("-r", "--right", nargs=0, action=CustomAction, help="Use right Y axis for given field.") parser.add_argument("-d", "--drop", nargs=0, action=CustomAction, help="Hide values for dropped frames.") parser.add_argument("-o", "--offset", nargs=1, action=CustomAction, type=int, help="Frame offset.") parser.add_argument("-n", "--next", nargs=0, action=CustomAction, help="Separator for multiple graphs.") parser.add_argument( "--frames", nargs=1, action=CustomAction, type=int, help="Frame count to show or take into account while averaging.") parser.add_argument("-t", "--title", nargs=1, action=CustomAction, help="Title of the graph.") parser.add_argument( "-O", "--output_filename", nargs=1, action=CustomAction, help="Use to save the graph into a file. " "Otherwise, a window will be shown.") parser.add_argument( "files", nargs="+", action=CustomAction, help="List of text-based files generated by loopback tests.") return parser def _plot_config_from_args(args, graph_num): # Pylint complains about using kwargs, so have to do it this way. cycle_length = None frames = None offset = 0 output_filename = None title = "Graph" fields = [] files = [] mask = 0 for key, values in args: if key == "cycle_length": cycle_length = values[0] elif key == "frames": frames = values[0] elif key == "offset": offset = values[0] elif key == "output_filename": output_filename = values[0] elif key == "title": title = values[0] elif key == "drop": mask |= HIDE_DROPPED elif key == "right": mask |= RIGHT_Y_AXIS elif key == "field": field_id = field_arg_to_id(values[0]) fields.append(field_id | mask if field_id is not None else None) mask = 0 # Reset mask after the field argument. 
elif key == "files": files.extend(values) if not files: raise Exception("Missing file argument(s) for graph #{}".format(graph_num)) if not fields: raise Exception("Missing field argument(s) for graph #{}".format(graph_num)) return PlotConfig(fields, load_files(files), cycle_length=cycle_length, frames=frames, offset=offset, output_filename=output_filename, title=title) def plot_configs_from_args(args): """Generates plot configs for given command line arguments.""" # The way it works: # First we detect separators -n/--next and split arguments into groups, one # for each plot. For each group, we partially parse it with # argparse.ArgumentParser, modified to remember the order of arguments. # Then we traverse the argument list and fill the PlotConfig. args = itertools.groupby(args, lambda x: x in ["-n", "--next"]) args = list(list(group) for match, group in args if not match) parser = get_parser() plot_configs = [] for index, raw_args in enumerate(args): graph_args = parser.parse_args(raw_args).ordered_args plot_configs.append(_plot_config_from_args(graph_args, index)) return plot_configs def show_or_save_plots(plot_configs): for config in plot_configs: fig = plt.figure(figsize=(14.0, 10.0)) ax = fig.add_subplot(1, 1, 1) plt.title(config.title) config.plot(ax) if config.output_filename: print "Saving to", config.output_filename fig.savefig(config.output_filename) plt.close(fig) plt.show() if __name__ == "__main__": show_or_save_plots(plot_configs_from_args(sys.argv[1:]))
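# --- Illustrative usage sketch (not part of the original script) -----------
# A hedged example of driving the module programmatically rather than from
# the command line; the file name "toy_run.txt" and every value in it are
# invented here purely to match the layout that Data._read_samples expects.
_toy_lines = [
    "toy run",                                  # title
    "3",                                        # number of samples
    "dropped input_time_ms send_time_ms recv_time_ms render_time_ms "
    "encoded_frame_size psnr ssim encode_time_ms",
    "0 0 5 40 45 1000 38.0 0.97 4",
    "0 33 39 73 78 900 37.5 0.96 4",
    "1 66 0 0 0 0 0 0 0",                       # a dropped frame
]
with open("toy_run.txt", "w") as f:
  f.write("\n".join(_toy_lines) + "\n")

# Equivalent to: ./full_stack_plot.py -f end_to_end toy_run.txt
show_or_save_plots(plot_configs_from_args(["-f", "end_to_end", "toy_run.txt"]))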
gpl-3.0
appapantula/scikit-learn
sklearn/linear_model/stochastic_gradient.py
130
50966
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification and regression using Stochastic Gradient Descent (SGD).""" import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from ..externals.joblib import Parallel, delayed from .base import LinearClassifierMixin, SparseCoefMixin from ..base import BaseEstimator, RegressorMixin from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import (check_array, check_random_state, check_X_y, deprecated) from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted from ..externals import six from .sgd_fast import plain_sgd, average_sgd from ..utils.fixes import astype from ..utils.seq_dataset import ArrayDataset, CSRDataset from ..utils import compute_class_weight from .sgd_fast import Hinge from .sgd_fast import SquaredHinge from .sgd_fast import Log from .sgd_fast import ModifiedHuber from .sgd_fast import SquaredLoss from .sgd_fast import Huber from .sgd_fast import EpsilonInsensitive from .sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3, "pa1": 4, "pa2": 5} PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} SPARSE_INTERCEPT_DECAY = 0.01 """For sparse data intercept updates are scaled by this decay factor to avoid intercept oscillation.""" DEFAULT_EPSILON = 0.1 """Default value of ``epsilon`` parameter. """ class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)): """Base class for SGD classification and regression.""" def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, warm_start=False, average=False): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.n_iter = n_iter self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.warm_start = warm_start self.average = average self._validate_params() self.coef_ = None if self.average > 0: self.standard_coef_ = None self.average_coef_ = None # iteration count for learning rate schedule # must not be int (e.g. if ``learning_rate=='optimal'``) self.t_ = None def set_params(self, *args, **kwargs): super(BaseSGD, self).set_params(*args, **kwargs) self._validate_params() return self @abstractmethod def fit(self, X, y): """Fit model.""" def _validate_params(self): """Validate input params. """ if not isinstance(self.shuffle, bool): raise ValueError("shuffle must be either True or False") if self.n_iter <= 0: raise ValueError("n_iter must be > zero") if not (0.0 <= self.l1_ratio <= 1.0): raise ValueError("l1_ratio must be in [0, 1]") if self.alpha < 0.0: raise ValueError("alpha must be >= 0") if self.learning_rate in ("constant", "invscaling"): if self.eta0 <= 0.0: raise ValueError("eta0 must be > 0") # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) if self.loss not in self.loss_functions: raise ValueError("The loss %s is not supported. " % self.loss) def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``. 
""" try: loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ('huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'): args = (self.epsilon, ) return loss_class(*args) except KeyError: raise ValueError("The loss %s is not supported. " % loss) def _get_learning_rate_type(self, learning_rate): try: return LEARNING_RATE_TYPES[learning_rate] except KeyError: raise ValueError("learning rate %s " "is not supported. " % learning_rate) def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] except KeyError: raise ValueError("Penalty %s is not supported. " % penalty) def _validate_sample_weight(self, sample_weight, n_samples): """Set the sample weight array.""" if sample_weight is None: # uniform sample weights sample_weight = np.ones(n_samples, dtype=np.float64, order='C') else: # user-provided array sample_weight = np.asarray(sample_weight, dtype=np.float64, order="C") if sample_weight.shape[0] != n_samples: raise ValueError("Shapes of X and sample_weight do not match.") return sample_weight def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match dataset. ") self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order="C") # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes, ): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ for binary problem if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not " "match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ for binary problem if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init.reshape(1,) else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order="C") def _make_dataset(X, y_i, sample_weight): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. """ if sp.issparse(X): dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight) intercept_decay = SPARSE_INTERCEPT_DECAY else: dataset = ArrayDataset(X, y_i, sample_weight) intercept_decay = 1.0 return dataset, intercept_decay def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. Returns y, coef, intercept. 
""" y_i = np.ones(y.shape, dtype=np.float64, order="C") y_i[y != est.classes_[i]] = -1.0 average_intercept = 0 average_coef = None if len(est.classes_) == 2: if not est.average: coef = est.coef_.ravel() intercept = est.intercept_[0] else: coef = est.standard_coef_.ravel() intercept = est.standard_intercept_[0] average_coef = est.average_coef_.ravel() average_intercept = est.average_intercept_[0] else: if not est.average: coef = est.coef_[i] intercept = est.intercept_[i] else: coef = est.standard_coef_[i] intercept = est.standard_intercept_[i] average_coef = est.average_coef_[i] average_intercept = est.average_intercept_[i] return y_i, coef, intercept, average_coef, average_intercept def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter, pos_weight, neg_weight, sample_weight): """Fit a single binary classifier. The i'th class is considered the "positive" class. """ # if average is not true, average_coef, and average_intercept will be # unused y_i, coef, intercept, average_coef, average_intercept = \ _prepare_fit_binary(est, y, i) assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] dataset, intercept_decay = _make_dataset(X, y_i, sample_weight) penalty_type = est._get_penalty_type(est.penalty) learning_rate_type = est._get_learning_rate_type(learning_rate) # XXX should have random_state_! random_state = check_random_state(est.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if not est.average: return plain_sgd(coef, intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay) else: standard_coef, standard_intercept, average_coef, \ average_intercept = average_sgd(coef, intercept, average_coef, average_intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay, est.average) if len(est.classes_) == 2: est.average_intercept_[0] = average_intercept else: est.average_intercept_[i] = average_intercept return standard_coef, standard_intercept class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD, LinearClassifierMixin)): loss_functions = { "hinge": (Hinge, 1.0), "squared_hinge": (SquaredHinge, 1.0), "perceptron": (Hinge, 0.0), "log": (Log, ), "modified_huber": (ModifiedHuber, ), "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) self.class_weight = class_weight self.classes_ = None self.n_jobs = int(n_jobs) def _partial_fit(self, X, y, alpha, C, loss, 
learning_rate, n_iter, classes, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape self._validate_params() _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight(self.class_weight, self.classes_, y) sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None or coef_init is not None: self._allocate_parameter_mem(n_classes, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1])) self.loss_function = self._get_loss_function(loss) if self.t_ is None: self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) elif n_classes == 2: self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) else: raise ValueError("The number of class labels must be " "greater than one.") return self def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if hasattr(self, "classes_"): self.classes_ = None X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class classes = np.unique(y) if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, classes, sample_weight, coef_init, intercept_init) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, n_iter): """Fit a binary classifier on X and y. """ coef, intercept = fit_binary(self, 1, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight) self.t_ += n_iter * X.shape[0] # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self.average_coef_.reshape(1, -1) self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_.reshape(1, -1) self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, n_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All. """ # Use joblib to fit OvA in parallel. 
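        # One binary problem per class: class i is the positive class and is
        # weighted by its expanded class weight, while all remaining classes
        # are pooled together as negatives with weight 1.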
result = Parallel(n_jobs=self.n_jobs, backend="threading", verbose=self.verbose)( delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[i], 1., sample_weight) for i in range(len(self.classes_))) for i, (_, intercept) in enumerate(result): self.intercept_[i] = intercept self.t_ += n_iter * X.shape[0] if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ def partial_fit(self, X, y, classes=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ if self.class_weight in ['balanced', 'auto']: raise ValueError("class_weight '{0}' is not supported for " "partial_fit. In order to use 'balanced' weights, " "use compute_class_weight('{0}', classes, y). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. " "Pass the resulting weights as the class_weight " "parameter.".format(self.class_weight)) return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the contructor) if class_weight is specified Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin): """Linear classifiers (SVM, logistic regression, a.o.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning, see the partial_fit method. 
For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\ 'perceptron', or a regression loss: 'squared_loss', 'huber',\ 'epsilon_insensitive', or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'hinge', which gives a linear SVM. The 'log' loss gives logistic regression, a probabilistic classifier. 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. 'squared_hinge' is like hinge but is quadratically penalized. 'perceptron' is the linear loss used by the perceptron algorithm. The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. learning_rate : string, optional The learning rate schedule: constant: eta = eta0 optimal: eta = 1.0 / (t + t0) [default] invscaling: eta = eta0 / pow(t, power_t) where t0 is chosen by a heuristic proposed by Leon Bottou. eta0 : double The initial learning rate for the 'constant' or 'invscaling' schedules. 
The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\ n_features) Weights assigned to the features. intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> clf = linear_model.SGDClassifier() >>> clf.fit(X, Y) ... #doctest: +NORMALIZE_WHITESPACE SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=True, verbose=0, warm_start=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- LinearSVC, LogisticRegression, Perceptron """ def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(SGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, class_weight=class_weight, warm_start=warm_start, average=average) def _check_proba(self): check_is_fitted(self, "t_") if self.loss not in ("log", "modified_huber"): raise AttributeError("probability estimates are not available for" " loss=%r" % self.loss) @property def predict_proba(self): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ self._check_proba() return self._predict_proba def _predict_proba(self, X): if self.loss == "log": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = (len(self.classes_) == 2) scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1. prob /= 2. if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = (prob_sum == 0) if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError("predict_(log_)proba only supported when" " loss='log' or loss='modified_huber' " "(%r given)" % self.loss) @property def predict_log_proba(self): """Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) class BaseSGDRegressor(BaseSGD, RegressorMixin): loss_functions = { "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64) y = astype(y, np.float64, copy=False) n_samples, n_features = X.shape self._validate_params() # Allocate datastructures from input arguments sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None: self._allocate_parameter_mem(1, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." 
% (n_features, self.coef_.shape[-1])) if self.average > 0 and self.average_coef_ is None: self.average_coef_ = np.zeros(n_features, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor(X, y, alpha, C, loss, learning_rate, sample_weight, n_iter) return self def partial_fit(self, X, y, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None) def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_intercept_ = self.intercept_ self.standard_coef_ = self.coef_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None return self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, sample_weight, coef_init, intercept_init) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ return self._decision_function(X) def _decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all) X = check_array(X, accept_sparse='csr') scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. 
""" return self._decision_function(X) def _fit_regressor(self, X, y, alpha, C, loss, learning_rate, sample_weight, n_iter): dataset, intercept_decay = _make_dataset(X, y, sample_weight) loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if self.t_ is None: self.t_ = 1.0 random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if self.average > 0: self.standard_coef_, self.standard_intercept_, \ self.average_coef_, self.average_intercept_ =\ average_sgd(self.standard_coef_, self.standard_intercept_[0], self.average_coef_, self.average_intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay, self.average) self.average_intercept_ = np.atleast_1d(self.average_intercept_) self.standard_intercept_ = np.atleast_1d(self.standard_intercept_) self.t_ += n_iter * X.shape[0] if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.intercept_ = self.standard_intercept_ else: self.coef_, self.intercept_ = \ plain_sgd(self.coef_, self.intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay) self.t_ += n_iter * X.shape[0] self.intercept_ = np.atleast_1d(self.intercept_) class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \ or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'squared_loss' which refers to the ordinary least squares fit. 'huber' modifies 'squared_loss' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. 
alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level. epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. learning_rate : string, optional The learning rate: constant: eta = eta0 optimal: eta = 1.0/(alpha * t) invscaling: eta = eta0 / pow(t, power_t) [default] eta0 : double, optional The initial learning rate [default 0.01]. power_t : double, optional The exponent for inverse scaling learning rate [default 0.25]. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10 will`` begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (n_features,) Weights assigned to the features. intercept_ : array, shape (1,) The intercept term. average_coef_ : array, shape (n_features,) Averaged weights assigned to the features. average_intercept_ : array, shape (1,) The averaged intercept term. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.SGDRegressor() >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25, random_state=None, shuffle=True, verbose=0, warm_start=False) See also -------- Ridge, ElasticNet, Lasso, SVR """ def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(SGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average)
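
# --- Editor's usage sketch (not part of the scikit-learn source above) ---
# A minimal, hedged illustration of the out-of-core workflow that the
# partial_fit docstrings above describe: `classes` must be supplied on the
# first call, and predict_proba is only available for loss="log" or
# loss="modified_huber". The synthetic batches, shapes, and label set below
# are illustrative assumptions, not taken from the file above.
import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
clf = SGDClassifier(loss="log", random_state=0)
all_classes = np.array([0, 1, 2])  # full label set, required on the first call

for _ in range(5):  # stand-in for mini-batches streamed from disk
    X_batch = rng.randn(20, 4)
    y_batch = rng.randint(0, 3, size=20)
    clf.partial_fit(X_batch, y_batch, classes=all_classes)

print(clf.predict(rng.randn(3, 4)))        # predicted class labels
print(clf.predict_proba(rng.randn(3, 4)))  # probabilities (loss="log" only)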
bsd-3-clause
hugobowne/scikit-learn
examples/svm/plot_svm_scale_c.py
44
5405
""" ============================================== Scaling the regularization parameter for SVCs ============================================== The following example illustrates the effect of scaling the regularization parameter when using :ref:`svm` for :ref:`classification <svm_classification>`. For SVC classification, we are interested in a risk minimization for the equation: .. math:: C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w) where - :math:`C` is used to set the amount of regularization - :math:`\mathcal{L}` is a `loss` function of our samples and our model parameters. - :math:`\Omega` is a `penalty` function of our model parameters If we consider the loss function to be the individual error per sample, then the data-fit term, or the sum of the error for each sample, will increase as we add more samples. The penalization term, however, will not increase. When using, for example, :ref:`cross validation <cross_validation>`, to set the amount of regularization with `C`, there will be a different amount of samples between the main problem and the smaller problems within the folds of the cross validation. Since our loss function is dependent on the amount of samples, the latter will influence the selected value of `C`. The question that arises is `How do we optimally adjust C to account for the different amount of training samples?` The figures below are used to illustrate the effect of scaling our `C` to compensate for the change in the number of samples, in the case of using an `l1` penalty, as well as the `l2` penalty. l1-penalty case ----------------- In the `l1` case, theory says that prediction consistency (i.e. that under given hypothesis, the estimator learned predicts as well as a model knowing the true distribution) is not possible because of the bias of the `l1`. It does say, however, that model consistency, in terms of finding the right set of non-zero parameters as well as their signs, can be achieved by scaling `C1`. l2-penalty case ----------------- The theory says that in order to achieve prediction consistency, the penalty parameter should be kept constant as the number of samples grow. Simulations ------------ The two figures below plot the values of `C` on the `x-axis` and the corresponding cross-validation scores on the `y-axis`, for several different fractions of a generated data-set. In the `l1` penalty case, the cross-validation-error correlates best with the test-error, when scaling our `C` with the number of samples, `n`, which can be seen in the first figure. For the `l2` penalty case, the best result comes from the case where `C` is not scaled. .. topic:: Note: Two separate datasets are used for the two different plots. The reason behind this is the `l1` case works better on sparse data, while `l2` is better suited to the non-sparse case. 
""" print(__doc__) # Author: Andreas Mueller <amueller@ais.uni-bonn.de> # Jaques Grobler <jaques.grobler@inria.fr> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.svm import LinearSVC from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.utils import check_random_state from sklearn import datasets rnd = check_random_state(1) # set up dataset n_samples = 100 n_features = 300 # l1 data (only 5 informative features) X_1, y_1 = datasets.make_classification(n_samples=n_samples, n_features=n_features, n_informative=5, random_state=1) # l2 data: non sparse, but less features y_2 = np.sign(.5 - rnd.rand(n_samples)) X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis] X_2 += 5 * rnd.randn(n_samples, n_features / 5) clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False, tol=1e-3), np.logspace(-2.3, -1.3, 10), X_1, y_1), (LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=1e-4), np.logspace(-4.5, -2, 10), X_2, y_2)] colors = ['navy', 'cyan', 'darkorange'] lw = 2 for fignum, (clf, cs, X, y) in enumerate(clf_sets): # set up the plot for each regressor plt.figure(fignum, figsize=(9, 10)) for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]): param_grid = dict(C=cs) # To get nice curve, we need a large number of iterations to # reduce the variance grid = GridSearchCV(clf, refit=False, param_grid=param_grid, cv=ShuffleSplit(train_size=train_size, n_iter=250, random_state=1)) grid.fit(X, y) scores = [x[1] for x in grid.grid_scores_] scales = [(1, 'No scaling'), ((n_samples * train_size), '1/n_samples'), ] for subplotnum, (scaler, name) in enumerate(scales): plt.subplot(2, 1, subplotnum + 1) plt.xlabel('C') plt.ylabel('CV Score') grid_cs = cs * float(scaler) # scale the C's plt.semilogx(grid_cs, scores, label="fraction %.2f" % train_size, color=colors[k], lw=lw) plt.title('scaling=%s, penalty=%s, loss=%s' % (name, clf.penalty, clf.loss)) plt.legend(loc="best") plt.show()
bsd-3-clause
ldirer/scikit-learn
sklearn/linear_model/tests/test_logistic.py
5
48825
import numpy as np import scipy.sparse as sp from scipy import linalg, optimize, sparse from sklearn.datasets import load_iris, make_classification from sklearn.metrics import log_loss from sklearn.model_selection import StratifiedKFold from sklearn.preprocessing import LabelEncoder from sklearn.utils import compute_class_weight from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import raises from sklearn.exceptions import ConvergenceWarning from sklearn.linear_model.logistic import ( LogisticRegression, logistic_regression_path, LogisticRegressionCV, _logistic_loss_and_grad, _logistic_grad_hess, _multinomial_grad_hess, _logistic_loss, ) X = [[-1, 0], [0, 1], [1, 1]] X_sp = sp.csr_matrix(X) Y1 = [0, 1, 1] Y2 = [2, 1, 0] iris = load_iris() def check_predictions(clf, X, y): """Check that the model is able to fit the classification data""" n_samples = len(y) classes = np.unique(y) n_classes = classes.shape[0] predicted = clf.fit(X, y).predict(X) assert_array_equal(clf.classes_, classes) assert_equal(predicted.shape, (n_samples,)) assert_array_equal(predicted, y) probabilities = clf.predict_proba(X) assert_equal(probabilities.shape, (n_samples, n_classes)) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) assert_array_equal(probabilities.argmax(axis=1), y) def test_predict_2_classes(): # Simple sanity check on a 2 classes dataset # Make sure it predicts the correct result on simple datasets. check_predictions(LogisticRegression(random_state=0), X, Y1) check_predictions(LogisticRegression(random_state=0), X_sp, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X_sp, Y1) def test_error(): # Test for appropriate exception on errors msg = "Penalty term must be positive" assert_raise_message(ValueError, msg, LogisticRegression(C=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LogisticRegression(C="test").fit, X, Y1) for LR in [LogisticRegression, LogisticRegressionCV]: msg = "Tolerance for stopping criteria must be positive" assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1) msg = "Maximum number of iteration must be positive" assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1) def test_predict_3_classes(): check_predictions(LogisticRegression(C=10), X, Y2) check_predictions(LogisticRegression(C=10), X_sp, Y2) def test_predict_iris(): # Test logistic regression with the iris dataset n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] # Test that both multinomial and OvR solvers handle # multiclass data correctly and give good accuracy # score (>0.95) for the training data. 
for clf in [LogisticRegression(C=len(iris.data)), LogisticRegression(C=len(iris.data), solver='lbfgs', multi_class='multinomial'), LogisticRegression(C=len(iris.data), solver='newton-cg', multi_class='multinomial'), LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2, multi_class='ovr', random_state=42), LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2, multi_class='ovr', random_state=42) ]: clf.fit(iris.data, target) assert_array_equal(np.unique(target), clf.classes_) pred = clf.predict(iris.data) assert_greater(np.mean(pred == target), .95) probabilities = clf.predict_proba(iris.data) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) pred = iris.target_names[probabilities.argmax(axis=1)] assert_greater(np.mean(pred == target), .95) def test_multinomial_validation(): for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']: lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial') assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1]) def test_check_solver_option(): X, y = iris.data, iris.target for LR in [LogisticRegression, LogisticRegressionCV]: msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs" " and sag solvers, got wrong_name") lr = LR(solver="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) msg = "multi_class should be either multinomial or ovr, got wrong_name" lr = LR(solver='newton-cg', multi_class="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) # only 'liblinear' solver msg = "Solver liblinear does not support a multinomial backend." lr = LR(solver='liblinear', multi_class='multinomial') assert_raise_message(ValueError, msg, lr.fit, X, y) # all solvers except 'liblinear' for solver in ['newton-cg', 'lbfgs', 'sag']: msg = ("Solver %s supports only l2 penalties, got l1 penalty." % solver) lr = LR(solver=solver, penalty='l1') assert_raise_message(ValueError, msg, lr.fit, X, y) for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']: msg = ("Solver %s supports only dual=False, got dual=True" % solver) lr = LR(solver=solver, dual=True) assert_raise_message(ValueError, msg, lr.fit, X, y) def test_multinomial_binary(): # Test multinomial LR on a binary problem. target = (iris.target > 0).astype(np.intp) target = np.array(["setosa", "not-setosa"])[target] for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']: clf = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, max_iter=2000) clf.fit(iris.data, target) assert_equal(clf.coef_.shape, (1, iris.data.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_array_equal(clf.predict(iris.data), target) mlr = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, fit_intercept=False) mlr.fit(iris.data, target) pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] assert_greater(np.mean(pred == target), .9) def test_sparsify(): # Test sparsify and densify members. 
n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] clf = LogisticRegression(random_state=0).fit(iris.data, target) pred_d_d = clf.decision_function(iris.data) clf.sparsify() assert_true(sp.issparse(clf.coef_)) pred_s_d = clf.decision_function(iris.data) sp_data = sp.coo_matrix(iris.data) pred_s_s = clf.decision_function(sp_data) clf.densify() pred_d_s = clf.decision_function(sp_data) assert_array_almost_equal(pred_d_d, pred_s_d) assert_array_almost_equal(pred_d_d, pred_s_s) assert_array_almost_equal(pred_d_d, pred_d_s) def test_inconsistent_input(): # Test that an exception is raised on inconsistent input rng = np.random.RandomState(0) X_ = rng.random_sample((5, 10)) y_ = np.ones(X_.shape[0]) y_[0] = 0 clf = LogisticRegression(random_state=0) # Wrong dimensions for training data y_wrong = y_[:-1] assert_raises(ValueError, clf.fit, X, y_wrong) # Wrong dimensions for test data assert_raises(ValueError, clf.fit(X_, y_).predict, rng.random_sample((3, 12))) def test_write_parameters(): # Test that we can write to coef_ and intercept_ clf = LogisticRegression(random_state=0) clf.fit(X, Y1) clf.coef_[:] = 0 clf.intercept_[:] = 0 assert_array_almost_equal(clf.decision_function(X), 0) @raises(ValueError) def test_nan(): # Test proper NaN handling. # Regression test for Issue #252: fit used to go into an infinite loop. Xnan = np.array(X, dtype=np.float64) Xnan[0, 1] = np.nan LogisticRegression(random_state=0).fit(Xnan, Y1) def test_consistency_path(): # Test that the path algorithm is consistent rng = np.random.RandomState(0) X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) y = [1] * 100 + [-1] * 100 Cs = np.logspace(0, 4, 10) f = ignore_warnings # can't test with fit_intercept=True since LIBLINEAR # penalizes the intercept for solver in ['sag', 'saga']: coefs, Cs, _ = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver, max_iter=1000, random_state=0) for i, C in enumerate(Cs): lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5, solver=solver, random_state=0) lr.fit(X, y) lr_coef = lr.coef_.ravel() assert_array_almost_equal(lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver) # test for fit_intercept=True for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'): Cs = [1e3] coefs, Cs, _ = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver, intercept_scaling=10000., random_state=0) lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4, intercept_scaling=10000., random_state=0) lr.fit(X, y) lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) assert_array_almost_equal(lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver) def test_liblinear_dual_random_state(): # random_state is relevant for liblinear solver only if dual=True X, y = make_classification(n_samples=20, random_state=0) lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr1.fit(X, y) lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr2.fit(X, y) lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15) lr3.fit(X, y) # same result for same random state assert_array_almost_equal(lr1.coef_, lr2.coef_) # different results for different random states msg = "Arrays are not almost equal to 6 decimals" assert_raise_message(AssertionError, msg, assert_array_almost_equal, lr1.coef_, lr3.coef_) def test_logistic_loss_and_grad(): X_ref, y = make_classification(n_samples=20, random_state=0) n_features = 
X_ref.shape[1] X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = np.zeros(n_features) # First check that our derivation of the grad is correct loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad, approx_grad, decimal=2) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad( w, X, y, alpha=1. ) assert_array_almost_equal(loss, loss_interp) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad_interp, approx_grad, decimal=2) def test_logistic_grad_hess(): rng = np.random.RandomState(0) n_samples, n_features = 50, 5 X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = .1 * np.ones(n_features) # First check that _logistic_grad_hess is consistent # with _logistic_loss_and_grad loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.) grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) assert_array_almost_equal(grad, grad_2) # Now check our hessian along the second direction of the grad vector = np.zeros_like(grad) vector[1] = 1 hess_col = hess(vector) # Computation of the Hessian is particularly fragile to numerical # errors when doing simple finite differences. Here we compute the # grad along a path in the direction of the vector and then use a # least-square regression to estimate the slope e = 1e-3 d_x = np.linspace(-e, e, 30) d_grad = np.array([ _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1] for t in d_x ]) d_grad -= d_grad.mean(axis=0) approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() assert_array_almost_equal(approx_hess_col, hess_col, decimal=3) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.) loss_interp_2 = _logistic_loss(w, X, y, alpha=1.) grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) 
assert_array_almost_equal(loss_interp, loss_interp_2) assert_array_almost_equal(grad_interp, grad_interp_2) def test_logistic_cv(): # test for LogisticRegressionCV object n_samples, n_features = 50, 5 rng = np.random.RandomState(0) X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False, solver='liblinear') lr_cv.fit(X_ref, y) lr = LogisticRegression(C=1., fit_intercept=False) lr.fit(X_ref, y) assert_array_almost_equal(lr.coef_, lr_cv.coef_) assert_array_equal(lr_cv.coef_.shape, (1, n_features)) assert_array_equal(lr_cv.classes_, [-1, 1]) assert_equal(len(lr_cv.classes_), 2) coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) assert_array_equal(lr_cv.Cs_.shape, (1,)) scores = np.asarray(list(lr_cv.scores_.values())) assert_array_equal(scores.shape, (1, 3, 1)) def test_multinomial_logistic_regression_string_inputs(): # Test with string labels for LogisticRegression(CV) n_samples, n_features, n_classes = 50, 5, 3 X_ref, y = make_classification(n_samples=n_samples, n_features=n_features, n_classes=n_classes, n_informative=3, random_state=0) y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y) # For numerical labels, let y values be taken from set (-1, 0, 1) y = np.array(y) - 1 # Test for string labels lr = LogisticRegression(solver='lbfgs', multi_class='multinomial') lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial') lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial') lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial') lr.fit(X_ref, y) lr_cv.fit(X_ref, y) lr_str.fit(X_ref, y_str) lr_cv_str.fit(X_ref, y_str) assert_array_almost_equal(lr.coef_, lr_str.coef_) assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo']) assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_) assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo']) assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo']) # The predictions should be in original labels assert_equal(sorted(np.unique(lr_str.predict(X_ref))), ['bar', 'baz', 'foo']) assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz', 'foo']) # Make sure class weights can be given with string labels lr_cv_str = LogisticRegression( solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0}, multi_class='multinomial').fit(X_ref, y_str) assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz']) def test_logistic_cv_sparse(): X, y = make_classification(n_samples=50, n_features=5, random_state=0) X[X < 1.0] = 0.0 csr = sp.csr_matrix(X) clf = LogisticRegressionCV(fit_intercept=True) clf.fit(X, y) clfs = LogisticRegressionCV(fit_intercept=True) clfs.fit(csr, y) assert_array_almost_equal(clfs.coef_, clf.coef_) assert_array_almost_equal(clfs.intercept_, clf.intercept_) assert_equal(clfs.C_, clf.C_) def test_intercept_logistic_helper(): n_samples, n_features = 10, 5 X, y = make_classification(n_samples=n_samples, n_features=n_features, random_state=0) # Fit intercept case. alpha = 1. w = np.ones(n_features + 1) grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha) loss_interp = _logistic_loss(w, X, y, alpha) # Do not fit intercept. This can be considered equivalent to adding # a feature vector of ones, i.e column of one vectors. 
X_ = np.hstack((X, np.ones(10)[:, np.newaxis])) grad, hess = _logistic_grad_hess(w, X_, y, alpha) loss = _logistic_loss(w, X_, y, alpha) # In the fit_intercept=False case, the feature vector of ones is # penalized. This should be taken care of. assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss) # Check gradient. assert_array_almost_equal(grad_interp[:n_features], grad[:n_features]) assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1]) rng = np.random.RandomState(0) grad = rng.rand(n_features + 1) hess_interp = hess_interp(grad) hess = hess(grad) assert_array_almost_equal(hess_interp[:n_features], hess[:n_features]) assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1]) def test_ovr_multinomial_iris(): # Test that OvR and multinomial are correct using the iris dataset. train, target = iris.data, iris.target n_samples, n_features = train.shape # The cv indices from stratified kfold (where stratification is done based # on the fine-grained iris classes, i.e, before the classes 0 and 1 are # conflated) is used for both clf and clf1 n_cv = 2 cv = StratifiedKFold(n_cv) precomputed_folds = list(cv.split(train, target)) # Train clf on the original dataset where classes 0 and 1 are separated clf = LogisticRegressionCV(cv=precomputed_folds) clf.fit(train, target) # Conflate classes 0 and 1 and train clf1 on this modified dataset clf1 = LogisticRegressionCV(cv=precomputed_folds) target_copy = target.copy() target_copy[target_copy == 0] = 1 clf1.fit(train, target_copy) # Ensure that what OvR learns for class2 is same regardless of whether # classes 0 and 1 are separated or not assert_array_almost_equal(clf.scores_[2], clf1.scores_[2]) assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_) assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_) # Test the shape of various attributes. 
assert_equal(clf.coef_.shape, (3, n_features)) assert_array_equal(clf.classes_, [0, 1, 2]) coefs_paths = np.asarray(list(clf.coefs_paths_.values())) assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1)) assert_equal(clf.Cs_.shape, (10,)) scores = np.asarray(list(clf.scores_.values())) assert_equal(scores.shape, (3, n_cv, 10)) # Test that for the iris data multinomial gives a better accuracy than OvR for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']: max_iter = 2000 if solver in ['sag', 'saga'] else 15 clf_multi = LogisticRegressionCV( solver=solver, multi_class='multinomial', max_iter=max_iter, random_state=42, tol=1e-5 if solver in ['sag', 'saga'] else 1e-2, cv=2) clf_multi.fit(train, target) multi_score = clf_multi.score(train, target) ovr_score = clf.score(train, target) assert_greater(multi_score, ovr_score) # Test attributes of LogisticRegressionCV assert_equal(clf.coef_.shape, clf_multi.coef_.shape) assert_array_equal(clf_multi.classes_, [0, 1, 2]) coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1)) assert_equal(clf_multi.Cs_.shape, (10,)) scores = np.asarray(list(clf_multi.scores_.values())) assert_equal(scores.shape, (3, n_cv, 10)) def test_logistic_regression_solvers(): X, y = make_classification(n_features=10, n_informative=5, random_state=0) ncg = LogisticRegression(solver='newton-cg', fit_intercept=False) lbf = LogisticRegression(solver='lbfgs', fit_intercept=False) lib = LogisticRegression(fit_intercept=False) sag = LogisticRegression(solver='sag', fit_intercept=False, random_state=42) saga = LogisticRegression(solver='saga', fit_intercept=False, random_state=42) ncg.fit(X, y) lbf.fit(X, y) sag.fit(X, y) saga.fit(X, y) lib.fit(X, y) assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3) assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3) assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3) assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3) assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3) assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3) assert_array_almost_equal(saga.coef_, sag.coef_, decimal=3) assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=3) assert_array_almost_equal(saga.coef_, ncg.coef_, decimal=3) assert_array_almost_equal(saga.coef_, lib.coef_, decimal=3) def test_logistic_regression_solvers_multiclass(): X, y = make_classification(n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0) tol = 1e-7 ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol) lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol) lib = LogisticRegression(fit_intercept=False, tol=tol) sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol, max_iter=1000, random_state=42) saga = LogisticRegression(solver='saga', fit_intercept=False, tol=tol, max_iter=10000, random_state=42) ncg.fit(X, y) lbf.fit(X, y) sag.fit(X, y) saga.fit(X, y) lib.fit(X, y) assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4) assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4) assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4) assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4) assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4) assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4) assert_array_almost_equal(saga.coef_, sag.coef_, decimal=4) assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=4) assert_array_almost_equal(saga.coef_, ncg.coef_, 
decimal=4) assert_array_almost_equal(saga.coef_, lib.coef_, decimal=4) def test_logistic_regressioncv_class_weights(): for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]: n_classes = len(weight) for class_weight in (weight, 'balanced'): X, y = make_classification(n_samples=30, n_features=3, n_repeated=0, n_informative=3, n_redundant=0, n_classes=n_classes, random_state=0) clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1, fit_intercept=False, class_weight=class_weight) clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1, fit_intercept=False, class_weight=class_weight) clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1, fit_intercept=False, class_weight=class_weight) clf_sag = LogisticRegressionCV(solver='sag', Cs=1, fit_intercept=False, class_weight=class_weight, tol=1e-5, max_iter=10000, random_state=0) clf_saga = LogisticRegressionCV(solver='saga', Cs=1, fit_intercept=False, class_weight=class_weight, tol=1e-5, max_iter=10000, random_state=0) clf_lbf.fit(X, y) clf_ncg.fit(X, y) clf_lib.fit(X, y) clf_sag.fit(X, y) clf_saga.fit(X, y) assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4) assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4) assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4) assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4) def test_logistic_regression_sample_weights(): X, y = make_classification(n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0) sample_weight = y + 1 for LR in [LogisticRegression, LogisticRegressionCV]: # Test that passing sample_weight as ones is the same as # not passing them at all (default None) for solver in ['lbfgs', 'liblinear']: clf_sw_none = LR(solver=solver, fit_intercept=False, random_state=42) clf_sw_none.fit(X, y) clf_sw_ones = LR(solver=solver, fit_intercept=False, random_state=42) clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0])) assert_array_almost_equal( clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4) # Test that sample weights work the same with the lbfgs, # newton-cg, and 'sag' solvers clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42) clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight) clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42) clf_sw_n.fit(X, y, sample_weight=sample_weight) clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10, random_state=42) # ignore convergence warning due to small dataset with ignore_warnings(): clf_sw_sag.fit(X, y, sample_weight=sample_weight) clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False, random_state=42) clf_sw_liblinear.fit(X, y, sample_weight=sample_weight) assert_array_almost_equal( clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4) assert_array_almost_equal( clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4) assert_array_almost_equal( clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4) # Test that passing class_weight as [1,2] is the same as # passing class weight = [1,1] but adjusting sample weights # to be 2 for all instances of class 2 for solver in ['lbfgs', 'liblinear']: clf_cw_12 = LR(solver=solver, fit_intercept=False, class_weight={0: 1, 1: 2}, random_state=42) clf_cw_12.fit(X, y) clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42) clf_sw_12.fit(X, y, sample_weight=sample_weight) assert_array_almost_equal( clf_cw_12.coef_, clf_sw_12.coef_, decimal=4) # Test the above for l1 penalty and l2 penalty with dual=True. # since the patched liblinear code is different. 
clf_cw = LogisticRegression( solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2}, penalty="l1", tol=1e-5, random_state=42) clf_cw.fit(X, y) clf_sw = LogisticRegression( solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5, random_state=42) clf_sw.fit(X, y, sample_weight) assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) clf_cw = LogisticRegression( solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2}, penalty="l2", dual=True, random_state=42) clf_cw.fit(X, y) clf_sw = LogisticRegression( solver="liblinear", fit_intercept=False, penalty="l2", dual=True, random_state=42) clf_sw.fit(X, y, sample_weight) assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) def _compute_class_weight_dictionary(y): # helper for returning a dictionary instead of an array classes = np.unique(y) class_weight = compute_class_weight("balanced", classes, y) class_weight_dict = dict(zip(classes, class_weight)) return class_weight_dict def test_logistic_regression_class_weights(): # Multinomial case: remove 90% of class 0 X = iris.data[45:, :] y = iris.target[45:] solvers = ("lbfgs", "newton-cg") class_weight_dict = _compute_class_weight_dictionary(y) for solver in solvers: clf1 = LogisticRegression(solver=solver, multi_class="multinomial", class_weight="balanced") clf2 = LogisticRegression(solver=solver, multi_class="multinomial", class_weight=class_weight_dict) clf1.fit(X, y) clf2.fit(X, y) assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4) # Binary case: remove 90% of class 0 and 100% of class 2 X = iris.data[45:100, :] y = iris.target[45:100] solvers = ("lbfgs", "newton-cg", "liblinear") class_weight_dict = _compute_class_weight_dictionary(y) for solver in solvers: clf1 = LogisticRegression(solver=solver, multi_class="ovr", class_weight="balanced") clf2 = LogisticRegression(solver=solver, multi_class="ovr", class_weight=class_weight_dict) clf1.fit(X, y) clf2.fit(X, y) assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6) def test_logistic_regression_convergence_warnings(): # Test that warnings are raised if model does not converge X, y = make_classification(n_samples=20, n_features=20, random_state=0) clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1) assert_warns(ConvergenceWarning, clf_lib.fit, X, y) assert_equal(clf_lib.n_iter_, 2) def test_logistic_regression_multinomial(): # Tests for the multinomial option in logistic regression # Some basic attributes of Logistic Regression n_samples, n_features, n_classes = 50, 20, 3 X, y = make_classification(n_samples=n_samples, n_features=n_features, n_informative=10, n_classes=n_classes, random_state=0) # 'lbfgs' is used as a referenced solver = 'lbfgs' ref_i = LogisticRegression(solver=solver, multi_class='multinomial') ref_w = LogisticRegression(solver=solver, multi_class='multinomial', fit_intercept=False) ref_i.fit(X, y) ref_w.fit(X, y) assert_array_equal(ref_i.coef_.shape, (n_classes, n_features)) assert_array_equal(ref_w.coef_.shape, (n_classes, n_features)) for solver in ['sag', 'saga', 'newton-cg']: clf_i = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, max_iter=2000, tol=1e-7, ) clf_w = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, max_iter=2000, tol=1e-7, fit_intercept=False) clf_i.fit(X, y) clf_w.fit(X, y) assert_array_equal(clf_i.coef_.shape, (n_classes, n_features)) assert_array_equal(clf_w.coef_.shape, (n_classes, n_features)) # Compare solutions between lbfgs and the other solvers 
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3) assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3) assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3) # Test that the path give almost the same results. However since in this # case we take the average of the coefs after fitting across all the # folds, it need not be exactly the same. for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']: clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6, multi_class='multinomial', Cs=[1.]) clf_path.fit(X, y) assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3) assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3) def test_multinomial_grad_hess(): rng = np.random.RandomState(0) n_samples, n_features, n_classes = 100, 5, 3 X = rng.randn(n_samples, n_features) w = rng.rand(n_classes, n_features) Y = np.zeros((n_samples, n_classes)) ind = np.argmax(np.dot(X, w.T), axis=1) Y[range(0, n_samples), ind] = 1 w = w.ravel() sample_weights = np.ones(X.shape[0]) grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1., sample_weight=sample_weights) # extract first column of hessian matrix vec = np.zeros(n_features * n_classes) vec[0] = 1 hess_col = hessp(vec) # Estimate hessian using least squares as done in # test_logistic_grad_hess e = 1e-3 d_x = np.linspace(-e, e, 30) d_grad = np.array([ _multinomial_grad_hess(w + t * vec, X, Y, alpha=1., sample_weight=sample_weights)[0] for t in d_x ]) d_grad -= d_grad.mean(axis=0) approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() assert_array_almost_equal(hess_col, approx_hess_col) def test_liblinear_decision_function_zero(): # Test negative prediction when decision_function values are zero. # Liblinear predicts the positive class when decision_function values # are zero. This is a test to verify that we do not do the same. # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600 # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623 X, y = make_classification(n_samples=5, n_features=5, random_state=0) clf = LogisticRegression(fit_intercept=False) clf.fit(X, y) # Dummy data such that the decision function becomes zero. X = np.zeros((5, 5)) assert_array_equal(clf.predict(X), np.zeros(5)) def test_liblinear_logregcv_sparse(): # Test LogRegCV with solver='liblinear' works for sparse matrices X, y = make_classification(n_samples=10, n_features=5, random_state=0) clf = LogisticRegressionCV(solver='liblinear') clf.fit(sparse.csr_matrix(X), y) def test_saga_sparse(): # Test LogRegCV with solver='liblinear' works for sparse matrices X, y = make_classification(n_samples=10, n_features=5, random_state=0) clf = LogisticRegressionCV(solver='saga') clf.fit(sparse.csr_matrix(X), y) def test_logreg_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: clf = LogisticRegression(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % clf.intercept_scaling) assert_raise_message(ValueError, msg, clf.fit, X, Y1) def test_logreg_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False clf = LogisticRegression(fit_intercept=False) clf.fit(X, Y1) assert_equal(clf.intercept_, 0.) def test_logreg_l1(): # Because liblinear penalizes the intercept and saga does not, we do not # fit the intercept to make it possible to compare the coefficients of # the two models at convergence. 
rng = np.random.RandomState(42) n_samples = 50 X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) X_noise = rng.normal(size=(n_samples, 3)) X_constant = np.ones(shape=(n_samples, 2)) X = np.concatenate((X, X_noise, X_constant), axis=1) lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear', fit_intercept=False, tol=1e-10) lr_liblinear.fit(X, y) lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga', fit_intercept=False, max_iter=1000, tol=1e-10) lr_saga.fit(X, y) assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) # Noise and constant features should be regularized to zero by the l1 # penalty assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) def test_logreg_l1_sparse_data(): # Because liblinear penalizes the intercept and saga does not, we do not # fit the intercept to make it possible to compare the coefficients of # the two models at convergence. rng = np.random.RandomState(42) n_samples = 50 X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) X_noise = rng.normal(scale=0.1, size=(n_samples, 3)) X_constant = np.zeros(shape=(n_samples, 2)) X = np.concatenate((X, X_noise, X_constant), axis=1) X[X < 1] = 0 X = sparse.csr_matrix(X) lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear', fit_intercept=False, tol=1e-10) lr_liblinear.fit(X, y) lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga', fit_intercept=False, max_iter=1000, tol=1e-10) lr_saga.fit(X, y) assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) # Noise and constant features should be regularized to zero by the l1 # penalty assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) # Check that solving on the sparse and dense data yield the same results lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga', fit_intercept=False, max_iter=1000, tol=1e-10) lr_saga_dense.fit(X.toarray(), y) assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_) def test_logreg_cv_penalty(): # Test that the correct penalty is passed to the final fit. X, y = make_classification(n_samples=50, n_features=20, random_state=0) lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear') lr_cv.fit(X, y) lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear') lr.fit(X, y) assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_)) def test_logreg_predict_proba_multinomial(): X, y = make_classification(n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10) # Predicted probabilites using the true-entropy loss should give a # smaller loss than those using the ovr method. clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs") clf_multi.fit(X, y) clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs") clf_ovr.fit(X, y) clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X)) assert_greater(clf_ovr_loss, clf_multi_loss) # Predicted probabilites using the soft-max function should give a # smaller loss than those using the logistic function. 
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X)) assert_greater(clf_wrong_loss, clf_multi_loss) @ignore_warnings def test_max_iter(): # Test that the maximum number of iteration is reached X, y_bin = iris.data, iris.target.copy() y_bin[y_bin == 2] = 0 solvers = ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs'] for max_iter in range(1, 5): for solver in solvers: for multi_class in ['ovr', 'multinomial']: if solver == 'liblinear' and multi_class == 'multinomial': continue lr = LogisticRegression(max_iter=max_iter, tol=1e-15, multi_class=multi_class, random_state=0, solver=solver) lr.fit(X, y_bin) assert_equal(lr.n_iter_[0], max_iter) def test_n_iter(): # Test that self.n_iter_ has the correct format. X, y = iris.data, iris.target y_bin = y.copy() y_bin[y_bin == 2] = 0 n_Cs = 4 n_cv_fold = 2 for solver in ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']: # OvR case n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0] clf = LogisticRegression(tol=1e-2, multi_class='ovr', solver=solver, C=1., random_state=42, max_iter=100) clf.fit(X, y) assert_equal(clf.n_iter_.shape, (n_classes,)) n_classes = np.unique(y).shape[0] clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr', solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42, max_iter=100) clf.fit(X, y) assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs)) clf.fit(X, y_bin) assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs)) # multinomial case n_classes = 1 if solver in ('liblinear', 'sag', 'saga'): break clf = LogisticRegression(tol=1e-2, multi_class='multinomial', solver=solver, C=1., random_state=42, max_iter=100) clf.fit(X, y) assert_equal(clf.n_iter_.shape, (n_classes,)) clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial', solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42, max_iter=100) clf.fit(X, y) assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs)) clf.fit(X, y_bin) assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs)) def test_warm_start(): # A 1-iteration second fit on same data should give almost same result # with warm starting, and quite different result without warm starting. # Warm starting does not work with liblinear solver. X, y = iris.data, iris.target solvers = ['newton-cg', 'sag', 'saga', 'lbfgs'] for warm_start in [True, False]: for fit_intercept in [True, False]: for solver in solvers: for multi_class in ['ovr', 'multinomial']: clf = LogisticRegression(tol=1e-4, multi_class=multi_class, warm_start=warm_start, solver=solver, random_state=42, max_iter=100, fit_intercept=fit_intercept) with ignore_warnings(category=ConvergenceWarning): clf.fit(X, y) coef_1 = clf.coef_ clf.max_iter = 1 clf.fit(X, y) cum_diff = np.sum(np.abs(coef_1 - clf.coef_)) msg = ("Warm starting issue with %s solver in %s mode " "with fit_intercept=%s and warm_start=%s" % (solver, multi_class, str(fit_intercept), str(warm_start))) if warm_start: assert_greater(2.0, cum_diff, msg) else: assert_greater(cum_diff, 2.0, msg) def test_saga_vs_liblinear(): iris = load_iris() X, y = iris.data, iris.target X = np.concatenate([X] * 10) y = np.concatenate([y] * 10) X_bin = X[y <= 1] y_bin = y[y <= 1] * 2 - 1 X_sparse, y_sparse = make_classification(n_samples=50, n_features=20, random_state=0) X_sparse = sparse.csr_matrix(X_sparse) for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)): for penalty in ['l1', 'l2']: n_samples = X.shape[0] # alpha=1e-3 is time consuming for alpha in np.logspace(-1, 1, 3): saga = LogisticRegression( C=1. 
/ (n_samples * alpha), solver='saga', multi_class='ovr', max_iter=200, fit_intercept=False, penalty=penalty, random_state=0, tol=1e-24) liblinear = LogisticRegression( C=1. / (n_samples * alpha), solver='liblinear', multi_class='ovr', max_iter=200, fit_intercept=False, penalty=penalty, random_state=0, tol=1e-24) saga.fit(X, y) liblinear.fit(X, y) # Convergence for alpha=1e-3 is very slow assert_array_almost_equal(saga.coef_, liblinear.coef_, 3) def test_dtype_match(): # Test that np.float32 input data is not cast to np.float64 when possible X_32 = np.array(X).astype(np.float32) y_32 = np.array(Y1).astype(np.float32) X_64 = np.array(X).astype(np.float64) y_64 = np.array(Y1).astype(np.float64) X_sparse_32 = sp.csr_matrix(X, dtype=np.float32) for solver in ['newton-cg']: for multi_class in ['ovr', 'multinomial']: # Check type consistency lr_32 = LogisticRegression(solver=solver, multi_class=multi_class) lr_32.fit(X_32, y_32) assert_equal(lr_32.coef_.dtype, X_32.dtype) # check consistency with sparsity lr_32_sparse = LogisticRegression(solver=solver, multi_class=multi_class) lr_32_sparse.fit(X_sparse_32, y_32) assert_equal(lr_32_sparse.coef_.dtype, X_sparse_32.dtype) # Check accuracy consistency lr_64 = LogisticRegression(solver=solver, multi_class=multi_class) lr_64.fit(X_64, y_64) assert_equal(lr_64.coef_.dtype, X_64.dtype) assert_almost_equal(lr_32.coef_, lr_64.coef_.astype(np.float32))
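# A minimal illustrative sketch (not one of the original tests above) of the
# class_weight / sample_weight equivalence that the liblinear tests exercise:
# weighting class 1 twice via class_weight should match passing per-sample
# weights of 2 for the class-1 rows. All *_demo names are made up here.
def _demo_class_weight_vs_sample_weight():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = make_classification(n_samples=60, n_features=5,
                                         random_state=0)
    sw = np.ones_like(y_demo, dtype=float)
    sw[y_demo == 1] = 2.0  # weight every class-1 sample twice

    clf_cw = LogisticRegression(solver="liblinear", fit_intercept=False,
                                class_weight={0: 1, 1: 2}, tol=1e-6,
                                random_state=0)
    clf_sw = LogisticRegression(solver="liblinear", fit_intercept=False,
                                tol=1e-6, random_state=0)
    clf_cw.fit(X_demo, y_demo)
    clf_sw.fit(X_demo, y_demo, sample_weight=sw)
    # Both fits should land on (nearly) the same coefficients.
    return np.allclose(clf_cw.coef_, clf_sw.coef_, atol=1e-3)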
bsd-3-clause
kjford/Connectomics
explore.py
1
2004
''' explore connectomics data ''' import procdata as p import numpy as np import pickle from sklearn.metrics import pairwise_distances large=0 if large: Ffile='normal-1/fluorescence_normal-1.txt' netfile='normal-1/network_normal-1.txt' prefix='normal' n=1000 predfile='results/XC_normal-1_allspikes_sym.csv' spikefile='normal-1/fluorescence_normal-1_spikes.p' posfile='normal-1/networkPositions_normal-1.txt' else: Ffile='small/fluorescence_iNet1_Size100_CC04inh.txt' netfile='small/network_iNet1_Size100_CC04inh.txt' prefix='smallcc04' n=100 predfile='results/xc_smallcc03_all_sym.csv' spikefile='small/fluorescence_iNet1_Size100_CC04inh_spikes.p' burstfile='small/fluorescence_iNet1_Size100_CC04inh_bursts.p' posfile='small/networkPositions_iNet1_Size100_CC04inh.txt' def VAT(d): dprime=np.zeros_like(d) n=d.shape[0] dmat=d+d.max()*np.identity(n) K=range(n) r1=dmat.sum(axis=1).argmin() dprime[0]=dmat[r1] I=[r1] K.pop(r1) for i in range(n)[1:]: subd=dmat[I] ri,rj=np.unravel_index(subd[:,K].argmin(),subd[:,K].shape) #rj=subd[:,K].argmin() I.append(K[rj]) if len(K)>=1: K.pop(rj) dprime=dmat[I][:,I] dprime=dprime-dprime*np.identity(n) return dprime,I # load network netw=p.loadNetwork(netfile,n) pos=p.loadPositions(posfile,n) xc=p.loadPrediction(predfile,prefix,n) cvmat=netw.dot(netw.T) distmat=pairwise_distances(netw,metric='hamming') allspikes=pickle.load(open(spikefile,'r')) bursts=pickle.load(open(burstfile,'r')) spikesonly=[] for i in range(n): spikesonly.append(list(set.difference(set(allspikes[i]),set(bursts[i])))) bothtimes=p.getPatterns(allspikes,n,minneurons=1) btimes=p.getPatterns(bursts,n,minneurons=1) stimes=p.getPatterns(spikesonly,n,minneurons=1) bfreq=btimes.sum(1)*1.0 sfreq=stimes.sum(1)*1.0 dprime,indord=VAT(distmat) reord=netw[indord] reord=reord[:,reord[-1].argsort()[::-1]]
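# Minimal usage sketch for the VAT helper defined above, run on a small
# synthetic distance matrix. `toy_points`, `dtoy`, `dtoy_ord` and `toy_order`
# are made-up names for illustration and are unrelated to the connectomics
# data loaded earlier in this script.
toy_points = np.random.RandomState(0).rand(10, 4)
dtoy = pairwise_distances(toy_points, metric='euclidean')
dtoy_ord, toy_order = VAT(dtoy)
# toy_order is a permutation of 0..9; dtoy_ord is dtoy reordered so that
# similar points sit next to each other, with the diagonal zeroed again.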
gpl-3.0
f3r/scikit-learn
setup.py
19
11460
#! /usr/bin/env python # # Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com> # 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr> # License: 3-clause BSD import subprocess descr = """A set of python modules for machine learning and data mining""" import sys import os import shutil from distutils.command.clean import clean as Clean from pkg_resources import parse_version if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins # This is a bit (!) hackish: we are setting a global variable so that the main # sklearn __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by scikit-learn to recursively # build the compiled extensions in sub-packages is based on the Python import # machinery. builtins.__SKLEARN_SETUP__ = True DISTNAME = 'scikit-learn' DESCRIPTION = 'A set of python modules for machine learning and data mining' with open('README.rst') as f: LONG_DESCRIPTION = f.read() MAINTAINER = 'Andreas Mueller' MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de' URL = 'http://scikit-learn.org' LICENSE = 'new BSD' DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/' # We can actually import a restricted version of sklearn that # does not need the compiled code import sklearn VERSION = sklearn.__version__ from sklearn._build_utils import cythonize # Optional setuptools features # We need to import setuptools early, if we want setuptools features, # as it monkey-patches the 'setup' function # For some commands, use setuptools SETUPTOOLS_COMMANDS = set([ 'develop', 'release', 'bdist_egg', 'bdist_rpm', 'bdist_wininst', 'install_egg_info', 'build_sphinx', 'egg_info', 'easy_install', 'upload', 'bdist_wheel', '--single-version-externally-managed', ]) if SETUPTOOLS_COMMANDS.intersection(sys.argv): import setuptools extra_setuptools_args = dict( zip_safe=False, # the package can run out of an .egg file include_package_data=True, ) else: extra_setuptools_args = dict() # Custom clean command to remove build artifacts class CleanCommand(Clean): description = "Remove build artifacts from the source tree" def run(self): Clean.run(self) # Remove c files if we are not within a sdist package cwd = os.path.abspath(os.path.dirname(__file__)) remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO')) if remove_c_files: cython_hash_file = os.path.join(cwd, 'cythonize.dat') if os.path.exists(cython_hash_file): os.unlink(cython_hash_file) print('Will remove generated .c files') if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sklearn'): for filename in filenames: if any(filename.endswith(suffix) for suffix in (".so", ".pyd", ".dll", ".pyc")): os.unlink(os.path.join(dirpath, filename)) continue extension = os.path.splitext(filename)[1] if remove_c_files and extension in ['.c', '.cpp']: pyx_file = str.replace(filename, extension, '.pyx') if os.path.exists(os.path.join(dirpath, pyx_file)): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname == '__pycache__': shutil.rmtree(os.path.join(dirpath, dirname)) cmdclass = {'clean': CleanCommand} # Optional wheelhouse-uploader features # To automate release of binary packages for scikit-learn we need a tool # to download the packages generated by travis and appveyor workers (with # version number matching the current release) and upload them all at once # to PyPI at release time. 
# The URL of the artifact repositories are configured in the setup.cfg file. WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all']) if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv): import wheelhouse_uploader.cmd cmdclass.update(vars(wheelhouse_uploader.cmd)) def configuration(parent_package='', top_path=None): if os.path.exists('MANIFEST'): os.remove('MANIFEST') from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) # Avoid non-useful msg: # "Ignoring attempt to set 'name' (from ... " config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sklearn') return config scipy_min_version = '0.9' numpy_min_version = '1.6.1' def get_scipy_status(): """ Returns a dictionary containing a boolean specifying whether SciPy is up-to-date, along with the version string (empty string if not installed). """ scipy_status = {} try: import scipy scipy_version = scipy.__version__ scipy_status['up_to_date'] = parse_version( scipy_version) >= parse_version(scipy_min_version) scipy_status['version'] = scipy_version except ImportError: scipy_status['up_to_date'] = False scipy_status['version'] = "" return scipy_status def get_numpy_status(): """ Returns a dictionary containing a boolean specifying whether NumPy is up-to-date, along with the version string (empty string if not installed). """ numpy_status = {} try: import numpy numpy_version = numpy.__version__ numpy_status['up_to_date'] = parse_version( numpy_version) >= parse_version(numpy_min_version) numpy_status['version'] = numpy_version except ImportError: numpy_status['up_to_date'] = False numpy_status['version'] = "" return numpy_status def generate_cython(): cwd = os.path.abspath(os.path.dirname(__file__)) print("Cythonizing sources") cythonize.main(cwd) def setup_package(): metadata = dict(name=DISTNAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, version=VERSION, download_url=DOWNLOAD_URL, long_description=LONG_DESCRIPTION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], cmdclass=cmdclass, **extra_setuptools_args) if len(sys.argv) == 1 or ( len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean'))): # For these actions, NumPy is not required, nor Cythonization # # They are required to succeed without Numpy for example when # pip is used to install Scikit-learn when Numpy is not yet present in # the system. 
try: from setuptools import setup except ImportError: from distutils.core import setup metadata['version'] = VERSION else: numpy_status = get_numpy_status() numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format( numpy_min_version) scipy_status = get_scipy_status() scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format( scipy_min_version) instructions = ("Installation instructions are available on the " "scikit-learn website: " "http://scikit-learn.org/stable/install.html\n") if numpy_status['up_to_date'] is False: if numpy_status['version']: raise ImportError("Your installation of Numerical Python " "(NumPy) {0} is out-of-date.\n{1}{2}" .format(numpy_status['version'], numpy_req_str, instructions)) else: raise ImportError("Numerical Python (NumPy) is not " "installed.\n{0}{1}" .format(numpy_req_str, instructions)) if scipy_status['up_to_date'] is False: if scipy_status['version']: raise ImportError("Your installation of Scientific Python " "(SciPy) {0} is out-of-date.\n{1}{2}" .format(scipy_status['version'], scipy_req_str, instructions)) else: raise ImportError("Scientific Python (SciPy) is not " "installed.\n{0}{1}" .format(scipy_req_str, instructions)) from numpy.distutils.core import setup metadata['configuration'] = configuration if len(sys.argv) >= 2 and sys.argv[1] not in 'config': # Cythonize if needed print('Generating cython files') cwd = os.path.abspath(os.path.dirname(__file__)) if not os.path.exists(os.path.join(cwd, 'PKG-INFO')): # Generate Cython sources, unless building from source release generate_cython() # Clean left-over .so file for dirpath, dirnames, filenames in os.walk( os.path.join(cwd, 'sklearn')): for filename in filenames: extension = os.path.splitext(filename)[1] if extension in (".so", ".pyd", ".dll"): pyx_file = str.replace(filename, extension, '.pyx') print(pyx_file) if not os.path.exists(os.path.join(dirpath, pyx_file)): os.unlink(os.path.join(dirpath, filename)) setup(**metadata) if __name__ == "__main__": setup_package()
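# Illustrative note on the dependency checks above (not used by the build):
# parse_version is what makes the NumPy/SciPy minimum-version comparison
# robust, since plain string comparison mis-orders multi-digit components.
def _demo_version_gate():
    assert parse_version('1.10.4') >= parse_version(numpy_min_version)
    assert not ('1.10.4' >= numpy_min_version)  # naive string compare is wrong here
    return True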
bsd-3-clause
gotomypc/scikit-learn
sklearn/linear_model/tests/test_base.py
120
10082
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # # License: BSD 3 clause import numpy as np from scipy import sparse from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.base import center_data, sparse_center_data from sklearn.utils import check_random_state from sklearn.datasets.samples_generator import make_sparse_uncorrelated from sklearn.datasets.samples_generator import make_regression def test_linear_regression(): # Test LinearRegression on a simple dataset. # a simple dataset X = [[1], [2]] Y = [1, 2] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [1, 2]) # test it also for degenerate input X = [[1]] Y = [0] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [0]) def test_fit_intercept(): # Test assertions on betas shape. X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) X3 = np.array([[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]]) y = np.array([1, 1]) lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y) lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y) assert_equal(lr2_with_intercept.coef_.shape, lr2_without_intercept.coef_.shape) assert_equal(lr3_with_intercept.coef_.shape, lr3_without_intercept.coef_.shape) assert_equal(lr2_without_intercept.coef_.ndim, lr3_without_intercept.coef_.ndim) def test_linear_regression_sparse(random_state=0): "Test that linear regression also works with sparse data" random_state = check_random_state(random_state) for i in range(10): n = 100 X = sparse.eye(n, n) beta = random_state.rand(n) y = X * beta[:, np.newaxis] ols = LinearRegression() ols.fit(X, y.ravel()) assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) assert_array_almost_equal(ols.residues_, 0) def test_linear_regression_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions" X, y = make_regression(random_state=random_state) Y = np.vstack((y, y)).T n_features = X.shape[1] clf = LinearRegression(fit_intercept=True) clf.fit((X), Y) assert_equal(clf.coef_.shape, (2, n_features)) Y_pred = clf.predict(X) clf.fit(X, y) y_pred = clf.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_linear_regression_sparse_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions with sparse data" random_state = check_random_state(random_state) X, y = make_sparse_uncorrelated(random_state=random_state) X = sparse.coo_matrix(X) Y = np.vstack((y, y)).T n_features = X.shape[1] ols = LinearRegression() ols.fit(X, Y) assert_equal(ols.coef_.shape, (2, n_features)) Y_pred = ols.predict(X) ols.fit(X, y.ravel()) y_pred = ols.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) expected_X_mean = np.mean(X, axis=0) # XXX: currently scaled to variance=n_samples expected_X_std = np.std(X, axis=0) * 
np.sqrt(X.shape[0]) expected_y_mean = np.mean(y, axis=0) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_center_data_multioutput(): n_samples = 200 n_features = 3 n_outputs = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_outputs) expected_y_mean = np.mean(y, axis=0) args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))] for center, X in args: _, yt, _, y_mean, _ = center(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(y_mean, np.zeros(n_outputs)) assert_array_almost_equal(yt, y) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) def test_center_data_weighted(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) sample_weight = rng.rand(n_samples) expected_X_mean = np.average(X, axis=0, weights=sample_weight) expected_y_mean = np.average(y, axis=0, weights=sample_weight) # XXX: if normalize=True, should we expect a weighted standard deviation? 
# Currently not weighted, but calculated with respect to weighted mean # XXX: currently scaled to variance=n_samples expected_X_std = (np.sqrt(X.shape[0]) * np.mean((X - expected_X_mean) ** 2, axis=0) ** .5) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_sparse_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) # random_state not supported yet in sparse.rand X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng X = X.tolil() y = rng.rand(n_samples) XA = X.toarray() # XXX: currently scaled to variance=n_samples expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0]) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt.A, XA / expected_X_std) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) def test_csr_sparse_center_data(): # Test output format of sparse_center_data, when input is csr X, y = make_regression() X[X < 2.5] = 0.0 csr = sparse.csr_matrix(X) csr_, y, _, _, _ = sparse_center_data(csr, y, True) assert_equal(csr_.getformat(), 'csr')
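# Small illustrative companion (deliberately kept out of the test_* namespace
# so it is not collected): the center_data contract the tests above rely on,
# including the "scaled to variance=n_samples" convention flagged in the XXX
# comments. X_demo / y_demo are made-up toy inputs.
def _demo_center_data_contract():
    rng = check_random_state(0)
    X_demo = rng.rand(6, 2)
    y_demo = rng.rand(6)
    Xt, yt, X_mean, y_mean, X_std = center_data(X_demo, y_demo,
                                                fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, X_demo.mean(axis=0))
    assert_array_almost_equal(X_std,
                              np.std(X_demo, axis=0) * np.sqrt(X_demo.shape[0]))
    assert_array_almost_equal(Xt, (X_demo - X_mean) / X_std)
    assert_array_almost_equal(yt, y_demo - y_demo.mean())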
bsd-3-clause
nolanliou/tensorflow
tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
153
6723
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """sklearn cross-support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import numpy as np import six def _pprint(d): return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()]) class _BaseEstimator(object): """This is a cross-import when sklearn is not available. Adopted from sklearn.BaseEstimator implementation. https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py """ def get_params(self, deep=True): """Get parameters for this estimator. Args: deep: boolean, optional If `True`, will return the parameters for this estimator and contained subobjects that are estimators. Returns: params : mapping of string to any Parameter names mapped to their values. """ out = dict() param_names = [name for name in self.__dict__ if not name.startswith('_')] for key in param_names: value = getattr(self, key, None) if isinstance(value, collections.Callable): continue # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The former have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Args: **params: Parameters. Returns: self Raises: ValueError: If params contain invalid names. """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' 
% (key, self.__class__.__name__)) setattr(self, key, value) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False)),) # pylint: disable=old-style-class class _ClassifierMixin(): """Mixin class for all classifiers.""" pass class _RegressorMixin(): """Mixin class for all regression estimators.""" pass class _TransformerMixin(): """Mixin class for all transformer estimators.""" class NotFittedError(ValueError, AttributeError): """Exception class to raise if estimator is used before fitting. This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. Examples: >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import NotFittedError >>> try: ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) ... except NotFittedError as e: ... print(repr(e)) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS NotFittedError('This LinearSVC instance is not fitted yet',) Copied from https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py """ # pylint: enable=old-style-class def _accuracy_score(y_true, y_pred): score = y_true == y_pred return np.average(score) def _mean_squared_error(y_true, y_pred): if len(y_true.shape) > 1: y_true = np.squeeze(y_true) if len(y_pred.shape) > 1: y_pred = np.squeeze(y_pred) return np.average((y_true - y_pred)**2) def _train_test_split(*args, **options): # pylint: disable=missing-docstring test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) if test_size is None and train_size is None: train_size = 0.75 elif train_size is None: train_size = 1 - test_size train_size = int(train_size * args[0].shape[0]) np.random.seed(random_state) indices = np.random.permutation(args[0].shape[0]) train_idx, test_idx = indices[:train_size], indices[train_size:] result = [] for x in args: result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)] return tuple(result) # If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn. TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False) if TRY_IMPORT_SKLEARN: # pylint: disable=g-import-not-at-top,g-multiple-import,unused-import from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin from sklearn.metrics import accuracy_score, log_loss, mean_squared_error from sklearn.cross_validation import train_test_split try: from sklearn.exceptions import NotFittedError except ImportError: try: from sklearn.utils.validation import NotFittedError except ImportError: pass else: # Naive implementations of sklearn classes and functions. BaseEstimator = _BaseEstimator ClassifierMixin = _ClassifierMixin RegressorMixin = _RegressorMixin TransformerMixin = _TransformerMixin accuracy_score = _accuracy_score log_loss = None mean_squared_error = _mean_squared_error train_test_split = _train_test_split
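# Illustrative sketch of the nested-parameter convention documented in
# _BaseEstimator.set_params above. _DemoInner and _DemoOuter are made-up
# classes that exist only to show the '<component>__<parameter>' syntax.
class _DemoInner(_BaseEstimator):

  def __init__(self, depth=1):
    self.depth = depth


class _DemoOuter(_BaseEstimator):

  def __init__(self, inner=None, alpha=0.1):
    self.inner = inner
    self.alpha = alpha


def _demo_nested_set_params():
  est = _DemoOuter(inner=_DemoInner(depth=1), alpha=0.1)
  est.set_params(alpha=0.5, inner__depth=3)  # also updates the nested estimator
  return est.alpha, est.inner.depth  # -> (0.5, 3)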
apache-2.0
jpzk/evopy
evopy/examples/experiments/parameter_c_dsessvc/latex.py
1
3638
''' This file is part of evopy. Copyright 2012 - 2013, Jendrik Poloczek evopy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. evopy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with evopy. If not, see <http://www.gnu.org/licenses/>. ''' from sys import path path.append("../../../..") from pickle import load from copy import deepcopy from numpy import matrix, log10, log2, array from scipy.stats import wilcoxon from itertools import chain from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR from evopy.strategies.ori_dses_svc import ORIDSESSVC from evopy.strategies.ori_dses import ORIDSES from evopy.simulators.simulator import Simulator from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1 from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2 from evopy.problems.schwefels_problem_26 import SchwefelsProblem26 from evopy.problems.tr_problem import TRProblem from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel from sklearn.cross_validation import KFold from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore from evopy.operators.scaling.scaling_dummy import ScalingDummy from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear from evopy.operators.termination.or_combinator import ORCombinator from evopy.operators.termination.accuracy import Accuracy from evopy.operators.termination.generations import Generations from evopy.operators.termination.convergence import Convergence from setup import * cfile = file("output/parameterc_file.save", "r") parameter_c = load(cfile) # statistics variable_names = ['min', 'max'] variables = {} for variable in variable_names: variables[variable] = create_problem_optimizer_map(0.0) filter_none = lambda x : type(x) != type(None) for problem in problems: for optimizer in optimizers[problem]: cs = parameter_c[problem][optimizer] cs = list(chain.from_iterable(cs)) variables['min'][problem][optimizer] = log2(min(filter(filter_none, cs))) variables['max'][problem][optimizer] = log2(max(filter(filter_none, cs))) results = file("output/results.tex","w") lines = [ "\\begin{tabularx}{\\textwidth}{l X X}\n", "\\toprule\n", "\\textbf{Problem} & Minimum & Maximum \\\\\n", "\midrule\n", "Kugel. R. 1 & $2^{%i}$ & $2^{%i}$ \\\\\n"\ % (variables['min'][SphereProblemOriginR1][get_method_SphereProblemR1_svc],\ variables['max'][SphereProblemOriginR1][get_method_SphereProblemR1_svc]),\ "Kugel. R. 2 & $2^{%i}$ & $2^{%i}$ \\\\\n"\ % (variables['min'][SphereProblemOriginR2][get_method_SphereProblemR2_svc],\ variables['max'][SphereProblemOriginR2][get_method_SphereProblemR2_svc]),\ "TR2 & $2^{%i}$ & $2^{%i}$ \\\\\n"\ % (variables['min'][TRProblem][get_method_TR_svc],\ variables['max'][TRProblem][get_method_TR_svc]),\ "2.60 mit R. & $2^{%i}$ & $2^{%i}$ \\\\\n"\ % (variables['min'][SchwefelsProblem26][get_method_Schwefel26_svc],\ variables['max'][SchwefelsProblem26][get_method_Schwefel26_svc]),\ "\\bottomrule\n",\ "\end{tabularx}\n"] results.writelines(lines) results.close()
gpl-3.0
vybstat/scikit-learn
sklearn/svm/tests/test_bounds.py
280
2541
import nose from nose.tools import assert_equal, assert_true from sklearn.utils.testing import clean_warning_registry import warnings import numpy as np from scipy import sparse as sp from sklearn.svm.bounds import l1_min_c from sklearn.svm import LinearSVC from sklearn.linear_model.logistic import LogisticRegression dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]] sparse_X = sp.csr_matrix(dense_X) Y1 = [0, 1, 1, 1] Y2 = [2, 1, 0, 0] def test_l1_min_c(): losses = ['squared_hinge', 'log'] Xs = {'sparse': sparse_X, 'dense': dense_X} Ys = {'two-classes': Y1, 'multi-class': Y2} intercepts = {'no-intercept': {'fit_intercept': False}, 'fit-intercept': {'fit_intercept': True, 'intercept_scaling': 10}} for loss in losses: for X_label, X in Xs.items(): for Y_label, Y in Ys.items(): for intercept_label, intercept_params in intercepts.items(): check = lambda: check_l1_min_c(X, Y, loss, **intercept_params) check.description = ('Test l1_min_c loss=%r %s %s %s' % (loss, X_label, Y_label, intercept_label)) yield check def test_l2_deprecation(): clean_warning_registry() with warnings.catch_warnings(record=True) as w: assert_equal(l1_min_c(dense_X, Y1, "l2"), l1_min_c(dense_X, Y1, "squared_hinge")) assert_equal(w[0].category, DeprecationWarning) def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None): min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling) clf = { 'log': LogisticRegression(penalty='l1'), 'squared_hinge': LinearSVC(loss='squared_hinge', penalty='l1', dual=False), }[loss] clf.fit_intercept = fit_intercept clf.intercept_scaling = intercept_scaling clf.C = min_c clf.fit(X, y) assert_true((np.asarray(clf.coef_) == 0).all()) assert_true((np.asarray(clf.intercept_) == 0).all()) clf.C = min_c * 1.01 clf.fit(X, y) assert_true((np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any()) @nose.tools.raises(ValueError) def test_ill_posed_min_c(): X = [[0, 0], [0, 0]] y = [0, 1] l1_min_c(X, y) @nose.tools.raises(ValueError) def test_unsupported_loss(): l1_min_c(dense_X, Y1, 'l1')
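# Illustrative restatement (deliberately not named test_*) of what l1_min_c
# returns, mirroring check_l1_min_c above for the 'log' loss without an
# intercept: at C = l1_min_c the L1-penalised model is still entirely sparse,
# and just above that threshold at least one coefficient becomes non-zero.
def _demo_l1_min_c_boundary():
    min_c = l1_min_c(dense_X, Y1, 'log', fit_intercept=False)
    clf = LogisticRegression(penalty='l1', fit_intercept=False, C=min_c)
    clf.fit(dense_X, Y1)
    assert_true((np.asarray(clf.coef_) == 0).all())
    clf.C = min_c * 1.01
    clf.fit(dense_X, Y1)
    assert_true((np.asarray(clf.coef_) != 0).any())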
bsd-3-clause
robin-lai/scikit-learn
examples/covariance/plot_mahalanobis_distances.py
348
6232
r""" ================================================================ Robust covariance estimation and Mahalanobis distances relevance ================================================================ An example to show covariance estimation with the Mahalanobis distances on Gaussian distributed data. For Gaussian distributed data, the distance of an observation :math:`x_i` to the mode of the distribution can be computed using its Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i - \mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are the location and the covariance of the underlying Gaussian distribution. In practice, :math:`\mu` and :math:`\Sigma` are replaced by some estimates. The usual covariance maximum likelihood estimate is very sensitive to the presence of outliers in the data set and therefor, the corresponding Mahalanobis distances are. One would better have to use a robust estimator of covariance to guarantee that the estimation is resistant to "erroneous" observations in the data set and that the associated Mahalanobis distances accurately reflect the true organisation of the observations. The Minimum Covariance Determinant estimator is a robust, high-breakdown point (i.e. it can be used to estimate the covariance matrix of highly contaminated datasets, up to :math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers) estimator of covariance. The idea is to find :math:`\frac{n_\text{samples}+n_\text{features}+1}{2}` observations whose empirical covariance has the smallest determinant, yielding a "pure" subset of observations from which to compute standards estimates of location and covariance. The Minimum Covariance Determinant estimator (MCD) has been introduced by P.J.Rousseuw in [1]. This example illustrates how the Mahalanobis distances are affected by outlying data: observations drawn from a contaminating distribution are not distinguishable from the observations coming from the real, Gaussian distribution that one may want to work with. Using MCD-based Mahalanobis distances, the two populations become distinguishable. Associated applications are outliers detection, observations ranking, clustering, ... For visualization purpose, the cubic root of the Mahalanobis distances are represented in the boxplot, as Wilson and Hilferty suggest [2] [1] P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984. [2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square. Proceedings of the National Academy of Sciences of the United States of America, 17, 684-688. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.covariance import EmpiricalCovariance, MinCovDet n_samples = 125 n_outliers = 25 n_features = 2 # generate data gen_cov = np.eye(n_features) gen_cov[0, 0] = 2. X = np.dot(np.random.randn(n_samples, n_features), gen_cov) # add some outliers outliers_cov = np.eye(n_features) outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7. 
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov) # fit a Minimum Covariance Determinant (MCD) robust estimator to data robust_cov = MinCovDet().fit(X) # compare estimators learnt from the full data set with true parameters emp_cov = EmpiricalCovariance().fit(X) ############################################################################### # Display results fig = plt.figure() plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05) # Show data set subfig1 = plt.subplot(3, 1, 1) inlier_plot = subfig1.scatter(X[:, 0], X[:, 1], color='black', label='inliers') outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:], color='red', label='outliers') subfig1.set_xlim(subfig1.get_xlim()[0], 11.) subfig1.set_title("Mahalanobis distances of a contaminated data set:") # Show contours of the distance functions xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100), np.linspace(plt.ylim()[0], plt.ylim()[1], 100)) zz = np.c_[xx.ravel(), yy.ravel()] mahal_emp_cov = emp_cov.mahalanobis(zz) mahal_emp_cov = mahal_emp_cov.reshape(xx.shape) emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov), cmap=plt.cm.PuBu_r, linestyles='dashed') mahal_robust_cov = robust_cov.mahalanobis(zz) mahal_robust_cov = mahal_robust_cov.reshape(xx.shape) robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov), cmap=plt.cm.YlOrBr_r, linestyles='dotted') subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1], inlier_plot, outlier_plot], ['MLE dist', 'robust dist', 'inliers', 'outliers'], loc="upper right", borderaxespad=0) plt.xticks(()) plt.yticks(()) # Plot the scores for each point emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33) subfig2 = plt.subplot(2, 2, 3) subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25) subfig2.plot(1.26 * np.ones(n_samples - n_outliers), emp_mahal[:-n_outliers], '+k', markeredgewidth=1) subfig2.plot(2.26 * np.ones(n_outliers), emp_mahal[-n_outliers:], '+k', markeredgewidth=1) subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15) subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)") plt.yticks(()) robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33) subfig3 = plt.subplot(2, 2, 4) subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]], widths=.25) subfig3.plot(1.26 * np.ones(n_samples - n_outliers), robust_mahal[:-n_outliers], '+k', markeredgewidth=1) subfig3.plot(2.26 * np.ones(n_outliers), robust_mahal[-n_outliers:], '+k', markeredgewidth=1) subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15) subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)") plt.yticks(()) plt.show()
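# Small numerical companion to the figure (illustration only): the MCD
# estimator also exposes which observations it kept as its "pure" subset via
# the support_ mask, and its covariance estimate should stay close to gen_cov
# while the MLE covariance is inflated by the outliers.
print("Observations kept by MCD: %d / %d" % (robust_cov.support_.sum(), n_samples))
print("MLE covariance:\n%s" % emp_cov.covariance_)
print("MCD covariance:\n%s" % robust_cov.covariance_)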
bsd-3-clause
zharfi/Cidar
Auto/Training_Part/Naive Bayes/30fitur/Cdr_NB.py
1
6292
# -*- coding: utf-8 -*- """ Created on Wed Jul 26 17:15:15 2017 @author: Visual.Sensor """ # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd import pickle # Applying Grid Search to find the best hyperparameter from sklearn.model_selection import GridSearchCV # Name filename_noproc = 'NB_noproc.sav' filename_pca = 'NB_pca.sav' filename_lda = 'NB_lda.sav' filename_kpca = 'NB_kpca.sav' filename_scale = 'scale.sav' filename_dr_pca = 'pca.sav' filename_dr_lda = 'lda.sav' filename_dr_kpca = 'kpca.sav' filename_res_noproc = 'NB_res_noproc.txt' filename_res_pca = 'NB_res_pca.txt' filename_res_lda = 'NB_res_lda.txt' filename_res_kpca = 'NB_res_kpca.txt' # Grid Searching with Parallel Computing def cariGrid(clsf, preproc, xtr, ytr, xte, yte, accu, std, test_accu): parameters = {"criterion": ["gini", "entropy"], "min_samples_split": [2, 4, 10, 20, 30], "max_depth": [None, 2, 5, 10, 15, 20], "min_samples_leaf": [1, 5, 10, 20], "max_leaf_nodes": [None, 5, 10, 15, 20] } grid_search = GridSearchCV(estimator=clsf, param_grid=parameters, scoring='accuracy', cv=10, n_jobs=-1, verbose=0) grid_search = grid_search.fit(xtr, ytr) best_accuracy = grid_search.best_score_ best_index = grid_search.best_index_ best_std = grid_search.cv_results_['std_test_score'][best_index] best_parameters = grid_search.best_params_ clsf = GaussianNB(**best_parameters).fit(xtr, ytr) # Calculate test accuracy with optimized training test_optimized = grid_search.score(xte, yte) if preproc == 'noproc': pickle.dump(clsf, open(filename_noproc, 'wb')) with open(filename_res_noproc, "w") as text_file: text_file.write("%f %f %f %f %f %f %s" % (accu, std, test_accu, best_accuracy, best_std, test_optimized, best_parameters)) elif preproc == 'pca': pickle.dump(clsf, open(filename_pca, 'wb')) with open(filename_res_pca, "w") as text_file: text_file.write("%f %f %f %f %f %f %s" % (accu, std, test_accu, best_accuracy, best_std, test_optimized, best_parameters)) elif preproc == 'lda': pickle.dump(clsf, open(filename_lda, 'wb')) with open(filename_res_lda, "w") as text_file: text_file.write("%f %f %f %f %f %f %s" % (accu, std, test_accu, best_accuracy, best_std, test_optimized, best_parameters)) else: pickle.dump(clsf, open(filename_kpca, 'wb')) with open(filename_res_kpca, "w") as text_file: text_file.write("%f %f %f %f %f %f %s" % (accu, std, test_accu, best_accuracy, best_std, test_optimized, best_parameters)) print(best_accuracy) print(best_std) print(best_parameters) print(test_optimized) # Importing the dataset dataset = pd.read_csv('100_auto_python.csv', sep=';') X = dataset.iloc[:, 0:29].values y = dataset.iloc[:, 29].values # Splitting the dataset into the Training set and Test set from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.20, random_state=0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) pickle.dump(sc, open(filename_scale, 'wb')) preprocess = ['noproc', 'pca', 'lda', 'kpca'] for i in preprocess: if i == 'noproc': pass elif i == 'pca': from sklearn.decomposition import PCA pca = PCA(n_components = 10) X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test) explained_variance = pca.explained_variance_ratio_ pickle.dump(pca, open(filename_dr_pca, 'wb')) elif i == 'lda': from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA lda = LDA(n_components= 5, ) X_train = 
lda.fit_transform(X_train, y_train) X_test = lda.transform(X_test) pickle.dump(lda, open(filename_dr_lda, 'wb')) else: from sklearn.decomposition import KernelPCA kpca = KernelPCA(n_components= 10, kernel='rbf') X_train = kpca.fit_transform(X_train) X_test = kpca.transform(X_test) pickle.dump(kpca, open(filename_dr_kpca, 'wb')) # Fitting classifier to the Training set from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(X_train, y_train) # Applying k-Fold Cross Validation from sklearn.model_selection import cross_val_score accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10) avg_accuracy = accuracies.mean() std_accuracy = accuracies.std() print('Akurasi: ', avg_accuracy) print('SD: ', std_accuracy) # Test unoptimized performance # Predicting the Test set results test_accuracy = classifier.score(X_test, y_test) print('Akurasi Tes:', test_accuracy) # Making the Confusion Matrix y_pred = classifier.predict(X_test) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) if i == 'noproc': pickle.dump(classifier, open(filename_noproc, 'wb')) with open(filename_res_noproc, "w") as text_file: text_file.write("%f %f %f" % (avg_accuracy, std_accuracy, test_accuracy)) elif i == 'pca': pickle.dump(classifier, open(filename_pca, 'wb')) with open(filename_res_pca, "w") as text_file: text_file.write("%f %f %f" % (avg_accuracy, std_accuracy, test_accuracy)) elif i == 'lda': pickle.dump(classifier, open(filename_lda, 'wb')) with open(filename_res_lda, "w") as text_file: text_file.write("%f %f %f" % (avg_accuracy, std_accuracy, test_accuracy)) else: pickle.dump(classifier, open(filename_kpca, 'wb')) with open(filename_res_kpca, "w") as text_file: text_file.write("%f %f %f" % (avg_accuracy, std_accuracy, test_accuracy)) # if __name__ == '__main__': # cariGrid(classifier, i, X_train, y_train, X_test, y_test, avg_accuracy, std_accuracy, test_accuracy)
mit
nitin-cherian/LifeLongLearning
Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/ipykernel/inprocess/tests/test_kernel.py
8
2417
# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import sys import unittest from ipykernel.inprocess.blocking import BlockingInProcessKernelClient from ipykernel.inprocess.manager import InProcessKernelManager from ipykernel.inprocess.ipkernel import InProcessKernel from ipykernel.tests.utils import assemble_output from IPython.testing.decorators import skipif_not_matplotlib from IPython.utils.io import capture_output from ipython_genutils import py3compat if py3compat.PY3: from io import StringIO else: from StringIO import StringIO class InProcessKernelTestCase(unittest.TestCase): def setUp(self): self.km = InProcessKernelManager() self.km.start_kernel() self.kc = self.km.client() self.kc.start_channels() self.kc.wait_for_ready() @skipif_not_matplotlib def test_pylab(self): """Does %pylab work in the in-process kernel?""" kc = self.kc kc.execute('%pylab') out, err = assemble_output(kc.iopub_channel) self.assertIn('matplotlib', out) def test_raw_input(self): """ Does the in-process kernel handle raw_input correctly? """ io = StringIO('foobar\n') sys_stdin = sys.stdin sys.stdin = io try: if py3compat.PY3: self.kc.execute('x = input()') else: self.kc.execute('x = raw_input()') finally: sys.stdin = sys_stdin self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar') def test_stdout(self): """ Does the in-process kernel correctly capture IO? """ kernel = InProcessKernel() with capture_output() as io: kernel.shell.run_cell('print("foo")') self.assertEqual(io.stdout, 'foo\n') kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session) kernel.frontends.append(kc) kc.execute('print("bar")') out, err = assemble_output(kc.iopub_channel) self.assertEqual(out, 'bar\n') def test_getpass_stream(self): "Tests that kernel getpass accept the stream parameter" kernel = InProcessKernel() kernel._allow_stdin = True kernel._input_request = lambda *args, **kwargs : None kernel.getpass(stream='non empty')
mit
pdanelson/bondora-auto-investor
parameter_tuning.py
1
1594
from datetime import datetime import pandas import numpy import xgboost from sklearn.model_selection import GridSearchCV from sklearn.externals import joblib from loan_classifier.data_transformer import DataTransformer # Preprocess data data = pandas.read_csv("LoanData.csv") data.LoanDate = data.LoanDate.astype("datetime64") data = data[(data.LoanDate + numpy.timedelta64(2, 'Y') < datetime.today()) & (data.Country == "EE")] input = DataTransformer().transform(data) target = pandas.isnull(data.DefaultDate) # Grid search to tune the parameters scale_pos_weight = (len(target) - sum(target))/sum(target) # nr of negative cases divided by nr of positive cases model = xgboost.XGBClassifier(scale_pos_weight=scale_pos_weight) cv_params = {"max_depth": [6, 7, 8, 9], "n_estimators": [200, 400, 600, 800], "min_child_weight": [1, 1.5], "gamma": [0, 0.01, 0.1], "learning_rate": [0.05, 0.1, 0.2]} grid_search = GridSearchCV(model, cv_params, scoring="roc_auc", n_jobs=-1, cv=5, verbose=1) grid_result = grid_search.fit(input, target) joblib.dump(grid_result, "grid_result.pkl") # Cross-validation with early stopping and best parameters from grid search to tune the number of boosting rounds dtrain = xgboost.DMatrix(input, target) params = {"objective": "binary:logistic", "scale_pos_weight": scale_pos_weight, "eta": 0.2, "max_depth": 8, "min_child_weight": 1.5, "gamma": 0} cv_xgb = xgboost.cv(params, dtrain, num_boost_round=2000, nfold=5, metrics=["auc"], early_stopping_rounds=200)
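# Hypothetical follow-up sketch showing how the artefacts produced above could
# be read back: best_params_ / best_score_ come from GridSearchCV, and with
# early stopping the frame returned by xgboost.cv is truncated at the best
# boosting round. The "test-auc-mean" column name follows xgboost's metric
# naming for metrics=["auc"].
reloaded = joblib.load("grid_result.pkl")
print(reloaded.best_params_)
print(reloaded.best_score_)
best_num_rounds = len(cv_xgb)
print(best_num_rounds, cv_xgb["test-auc-mean"].iloc[-1])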
mit
gloriakang/vax-sentiment
to_do/vax_temp/csv-multidig.py
1
3141
# coding: utf-8 # MultiDiGraph import pandas as pd import numpy as np import networkx as nx from copy import deepcopy import sys import matplotlib.pyplot as plt #%matplotlib inline from glob import glob fileName = sys.argv[1] def getFiles(fileName): matches = glob('*'+fileName+'*') bigFile = matches[0] data = pd.DataFrame.from_csv(bigFile) return clearSource(data) def clearSource(data): columns = ['source','target'] pre = len(data) for column in columns: data = data[pd.notnull(data[column])] post = len(data) print "Filtered %s rows to %s rows by removing rows with blank values in columns %s" % (pre,post,columns) return data def getStuff(data,labels): forEdges = labels == ['edge'] columns = list(data.columns.values) items = dict() nameFunc = {True: lambda x,y: '%s - %s - %s' % (x['source'],x['edge'],x['target']), False: lambda x,y: x[y]}[forEdges] extra = ['source','target'] * forEdges for label in labels: relevant = [col for col in columns if label+'-' in col] + extra print "Extracting %s data from %s" % (label,relevant) for i in data.index: row = data.ix[i] for col in relevant: if str(row[col]).lower() != 'nan': name = nameFunc(row,label) if name not in items: items[name] = dict() items[name][col.replace(label+'-','')] = row[col] return items def getNodes(data): return getStuff(data,['source','target']) def getEdges(data): return getStuff(data,['edge']) def addNodes(graph,nodes): for key,value in nodes.iteritems(): graph.add_node(key,attr_dict=value) return graph def addEdges(graph,edges): for key,value in edges.iteritems(): value['label'] = key value['edge'] = key.split(' - ')[1] graph.add_edge(value['source'],value['target'],attr_dict = value) return graph def createNetwork(edges,nodes): graph = nx.MultiDiGraph() graph = addNodes(graph,nodes) graph = addEdges(graph,edges) return graph def drawIt(graph,what='graph'): style=nx.spring_layout(graph) size = graph.number_of_nodes() print "Drawing %s of size %s:" % (what,size) if size > 20: plt.figure(figsize=(10,10)) if size > 40: nx.draw(graph,style,node_size=60,font_size=8) else: nx.draw(graph,style) else: nx.draw(graph,style) plt.show() def getGraph(fileRef): data = getFiles(fileName) nodes = getNodes(data) edges = getEdges(data) graph = createNetwork(edges,nodes) fileOut = fileRef.split('.')[0]+'.gml' print "Writing GML file to %s" % fileOut nx.write_gml(graph, fileOut) fileOutNet = fileRef.split('.')[0]+'.net' print "Writing net file to %s" % fileOutNet nx.write_pajek(graph, fileOutNet) params = (graph.number_of_nodes(),graph.number_of_edges()) print "Graph has %s nodes, %s edges" % params print getGraph(fileName)
mit
danellecline/mbari-aesa
learn.py
1
20418
#!/usr/bin/env python __author__ = "Danelle Cline" __copyright__ = "Copyright 2016, MBARI" __license__ = "GNU License" __maintainer__ = "Danelle Cline" __email__ = "dcline at mbari.org" __status__ = "Development" __doc__ = ''' This script runs transfer learning on the AESA training data set using the inception v3 model trained on ImageNet Based on the TensorFlow code: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py Prerequisites: @undocumented: __doc__ parser @author: __author__ @status: __status__ @license: __license__ ''' import json import conf import sys import argparse import os import util_plot import numpy as np import util import time import pandas as pd import transfer_model as transfer_model import transfer_model_multilabel as transfer_model_multilabel import tensorflow as tf from tensorflow.python.platform import gfile from scipy.misc import imresize def process_command_line(): from argparse import RawTextHelpFormatter examples = 'Examples:' + '\n\n' examples += sys.argv[0] + " --image_dir /tmp/data/images_by_group/cropped_images/" \ " --bottleneck_dir /tmp/data/images_by_group/cropped_images/bottleneck" \ " --model_dir /tmp/model_output/default" parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description='Run transfer learning on folder of images organized by label ', epilog=examples) # Input and output file flags. parser.add_argument('--image_dir', type=str, required=False, help="Path to folders of labeled images.") parser.add_argument('--exemplar_dir', type=str, required=True, help="Path to folders of exemplar images for each label") # where the model information lives parser.add_argument('--model_dir', type=str, default=os.path.join( "/tmp/tfmodels/img_classify", str(int(time.time()))), help='Directory for storing model info') # run prediction only parser.add_argument('--predict_only', dest='predict_only', action='store_true', help="Run prediction only; checkpointed model must exist.") parser.add_argument('--prediction_image_dir', type=str, default='prediction_images', help="Directory of images to use for predictions") # Details of the training configuration. parser.add_argument('--num_steps', type=int, default=15000, help="How many training steps to run before ending.") parser.add_argument('--learning_rate', type=float, default=0.01, help="How large a learning rate to use when training.") parser.add_argument('--testing_percentage', type=int, default=10, help="What percentage of images to use as a test set.") parser.add_argument('--validation_percentage', type=int, default=10, help="What percentage of images to use as a validation set.") parser.add_argument('--eval_step_interval', type=int, default=10, help="How often to evaluate the training results.") parser.add_argument('--train_batch_size', type=int, default=100, help="How many images to train on at a time.") parser.add_argument('--test_batch_size', type=int, default=500, help="""How many images to test on at a time. This test set is only used infrequently to verify the overall accuracy of the model.""") parser.add_argument( '--validation_batch_size', type=int, default=100, help="""How many images to use in an evaluation batch. This validation set is used much more often than the test set, and is an early indicator of how accurate the model is during training.""") # File-system cache locations. 
parser.add_argument('--incp_model_dir', type=str, default='/tmp/imagenet', help="""Path to graph.pb for a given model""") parser.add_argument('--bottleneck_dir', type=str, default='/tmp/bottlenecks', help="Path to cache bottleneck layer values as files.") parser.add_argument('--final_tensor_name', type=str, default='final_result', help="The name of the output classification layer in the retrained graph.") # Controls the distortions used during training. group = parser.add_mutually_exclusive_group() group.add_argument('--flip_left_right', action='store_true', default=False, help="Whether to randomly flip the training images horizontally.") parser.add_argument('--random_crop', type=int, default=0, help="""A percentage determining how much of a margin to randomly crop off the training images.""") parser.add_argument('--random_scale', type=int, default=0, help="""A percentage determining how much to randomly scale up the size of the training images by.""") parser.add_argument('--random_brightness', type=int, default=0, help="""A percentage determining how much to randomly multiply the training image input pixels up or down by.""") # Custom selections AESA training set parser.add_argument('--skiplt50', dest='skiplt50', action='store_true', help="Skip over classes less than 50 images") parser.add_argument('--exclude_unknown', dest='exclude_unknown', action='store_true', help="Exclude classes/categories that include the unknown category") parser.add_argument('--exclude_partials', dest='exclude_partials', action='store_true', help="Exclude partial fauna images from training/testing") parser.add_argument('--annotation_file', type=str, help="Path to annotation file.") parser.add_argument('--multilabel_category_group', action='store_true', default=False, help="Whether to learning a multilabel both by Category and Group)") parser.add_argument('--multilabel_group_feedingtype', action='store_true', default=False, help="Whether to learning a multilabel both by Group and Feeding Type)") parser.add_argument('--multilabel_tl_category', action='store_true', default=False, help="Whether to learning a multilabel both by TentacleLength and Category )") args = parser.parse_args() return args def create_inception_graph(sess, model_filename): """"Creates a graph from saved GraphDef file and returns a Graph object. Returns: Graph holding the trained Inception network, and various tensors we'll be manipulating. """ # import the graph and give me nodes where we want to pull the bottleneck data from with gfile.FastGFile(model_filename, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = ( tf.import_graph_def(graph_def, name='', return_elements=[ conf.BOTTLENECK_TENSOR_NAME, conf.JPEG_DATA_TENSOR_NAME, conf.RESIZED_INPUT_TENSOR_NAME])) return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor): """Retrieves or calculates bottleneck values for an image. If a cached version of the bottleneck data exists on-disk, return that, otherwise calculate the data and save it to disk for future use. Args: sess: The current active TensorFlow Session. image_lists: Dictionary of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. 
This will be modulo-ed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of which set to pull images from: training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: The tensor to feed loaded jpeg data into. bottleneck_tensor: The output tensor for the bottleneck values. Returns: Numpy array of values produced by the bottleneck layer for the image. Original image path string """ label_lists = image_lists[label_name] sub_dir = label_lists['dir'] sub_dir_path = os.path.join(bottleneck_dir, sub_dir) util.ensure_dir(sub_dir_path) bottleneck_path = util.get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category) image_path = util.get_image_path(image_lists, label_name, index, image_dir, category) if not os.path.exists(bottleneck_path): print('Creating bottleneck at ' + bottleneck_path) if not gfile.Exists(image_path): tf.logging.fatal('File does not exist %s', image_path) image_data = gfile.FastGFile(image_path, 'rb').read() bottleneck_values = util.run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor) bottleneck_string = ','.join(str(x) for x in bottleneck_values) with open(bottleneck_path, 'w') as bottleneck_file: bottleneck_file.write(bottleneck_string) with open(bottleneck_path, 'r') as bottleneck_file: bottleneck_string = bottleneck_file.read() bottleneck_values = [float(x) for x in bottleneck_string.split(',')] return bottleneck_values, image_path def add_images(sess, paths, model_dir): filename_queue = tf.train.string_input_producer(paths) reader = tf.WholeFileReader() # Read a whole file from the queue, the first returned value in the tuple is the # filename which we are ignoring. _, image_file = reader.read(filename_queue) # Decode the image as a JPEG file, this will turn it into a Tensor which we can # then use in training. nth_image = 10 num_images = int(len(paths)/nth_image) image = tf.image.decode_jpeg(image_file) image_tensors = np.zeros((num_images, conf.MODEL_INPUT_WIDTH, conf.MODEL_INPUT_WIDTH, 3), dtype=np.float32) # Add an Op to initialize all variables. init_op = tf.global_variables_initializer() with sess.as_default(): # Run the Op that initializes all variables. sess.run(init_op) # Coordinate the loading of image files. coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) j = 0 # Write summary writer = tf.summary.FileWriter(model_dir) if num_images > 0: for count, name in enumerate(paths, 1): if count % nth_image == 0: image_tensor = image.eval() image_tensors[j] = imresize(image_tensor, [conf.MODEL_INPUT_WIDTH,conf.MODEL_INPUT_WIDTH]) print(str(j) + ' images files created.') j += 1 # Add image summary summary_op = tf.summary.image("plot", image_tensors, num_images ) summary = sess.run(summary_op) writer.add_summary(summary) writer.close() # Finish off the filename queue coordinator. 
coord.request_stop() coord.join(threads) if __name__ == '__main__': df = pd.DataFrame() args = process_command_line() if args.annotation_file: print("Using annotation file " + args.annotation_file) if not gfile.Exists(args.annotation_file): print("Image directory '" + args.annotation_file + "' not found.") exit(-1) else: df = pd.read_csv(args.annotation_file, sep=',') '''if args.multilabel_category_group or args.multilabel_group_feedingtype and not args.annotation_file: print("Require the annotation file to determine the multiple labels") exit(-1) if args.exclude_partials and not args.annotation_file: print("Require the annotation file to determine the partial specimen images") exit(-1)''' sess = tf.Session() # Set up the pre-trained graph. print("Using model directory {0} and model from {1}".format(args.model_dir, conf.DATA_URL)) util.ensure_dir(args.model_dir) util.maybe_download_and_extract(data_url=conf.DATA_URL, dest_dir=args.incp_model_dir) model_filename = os.path.join(args.incp_model_dir, conf.MODEL_GRAPH_NAME) graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor=(create_inception_graph(sess, model_filename)) labels_list = None output_labels_file = os.path.join(args.model_dir, "output_labels.json") output_labels_file_lt20 = os.path.join(args.model_dir, "output_labels_lt20.json") d = os.path.dirname(output_labels_file_lt20) util.ensure_dir(d) # Create example images exemplars = util.create_image_exemplars(args.exemplar_dir) # Look at the folder structure, and create lists of all the images. if not args.predict_only: image_lists = util.create_image_lists(df, args.skiplt50, args.exclude_unknown, args.exclude_partials, output_labels_file, output_labels_file_lt20, args.image_dir, args.testing_percentage, args.validation_percentage) class_count = len(image_lists.keys()) if class_count == 0: print('No valid folders of images found at ' + args.image_dir) exit(-1) if class_count == 1: print('Only one valid folder of images found at ' + args.image_dir + ' - multiple classes are needed for classification.') exit(-1) # See if the command-line flags mean we're applying any distortions. do_distort_images = (args.flip_left_right or (args.random_crop != 0) or (args.random_scale != 0) or (args.random_brightness != 0)) if do_distort_images: # We will be applying distortions, so setup the operations we'll need. distorted_jpeg_data_tensor, distorted_image_tensor = util.add_input_distortions( args.flip_left_right, args.random_crop, args.random_scale, args.random_brightness) else: # We'll make sure we've calculated the 'bottleneck' image summaries and # cached them on disk. 
util.cache_bottlenecks(sess, image_lists, args.image_dir, args.bottleneck_dir, jpeg_data_tensor, bottleneck_tensor) if args.multilabel_category_group: train_bottlenecks, train_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks_multilabel_category_group( sess, df, image_lists, 'training', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) elif args.multilabel_group_feedingtype: train_bottlenecks, train_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks_multilabel_feedingtype( sess, df, image_lists, 'training', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) else: train_bottlenecks, train_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks(sess, image_lists, 'training', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) train_bottlenecks = np.array(train_bottlenecks) train_ground_truth = np.array(train_ground_truth) else: # load the labels list, needed to create the model; exit if it's not there if gfile.Exists(output_labels_file): with open(output_labels_file, 'r') as lfile: labels_string = lfile.read() labels_list = json.loads(labels_string) print("labels list: %s" % labels_list) class_count = len(labels_list) else: print("Labels list %s not found" % output_labels_file) exit(-1) # Define the custom estimator if args.multilabel_category_group or args.multilabel_group_feedingtype: class_count = 2*len(all_label_names) model_fn = transfer_model_multilabel.make_model_fn(class_count, args.final_tensor_name, args.learning_rate) else: model_fn = transfer_model.make_model_fn(class_count, args.final_tensor_name, args.learning_rate) model_params = {} classifier = tf.contrib.learn.Estimator(model_fn=model_fn, params=model_params, model_dir=args.model_dir) if not args.predict_only: # run the training print("Starting training for %s steps max" % args.num_steps) classifier.fit( x=train_bottlenecks.astype(np.float32), y=train_ground_truth, batch_size=10, max_steps=args.num_steps) # We've completed our training, so run a test evaluation on some new images we haven't used before. 
if args.multilabel_category_group: test_bottlenecks, test_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks_multilabel_category_group( sess, df, image_lists, 'testing', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) elif args.multilabel_group_feedingtype: test_bottlenecks, test_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks_multilabel_feedingtype( sess, df, image_lists, 'testing', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) else: test_bottlenecks, test_ground_truth, image_paths, all_label_names = util.get_all_cached_bottlenecks( sess, image_lists, 'testing', args.bottleneck_dir, args.image_dir, jpeg_data_tensor, bottleneck_tensor) test_bottlenecks = np.array(test_bottlenecks) test_ground_truth = np.array(test_ground_truth) print("evaluating....") if args.multilabel_category_group or args.multilabel_group_feedingtype: print("Evaluating cached bottlenecks") classifier.evaluate(test_bottlenecks.astype(np.float32), test_ground_truth) else: classifier.evaluate(test_bottlenecks.astype(np.float32), test_ground_truth) # write the output labels file if it doesn't already exist if gfile.Exists(output_labels_file): print("Labels list file already exists; not writing.") else: output_labels = json.dumps(list(image_lists.keys())) with gfile.FastGFile(output_labels_file, 'w') as f: f.write(output_labels) print("\nSaving metrics...") if not args.multilabel_category_group and not args.multilabel_group_feedingtype: util.save_metrics(args, classifier, test_bottlenecks.astype(np.float32), all_label_names, test_ground_truth, image_paths, image_lists) util_plot.plot_metrics(args.model_dir, '') else: util.save_metrics_category_group(args, classifier, test_bottlenecks.astype(np.float32), all_label_names, test_ground_truth, image_paths, image_lists) util_plot.plot_metrics(args.model_dir, 'multilabel_category_group') else: print("\nPredicting...") img_list = util.get_prediction_images(args.prediction_image_dir) if not img_list: print("No images found in %s" % args.prediction_image_dir) else: util.make_image_predictions(output_labels_file, classifier, jpeg_data_tensor, bottleneck_tensor, img_list, labels_list, os.path.join(args.prediction_image_dir,'classified')) print("Done !")
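# --- Hedged illustration (not part of the original script) ---
# A minimal sketch of what a cached "bottleneck" is: raw JPEG bytes are fed
# through the Inception graph built above and the penultimate feature vector
# is read out. This only mirrors what util.run_bottleneck_on_image is assumed
# to do internally; it reuses sess, bottleneck_tensor and jpeg_data_tensor
# from the main block, so it is meaningful only when run as __main__.
def example_bottleneck(image_path):
    """Compute one bottleneck vector for a single image file (illustrative)."""
    jpeg_bytes = gfile.FastGFile(image_path, 'rb').read()
    return np.squeeze(sess.run(bottleneck_tensor, {jpeg_data_tensor: jpeg_bytes}))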
gpl-3.0
YihaoLu/statsmodels
statsmodels/sandbox/examples/ex_random_panel.py
34
6012
# -*- coding: utf-8 -*- """ Created on Fri May 18 13:05:47 2012 Author: Josef Perktold moved example from main of random_panel """ import numpy as np from statsmodels.sandbox.panel.panel_short import ShortPanelGLS, ShortPanelGLS2 from statsmodels.sandbox.panel.random_panel import PanelSample import statsmodels.sandbox.panel.correlation_structures as cs import statsmodels.stats.sandwich_covariance as sw #from statsmodels.stats.sandwich_covariance import ( # S_hac_groupsum, weights_bartlett, _HCCM2) from statsmodels.stats.moment_helpers import cov2corr, se_cov cov_nw_panel2 = sw.cov_nw_groupsum examples = ['ex1'] if 'ex1' in examples: nobs = 100 nobs_i = 5 n_groups = nobs // nobs_i k_vars = 3 # dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_equi, # corr_args=(0.6,)) # dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_ar, # corr_args=([1, -0.95],)) dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_arma, corr_args=([1], [1., -0.9],), seed=377769) print('seed', dgp.seed) y = dgp.generate_panel() noise = y - dgp.y_true print(np.corrcoef(y.reshape(-1,n_groups, order='F'))) print(np.corrcoef(noise.reshape(-1,n_groups, order='F'))) mod = ShortPanelGLS2(y, dgp.exog, dgp.groups) res = mod.fit() print(res.params) print(res.bse) #Now what? #res.resid is of transformed model #np.corrcoef(res.resid.reshape(-1,n_groups, order='F')) y_pred = np.dot(mod.exog, res.params) resid = y - y_pred print(np.corrcoef(resid.reshape(-1,n_groups, order='F'))) print(resid.std()) err = y_pred - dgp.y_true print(err.std()) #OLS standard errors are too small mod.res_pooled.params mod.res_pooled.bse #heteroscedasticity robust doesn't help mod.res_pooled.HC1_se #compare with cluster robust se print(sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int)))) #not bad, pretty close to panel estimator #and with Newey-West Hac print(sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx))) #too small, assuming no bugs, #see Peterson assuming it refers to same kind of model print(dgp.cov) mod2 = ShortPanelGLS(y, dgp.exog, dgp.groups) res2 = mod2.fit_iterative(2) print(res2.params) print(res2.bse) #both implementations produce the same results: from numpy.testing import assert_almost_equal assert_almost_equal(res.params, res2.params, decimal=12) assert_almost_equal(res.bse, res2.bse, decimal=13) mod5 = ShortPanelGLS(y, dgp.exog, dgp.groups) res5 = mod5.fit_iterative(5) print(res5.params) print(res5.bse) #fitting once is the same as OLS #note: I need to create new instance, otherwise it continuous fitting mod1 = ShortPanelGLS(y, dgp.exog, dgp.groups) res1 = mod1.fit_iterative(1) res_ols = mod1._fit_ols() assert_almost_equal(res1.params, res_ols.params, decimal=12) assert_almost_equal(res1.bse, res_ols.bse, decimal=13) #cov_hac_panel with uniform_kernel is the same as cov_cluster for balanced #panel with full length kernel #I fixe default correction to be equal mod2._fit_ols() cov_clu = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int)) clubse = se_cov(cov_clu) cov_uni = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx, weights_func=sw.weights_uniform, use_correction='cluster') assert_almost_equal(cov_uni, cov_clu, decimal=7) #without correction cov_clu2 = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int), use_correction=False) cov_uni2 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx, weights_func=sw.weights_uniform, use_correction=False) assert_almost_equal(cov_uni2, cov_clu2, decimal=8) cov_white = sw.cov_white_simple(mod2.res_pooled) 
cov_pnw0 = sw.cov_nw_panel(mod2.res_pooled, 0, mod2.group.groupidx, use_correction='hac') assert_almost_equal(cov_pnw0, cov_white, decimal=13) time = np.tile(np.arange(nobs_i), n_groups) #time = mod2.group.group_int cov_pnw1 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx) cov_pnw2 = cov_nw_panel2(mod2.res_pooled, 4, time) #s = sw.group_sums(x, time) c2, ct, cg = sw.cov_cluster_2groups(mod2.res_pooled, time, dgp.groups.astype(int), use_correction=False) ct_nw0 = cov_nw_panel2(mod2.res_pooled, 0, time, weights_func=sw.weights_uniform, use_correction=False) cg_nw0 = cov_nw_panel2(mod2.res_pooled, 0, dgp.groups.astype(int), weights_func=sw.weights_uniform, use_correction=False) assert_almost_equal(ct_nw0, ct, decimal=13) assert_almost_equal(cg_nw0, cg, decimal=13) #pnw2 0 lags assert_almost_equal(cov_clu2, cg, decimal=13) assert_almost_equal(cov_uni2, cg, decimal=8) #pnw all lags import pandas as pa #pandas.DataFrame doesn't do inplace append se = pa.DataFrame(res_ols.bse[None,:], index=['OLS']) se = se.append(pa.DataFrame(res5.bse[None,:], index=['PGLSit5'])) clbse = sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int))) se = se.append(pa.DataFrame(clbse[None,:], index=['OLSclu'])) pnwse = sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx)) se = se.append(pa.DataFrame(pnwse[None,:], index=['OLSpnw'])) print(se) #list(se.index) from statsmodels.iolib.table import SimpleTable headers = [str(i) for i in se.columns] stubs=list(se.index) # print SimpleTable(np.round(np.asarray(se), 4), # headers=headers, # stubs=stubs) print(SimpleTable(np.asarray(se), headers=headers, stubs=stubs, txt_fmt=dict(data_fmts=['%10.4f']), title='Standard Errors'))
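# --- Hedged aside (not part of the original example) ---
# The cluster-robust standard errors compared above can also be requested
# through the public OLS interface; a minimal sketch reusing y, dgp.exog and
# dgp.groups as generated earlier in this example.
import statsmodels.api as sm
res_clu_api = sm.OLS(y, dgp.exog).fit(cov_type='cluster',
                                      cov_kwds={'groups': dgp.groups.astype(int)})
print(res_clu_api.bse)  # expected to be close to the cov_cluster-based 'OLSclu' row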
bsd-3-clause
cdawei/flickr-photo
src/traj_visualise.py
2
5397
#!/usr/bin/env python3

import sys
import random
import numpy as np
import pandas as pd
from datetime import datetime
from fastkml import kml, styles
from shapely.geometry import Point, LineString


def load_traj(ftable1, ftable2):
    """Load data"""
    traj_data = pd.read_csv(ftable1, parse_dates=[3], skipinitialspace=True)
    traj_stats = pd.read_csv(ftable2, parse_dates=[3], skipinitialspace=True)
    return traj_data, traj_stats


def gen_kml(fname, traj_data, traj_stats, traj_id_list, traj_name_list=None):
    """Generate KML file"""
    assert(len(traj_id_list) > 0)
    if traj_name_list:
        assert(len(traj_id_list) == len(traj_name_list))
    k = kml.KML()
    ns = '{http://www.opengis.net/kml/2.2}'
    stid = 'style1'
    # colors in KML: aabbggrr, aa=00 is fully transparent
    # developers.google.com/kml/documentation/kmlreference?hl=en#colorstyle
    st = styles.Style(id=stid, styles=[styles.LineStyle(color='2f0000ff', width=3)])  # transparent red
    doc = kml.Document(ns, '001', 'Trajectories', 'Trajectory visualization', styles=[st])
    k.append(doc)
    stats = traj_stats[traj_stats['Trajectory_ID'].isin(traj_id_list)]
    assert(stats.shape[0] == len(traj_id_list))
    pm_traj = []
    pm_photo = []
    for i in range(len(stats.index)):
        ri = stats.index[i]
        traj_id = stats.ix[ri]['Trajectory_ID']
        photos = traj_data[traj_data['Trajectory_ID'] == traj_id]
        lngs = [lng for lng in photos['Longitude'].tolist()]
        lats = [lat for lat in photos['Latitude'].tolist()]
        name = 'Trajectory_' + str(traj_id)
        if traj_name_list:
            name += '_' + traj_name_list[i]
        desc = ('User_ID: ' + str(stats.ix[ri]['User_ID']) +
                '<br/>Start_Time: ' + str(stats.ix[ri]['Start_Time']) +
                '<br/>Travel_Distance: ' + str(round(stats.ix[ri]['Travel_Distance(km)'], 2)) + ' km' +
                '<br/>Total_Time: ' + str(round(stats.ix[ri]['Total_Time(min)'], 2)) + ' min' +
                '<br/>Average_Speed: ' + str(round(stats.ix[ri]['Average_Speed(km/h)'], 2)) + ' km/h' +
                '<br/>#Photos: ' + str(stats.ix[ri]['#Photo']) +
                '<br/>Photos: ' + str(photos['Photo_ID'].tolist()))
        pm = kml.Placemark(ns, str(traj_id), name, desc, styleUrl='#' + stid)
        pm.geometry = LineString([(lngs[j], lats[j]) for j in range(len(lngs))])
        pm_traj.append(pm)
        for rj in photos.index:
            name = 'Photo_' + str(photos.ix[rj]['Photo_ID'])
            desc = ('Trajectory_ID: ' + str(traj_id) +
                    '<br/>Photo_ID: ' + str(photos.ix[rj]['Photo_ID']) +
                    '<br/>User_ID: ' + str(photos.ix[rj]['User_ID']) +
                    '<br/>Timestamp: ' + str(photos.ix[rj]['Timestamp']) +
                    '<br/>Coordinates: (' + str(photos.ix[rj]['Longitude']) + ', ' + str(photos.ix[rj]['Latitude']) + ')' +
                    '<br/>Accuracy: ' + str(photos.ix[rj]['Accuracy']) +
                    '<br/>URL: ' + str(photos.ix[rj]['URL']))
            pm = kml.Placemark(ns, str(photos.ix[rj]['Photo_ID']), name, desc)
            pm.geometry = Point(photos.ix[rj]['Longitude'], photos.ix[rj]['Latitude'])
            pm_photo.append(pm)
    for pm in pm_traj:
        doc.append(pm)
    for pm in pm_photo:
        doc.append(pm)
    kmlstr = k.to_string(prettyprint=True)
    with open(fname, 'w') as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write(kmlstr)


def main(ftable1, ftable2):
    """Main Procedure"""
    # load data
    traj_data, traj_stats = load_traj(ftable1, ftable2)

    # remove trajectories with only one photo
    traj_stats = traj_stats[traj_stats['#Photo'] > 1]

    # remove trajectories with (near-)zero travel distance
    traj_stats = traj_stats[traj_stats['Travel_Distance(km)'] > 1e-4]

    # trajectory with the most photos
    ri = traj_stats['#Photo'].idxmax()
    traj_id = traj_stats.ix[ri]['Trajectory_ID']
    fname = 'most_photos.kml'
    gen_kml(fname, traj_data, traj_stats, [traj_id], ['most_photos'])

    # trajectory that took the longest time
    ri = traj_stats['Total_Time(min)'].idxmax()
    traj_id = traj_stats.ix[ri]['Trajectory_ID']
    fname = 'longest_time.kml'
    gen_kml(fname, traj_data, traj_stats, [traj_id], ['longest_time'])

    # trajectory that covered the longest distance
    ri = traj_stats['Travel_Distance(km)'].idxmax()
    traj_id = traj_stats.ix[ri]['Trajectory_ID']
    fname = 'longest_distance.kml'
    gen_kml(fname, traj_data, traj_stats, [traj_id], ['longest_distance'])

    # trajectory with the highest average speed
    ri = traj_stats['Average_Speed(km/h)'].idxmax()
    traj_id = traj_stats.ix[ri]['Trajectory_ID']
    fname = 'highest_speed.kml'
    gen_kml(fname, traj_data, traj_stats, [traj_id], ['highest_speed'])

    # 5 random trajectories
    traj_id_list = traj_stats['Trajectory_ID'].sample(n=5).tolist()  # requires pandas version >= 0.16.1
    fname = 'random5.kml'
    gen_kml(fname, traj_data, traj_stats, traj_id_list)


if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: ', sys.argv[0], 'TRAJECTORY_PHOTO_FILE TRAJECTORY_STATS_FILE')
        print('e.g. : ', sys.argv[0], 'trajectory_photos.csv trajectory_stats.csv')
        sys.exit(0)

    ftable1 = sys.argv[1]
    ftable2 = sys.argv[2]
    main(ftable1, ftable2)
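# --- Hedged usage sketch (not part of the original script) ---
# gen_kml() also works for any hand-picked set of trajectories; the helper
# below (illustrative only) would export the five trajectories with the most
# photos, given frames loaded via load_traj() and filtered as in main().
def export_top5_most_photos(traj_data, traj_stats, fname='top5_most_photos.kml'):
    top5 = traj_stats.sort_values(by='#Photo', ascending=False).head(5)
    gen_kml(fname, traj_data, traj_stats, top5['Trajectory_ID'].tolist())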
gpl-2.0
jayflo/scikit-learn
examples/calibration/plot_calibration_multiclass.py
272
6972
""" ================================================== Probability Calibration for 3-class classification ================================================== This example illustrates how sigmoid calibration changes predicted probabilities for a 3-class classification problem. Illustrated is the standard 2-simplex, where the three corners correspond to the three classes. Arrows point from the probability vectors predicted by an uncalibrated classifier to the probability vectors predicted by the same classifier after sigmoid calibration on a hold-out validation set. Colors indicate the true class of an instance (red: class 1, green: class 2, blue: class 3). The base classifier is a random forest classifier with 25 base estimators (trees). If this classifier is trained on all 800 training datapoints, it is overly confident in its predictions and thus incurs a large log-loss. Calibrating an identical classifier, which was trained on 600 datapoints, with method='sigmoid' on the remaining 200 datapoints reduces the confidence of the predictions, i.e., moves the probability vectors from the edges of the simplex towards the center. This calibration results in a lower log-loss. Note that an alternative would have been to increase the number of base estimators which would have resulted in a similar decrease in log-loss. """ print(__doc__) # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD Style. import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.ensemble import RandomForestClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.metrics import log_loss np.random.seed(0) # Generate data X, y = make_blobs(n_samples=1000, n_features=2, random_state=42, cluster_std=5.0) X_train, y_train = X[:600], y[:600] X_valid, y_valid = X[600:800], y[600:800] X_train_valid, y_train_valid = X[:800], y[:800] X_test, y_test = X[800:], y[800:] # Train uncalibrated random forest classifier on whole train and validation # data and evaluate on test data clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train_valid, y_train_valid) clf_probs = clf.predict_proba(X_test) score = log_loss(y_test, clf_probs) # Train random forest classifier, calibrate on validation data and evaluate # on test data clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train, y_train) clf_probs = clf.predict_proba(X_test) sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit") sig_clf.fit(X_valid, y_valid) sig_clf_probs = sig_clf.predict_proba(X_test) sig_score = log_loss(y_test, sig_clf_probs) # Plot changes in predicted probabilities via arrows plt.figure(0) colors = ["r", "g", "b"] for i in range(clf_probs.shape[0]): plt.arrow(clf_probs[i, 0], clf_probs[i, 1], sig_clf_probs[i, 0] - clf_probs[i, 0], sig_clf_probs[i, 1] - clf_probs[i, 1], color=colors[y_test[i]], head_width=1e-2) # Plot perfect predictions plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1") plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2") plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3") # Plot boundaries of unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex") # Annotate points on the simplex plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)', xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.plot([1.0/3], [1.0/3], 'ko', ms=5) plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)', xy=(.5, .0), 
xytext=(.5, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)', xy=(.0, .5), xytext=(.1, .5), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)', xy=(.5, .5), xytext=(.6, .6), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $0$, $1$)', xy=(0, 0), xytext=(.1, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($1$, $0$, $0$)', xy=(1, 0), xytext=(1, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $1$, $0$)', xy=(0, 1), xytext=(.1, 1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') # Add grid plt.grid("off") for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], 'k', alpha=0.2) plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2) plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2) plt.title("Change of predicted probabilities after sigmoid calibration") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) plt.legend(loc="best") print("Log-loss of") print(" * uncalibrated classifier trained on 800 datapoints: %.3f " % score) print(" * classifier trained on 600 datapoints and calibrated on " "200 datapoint: %.3f" % sig_score) # Illustrate calibrator plt.figure(1) # generate grid over 2-simplex p1d = np.linspace(0, 1, 20) p0, p1 = np.meshgrid(p1d, p1d) p2 = 1 - p0 - p1 p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()] p = p[p[:, 2] >= 0] calibrated_classifier = sig_clf.calibrated_classifiers_[0] prediction = np.vstack([calibrator.predict(this_p) for calibrator, this_p in zip(calibrated_classifier.calibrators_, p.T)]).T prediction /= prediction.sum(axis=1)[:, None] # Ploit modifications of calibrator for i in range(prediction.shape[0]): plt.arrow(p[i, 0], p[i, 1], prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1], head_width=1e-2, color=colors[np.argmax(p[i])]) # Plot boundaries of unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex") plt.grid("off") for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], 'k', alpha=0.2) plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2) plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2) plt.title("Illustration of sigmoid calibrator") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) plt.show()
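# --- Hedged aside (not part of the original example) ---
# For comparison, the same prefit classifier could be calibrated isotonically
# on the identical hold-out split; a minimal sketch reusing clf, X_valid,
# y_valid, X_test and y_test defined above.
iso_clf = CalibratedClassifierCV(clf, method="isotonic", cv="prefit")
iso_clf.fit(X_valid, y_valid)
iso_score = log_loss(y_test, iso_clf.predict_proba(X_test))
print(" * classifier calibrated isotonically on 200 datapoints: %.3f" % iso_score)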
bsd-3-clause
themiurgo/folium
tests/test_folium.py
1
30095
# -*- coding: utf-8 -*- """ Folium Tests ------- """ import pytest import os import json try: from unittest import mock except ImportError: import mock import pandas as pd import jinja2 from jinja2 import Environment, PackageLoader import vincent import folium import base64 from folium.six import PY3 from folium.element import Html from folium.map import Popup, Marker, FitBounds, FeatureGroup from folium.features import (DivIcon, GeoJson, ColorScale, TopoJson, PolyLine, MultiPolyLine, ImageOverlay) rootpath = os.path.abspath(os.path.dirname(__file__)) def setup_data(): """Import economic data for testing.""" with open(os.path.join(rootpath, 'us-counties.json'), 'r') as f: get_id = json.load(f) county_codes = [x['id'] for x in get_id['features']] county_df = pd.DataFrame({'FIPS_Code': county_codes}, dtype=str) # Read into Dataframe, cast to string for consistency. df = pd.read_csv(os.path.join(rootpath, 'us_county_data.csv'), na_values=[' ']) df['FIPS_Code'] = df['FIPS_Code'].astype(str) # Perform an inner join, pad NA's with data from nearest county. merged = pd.merge(df, county_df, on='FIPS_Code', how='inner') return merged.fillna(method='pad') def test_get_templates(): """Test template getting.""" env = folium.utilities.get_templates() assert isinstance(env, jinja2.environment.Environment) class TestFolium(object): """Test class for the Folium library.""" def setup(self): """Setup Folium Map.""" with mock.patch('folium.element.uuid4') as uuid4: uuid4().hex = '0' * 32 self.map = folium.Map(location=[45.5236, -122.6750], width=900, height=400, max_zoom=20, zoom_start=4) self.env = Environment(loader=PackageLoader('folium', 'templates')) def test_init(self): """Test map initialization.""" assert self.map.get_name() == 'map_00000000000000000000000000000000' assert self.map.get_root() == self.map._parent assert self.map.location == [45.5236, -122.6750] assert self.map.zoom_start == 4 assert self.map.max_lat == 90 assert self.map.min_lat == -90 assert self.map.max_lon == 180 assert self.map.min_lon == -180 assert self.map.position == 'relative' assert self.map.height == (400, 'px') assert self.map.width == (900, 'px') assert self.map.left == (0, '%') assert self.map.top == (0, '%') assert self.map.to_dict() == { "name": "Map", "id": "00000000000000000000000000000000", "children": { "openstreetmap": { "name": "TileLayer", "id": "00000000000000000000000000000000", "children": {} } } } def test_cloudmade(self): """Test cloudmade tiles and the API key.""" with pytest.raises(ValueError): folium.Map(location=[45.5236, -122.6750], tiles='cloudmade') map = folium.Map(location=[45.5236, -122.6750], tiles='cloudmade', API_key='###') cloudmade = 'http://{s}.tile.cloudmade.com/###/997/256/{z}/{x}/{y}.png' assert map._children['cloudmade'].tiles == cloudmade def test_builtin_tile(self): """Test custom maptiles.""" default_tiles = ['OpenStreetMap', 'Stamen Terrain', 'Stamen Toner'] for tiles in default_tiles: map = folium.Map(location=[45.5236, -122.6750], tiles=tiles) tiles = ''.join(tiles.lower().strip().split()) url = 'tiles/{}/tiles.txt'.format attr = 'tiles/{}/attr.txt'.format url = map._env.get_template(url(tiles)).render() attr = map._env.get_template(attr(tiles)).render() assert map._children[tiles].tiles == url assert map._children[tiles].attr == attr def test_custom_tile(self): """Test custom tile URLs.""" url = 'http://{s}.custom_tiles.org/{z}/{x}/{y}.png' attr = 'Attribution for custom tiles' with pytest.raises(ValueError): folium.Map(location=[45.5236, -122.6750], tiles=url) map = 
folium.Map(location=[45.52, -122.67], tiles=url, attr=attr) assert map._children[url].tiles == url assert map._children[url].attr == attr def test_wms_layer(self): """Test WMS layer URLs.""" map = folium.Map(location=[44, -73], zoom_start=3) wms_url = 'http://gis.srh.noaa.gov/arcgis/services/NDFDTemps/' wms_url += 'MapServer/WMSServer' wms_name = "Temperature" wms_layers = 16 wms_format = "image/png" map.add_wms_layer(wms_name=wms_name, wms_url=wms_url, wms_format=wms_format, wms_layers=wms_layers, wms_transparent=True) wms_temp = self.env.get_template('wms_layer.js') wms = wms_temp.render({'wms_name': map._children[wms_name].get_name(), 'wms_url': wms_url, 'wms_format': wms_format, 'wms_layer_names': wms_layers, 'wms_transparent': 'true'}) assert (''.join(wms.split())[:-1] in ''.join(map.get_root().render().split())) def test_feature_group(self): """Test FeatureGroup.""" map = folium.Map() feature_group = FeatureGroup() feature_group.add_children(Marker([45, -30], popup=Popup('-30'))) feature_group.add_children(Marker([45, 30], popup=Popup('30'))) map.add_children(feature_group) map.add_children(folium.map.LayerControl()) map._repr_html_() def test_simple_marker(self): """Test simple marker addition.""" self.map = folium.Map(location=[44, -73], zoom_start=3) mark_templ = self.env.get_template('simple_marker.js') popup_templ = self.env.get_template('simple_popup.js') # Single Simple marker. self.map.simple_marker(location=[45.50, -122.7]) marker_1 = list(self.map._children.values())[-1] mark_1 = mark_templ.render({'marker': marker_1.get_name(), 'lat': 45.50, 'lon': -122.7, 'icon': "{icon:new L.Icon.Default()}"}) assert (''.join(mark_1.split())[:-1] in ''.join(self.map.get_root().render().split())) # assert self.map.template_vars['custom_markers'][0][2] == "" # Test Simple marker addition. self.map.simple_marker(location=[45.60, -122.8], popup='Hi') marker_2 = list(self.map._children.values())[-1] popup_2 = list(marker_2._children.values())[-1] html_2 = list(popup_2.html._children.values())[0] mark_2 = mark_templ.render({'marker': marker_2.get_name(), 'lat': 45.60, 'lon': -122.8, 'icon': "{icon:new L.Icon.Default()}"}) pop_2 = popup_templ.render({'pop_name': popup_2.get_name(), 'pop_txt': 'Hi', 'html_name': html_2.get_name(), 'width': 300}) # assert self.map.mark_cnt['simple'] == 2 assert (''.join(mark_2.split())[:-1] in ''.join(self.map.get_root().render().split())) assert (''.join(pop_2.split())[:-1] in ''.join(self.map.get_root().render().split())) # assert self.map.template_vars['custom_markers'][1][2] == pop_2 # Test no popup. self.map.simple_marker(location=[45.60, -122.8]) for child in list(self.map._children.values())[-1]._children.values(): assert not isinstance(child, Popup) def test_div_markers(self): '''Test div marker list addition''' self.map = folium.Map(location=[37.421114, -122.128314]) icon_templ = self.env.get_template('static_div_icon.js') mark_templ = self.env.get_template('simple_marker.js') popup_templ = self.env.get_template('simple_popup.js') # Test with popups (expected use case). 
locations = [[37.421114, -122.128314], [37.391637, -122.085416], [37.388832, -122.087709]] popups = ['1437494575531', '1437492135937', '1437493590434'] self.map.div_markers(locations=locations, popups=popups) markers = [marker for marker in self.map._children.values() if isinstance(marker, Marker)] assert len(markers) == 3 for marker, location, pop in zip(markers, locations, popups): icon = list(marker._children.values())[0] popup = list(marker._children.values())[1] html = list(popup.html._children.values())[0] assert isinstance(icon, DivIcon) assert isinstance(popup, Popup) assert isinstance(html, Html) icon_1 = icon_templ.render({'icon_name': icon.get_name(), 'size': 10}) mark_1 = mark_templ.render({'marker': marker.get_name(), 'lat': location[0], 'lon': location[1], 'icon': "{icon:new L.Icon.Default()}"}) popup_1 = popup_templ.render({'pop_name': popup.get_name(), 'html_name': html.get_name(), 'pop_txt': '{}'.format(pop), 'width': 300}) out = ''.join(self.map.get_root().render().split()) assert ''.join(icon_1.split())[:-1] in out assert ''.join(mark_1.split())[:-1] in out assert ''.join(popup_1.split())[:-1] in out # Test no popup. If there are no popups, # then we should get a RuntimeError. with pytest.raises(TypeError): self.map.div_markers([[45.60, -122.8]]) def test_circle_marker(self): """Test circle marker additions.""" self.map = folium.Map(location=[45.60, -122.8]) circ_templ = self.env.get_template('circle_marker.js') # Single Circle marker. self.map.circle_marker(location=[45.60, -122.8], popup='Hi') marker = list(self.map._children.values())[-1] circle_1 = circ_templ.render({'circle': marker.get_name(), 'lat': 45.60, 'lon': -122.8, 'radius': 500, 'line_color': 'black', 'fill_color': 'black', 'fill_opacity': 0.6}) assert (''.join(circle_1.split())[:-1] in ''.join(self.map.get_root().render().split())) # Second circle marker. self.map.circle_marker(location=[45.70, -122.9], popup='Hi') marker = list(self.map._children.values())[-1] circle_2 = circ_templ.render({'circle': marker.get_name(), 'lat': 45.70, 'lon': -122.9, 'radius': 500, 'line_color': 'black', 'fill_color': 'black', 'fill_opacity': 0.6}) assert (''.join(circle_2.split())[:-1] in ''.join(self.map.get_root().render().split())) def test_poly_marker(self): """Test polygon marker.""" self.map = folium.Map(location=[45.5, -122.5]) poly_temp = self.env.get_template('poly_marker.js') self.map.polygon_marker(location=[45.5, -122.5]) marker = list(self.map._children.values())[-1] polygon = poly_temp.render({'marker': marker.get_name(), 'lat': 45.5, 'lon': -122.5, 'line_color': 'black', 'line_opacity': 1, 'line_weight': 2, 'fill_color': 'blue', 'fill_opacity': 1, 'num_sides': 4, 'rotation': 0, 'radius': 15}) assert ((''.join(polygon.split()))[-1] in ''.join(self.map.get_root().render().split())) def test_latlng_pop(self): """Test lat/lon popovers.""" self.map.lat_lng_popover() pop = list(self.map._children.values())[-1] tmpl = 'lat_lng_popover.js' pop_templ = self.env.get_template(tmpl).render(popup=pop.get_name(), map=self.map.get_name()) assert ((''.join(pop_templ.split()))[:-1] in ''.join(self.map.get_root().render().split())) def test_click_for_marker(self): """Test click for marker functionality.""" # Lat/lon popover. 
self.map = folium.Map([46, 3]) self.map.click_for_marker() click_templ = self.env.get_template('click_for_marker.js') click = click_templ.render({'popup': ('"Latitude: " + lat + "<br>' 'Longitude: " + lng '), 'map': self.map.get_name()}) assert ((''.join(click.split()))[:-1] in ''.join(self.map.get_root().render().split())) # Custom popover. self.map.click_for_marker(popup='Test') click_templ = self.env.get_template('click_for_marker.js') click = click_templ.render({'popup': '"Test"', 'map': self.map.get_name()}) assert ((''.join(click.split()))[:-1] in ''.join(self.map.get_root().render().split())) def test_vega_popup(self): """Test vega popups.""" self.map = folium.Map([45.60, -122.8]) vega_templ = self.env.get_template('vega_marker.js') vega_parse = self.env.get_template('vega_parse.js') vis = vincent.Bar(width=675 - 75, height=350 - 50, no_data=True) data = json.loads(vis.to_json()) self.map.simple_marker(location=[45.60, -122.8], popup=(vis, 'vis.json')) marker = list(self.map._children.values())[-1] popup = list(marker._children.values())[-1] vega = list(popup._children.values())[-1] vega_str = vega_templ.render({'vega': vega.get_name(), 'popup': popup.get_name(), 'marker': marker.get_name(), 'vega_json': json.dumps(data), }) out = ''.join(self.map.get_root().render().split()) assert ''.join(vega_parse.render().split()) in out assert (''.join(vega_str.split()))[:-1] in out def test_geo_json_simple(self): """Test geojson method.""" # No data binding. self.map = folium.Map([43, -100], zoom_start=4) path = os.path.join(rootpath, 'us-counties.json') self.map.geo_json(geo_path=path) geo_json = [x for x in self.map._children.values() if isinstance(x, GeoJson)][0] color_scale = [x for x in self.map._children.values() if isinstance(x, ColorScale)][0] geo_json_style = list(geo_json._children.values())[0] out = ''.join(self.map._parent.render().split()) # Verify the geo_json object obj_temp = self.env.get_template('geo_json.js') obj = obj_temp.render(this=geo_json) assert ''.join(obj.split())[:-1] in out # Verify the style assert geo_json_style.color == 'black' assert geo_json_style.weight == 1 assert geo_json_style.opacity == 1 assert geo_json_style.fill_color == 'blue' assert geo_json_style.fill_opacity == 0.6 assert geo_json_style.dash_array == 0 style = geo_json_style._template.module.script(geo_json_style) assert ''.join(style.split())[:-1] in out # Verify the color_scale colorsc_temp = self.env.get_template('color_scale.js') colorsc = colorsc_temp.render(this=color_scale) assert ''.join(colorsc.split())[:-1] in out def test_geo_json_bad_color(self): """Test geojson method.""" self.map = folium.Map([43, -100], zoom_start=4) path = os.path.join(rootpath, 'us-counties.json') # Data binding incorrect color value error. data = setup_data() with pytest.raises(ValueError): self.map.geo_json(path, data=data, columns=['FIPS_Code', 'Unemployed_2011'], key_on='feature.id', fill_color='blue') def test_geo_json_bad_threshold_scale(self): """Test geojson method.""" self.map = folium.Map([43, -100], zoom_start=4) path = os.path.join(rootpath, 'us-counties.json') # Data binding threshold_scale too long. 
data = setup_data() with pytest.raises(ValueError): self.map.geo_json(path, data=data, columns=['FIPS_Code', 'Unemployed_2011'], key_on='feature.id', threshold_scale=[1, 2, 3, 4, 5, 6, 7], fill_color='YlGnBu') def test_geo_json_data_binding(self): """Test geojson method.""" data = setup_data() self.map = folium.Map([43, -100], zoom_start=4) path = os.path.join(rootpath, 'us-counties.json') # With DataFrame data binding, default threshold scale. self.map.geo_json(geo_path=path, data=data, threshold_scale=[4.0, 1000.0, 3000.0, 5000.0, 9000.0], columns=['FIPS_Code', 'Unemployed_2011'], key_on='feature.id', fill_color='YlGnBu', reset=True) out = self.map._parent.render() geo_json = [x for x in self.map._children.values() if isinstance(x, GeoJson)][0] color_scale = [x for x in self.map._children.values() if isinstance(x, ColorScale)][0] geo_json_style = list(geo_json._children.values())[0] # Verify the geo_json object. obj_temp = self.env.get_template('geo_json.js') obj = obj_temp.render(this=geo_json) assert ''.join(obj.split())[:-1] in ''.join(out.split()) # Verify the style. assert geo_json_style.color == 'black' assert geo_json_style.weight == 1 assert geo_json_style.opacity == 1 assert geo_json_style.fill_color == 'YlGnBu' assert geo_json_style.fill_opacity == 0.6 assert geo_json_style.dash_array == 0 style = geo_json_style._template.module.script(geo_json_style) assert ''.join(style.split())[:-1] in ''.join(out.split()) # Verify the colorscale domain = [4.0, 1000.0, 3000.0, 5000.0, 9000.0] palette = folium.utilities.color_brewer('YlGnBu') d3range = palette[0: len(domain) + 2] colorscale_obj = [val for key, val in self.map._children.items() if isinstance(val, ColorScale)][0] colorscale_temp = self.env.get_template('d3_threshold.js') colorscale = colorscale_temp.render({ 'this': colorscale_obj, 'domain': domain, 'range': d3range}) assert ''.join(colorscale.split())[:-1] in ''.join(out.split()) def test_topo_json(self): """Test geojson method.""" self.map = folium.Map([43, -100], zoom_start=4) # Adding TopoJSON as additional layer. path = os.path.join(rootpath, 'or_counties_topo.json') self.map.geo_json(geo_path=path, topojson='objects.or_counties_geo') out = self.map._parent.render() # Verify TopoJson topo_json = [val for key, val in self.map._children.items() if isinstance(val, TopoJson)][0] topojson_str = topo_json._template.module.script(topo_json) assert ''.join(topojson_str.split())[:-1] in ''.join(out.split()) def test_map_build(self): """Test map build.""" # Standard map. self.setup() out = self.map._parent.render() html_templ = self.env.get_template('fol_template.html') tile_layers = [ {'id': 'tile_layer_'+'0'*32, 'address': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', 'attr': ('Map data (c) <a href="http://openstreetmap.org">' 'OpenStreetMap</a> contributors'), 'max_zoom': 20, 'min_zoom': 1, 'detect_retina': False, }] tmpl = {'map_id': 'map_' + '0' * 32, 'lat': 45.5236, 'lon': -122.675, 'size': 'width: 900.0px; height: 400.0px;', 'zoom_level': 4, 'min_lat': -90, 'max_lat': 90, 'min_lon': -180, 'max_lon': 180, 'tile_layers': tile_layers} HTML = html_templ.render(tmpl, plugins={}) assert ''.join(out.split()) == ''.join(HTML.split()) def test_tile_attr_unicode(self): """Test tile attribution unicode Test not cover b'юникод' because for python 3 bytes can only contain ASCII literal characters. 
""" if not PY3: map = folium.Map(location=[45.5236, -122.6750], tiles='test', attr=b'unicode') map._parent.render() else: map = folium.Map(location=[45.5236, -122.6750], tiles='test', attr=u'юникод') map._parent.render() map = folium.Map(location=[45.5236, -122.6750], tiles='test', attr='юникод') map._parent.render() def test_create_map(self): """Test create map.""" map = folium.Map(location=[45.5236, -122.6750], tiles='test', attr='юникод') # Add json data. path = os.path.join(rootpath, 'us-counties.json') data = setup_data() map.geo_json(geo_path=path, data=data, columns=['FIPS_Code', 'Unemployed_2011'], key_on='feature.id', fill_color='YlGnBu', reset=True) # Add plugins. map.polygon_marker(location=[45.5, -122.5]) # Test write. map._parent.render() map.save('map.html') def test_line(self): """Test line.""" line_temp = self.env.get_template('polyline.js') line_opts = { 'color': 'blue', 'weight': 2, 'opacity': 1 } locations = [ [[45.5236, -122.6750], [45.5236, -122.6751]], [[45.5237, -122.6750], [45.5237, -122.6751]], [[45.5238, -122.6750], [45.5238, -122.6751]] ] self.setup() self.map.line(locations=locations, line_color=line_opts['color'], line_weight=line_opts['weight'], line_opacity=line_opts['opacity']) polyline = [val for key, val in self.map._children.items() if isinstance(val, PolyLine)][0] out = self.map._parent.render() line_rendered = line_temp.render({'line': 'line_1', 'this': polyline, 'locations': locations, 'options': line_opts}) assert ''.join(line_rendered.split()) in ''.join(out.split()) def test_multi_polyline(self): """Test multi_polyline.""" multiline_temp = self.env.get_template('multi_polyline.js') multiline_opts = {'color': 'blue', 'weight': 2, 'opacity': 1} locations = [[[45.5236, -122.6750], [45.5236, -122.6751]], [[45.5237, -122.6750], [45.5237, -122.6751]], [[45.5238, -122.6750], [45.5238, -122.6751]]] self.setup() self.map.multiline(locations=locations, line_color=multiline_opts['color'], line_weight=multiline_opts['weight'], line_opacity=multiline_opts['opacity']) multipolyline = [val for key, val in self.map._children.items() if isinstance(val, MultiPolyLine)][0] out = self.map._parent.render() multiline_rendered = multiline_temp.render({'multiline': 'multiline_1', 'this': multipolyline, 'locations': locations, 'options': multiline_opts}) assert ''.join(multiline_rendered.split()) in ''.join(out.split()) def test_fit_bounds(self): """Test fit_bounds.""" bounds = ((52.193636, -2.221575), (52.636878, -1.139759)) self.setup() self.map.fit_bounds(bounds) fitbounds = [val for key, val in self.map._children.items() if isinstance(val, FitBounds)][0] out = self.map._parent.render() fit_bounds_tpl = self.env.get_template('fit_bounds.js') fit_bounds_rendered = fit_bounds_tpl.render({ 'bounds': json.dumps(bounds), 'this': fitbounds, 'fit_bounds_options': {}, }) assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split()) self.setup() self.map.fit_bounds(bounds, max_zoom=15, padding=(3, 3)) fitbounds = [val for key, val in self.map._children.items() if isinstance(val, FitBounds)][0] out = self.map._parent.render() fit_bounds_tpl = self.env.get_template('fit_bounds.js') fit_bounds_rendered = fit_bounds_tpl.render({ 'bounds': json.dumps(bounds), 'fit_bounds_options': json.dumps({'maxZoom': 15, 'padding': (3, 3), }, sort_keys=True), 'this': fitbounds, }) assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split()) def test_image_overlay(self): """Test image overlay.""" # from numpy.random import random from folium.utilities import write_png # import base64 
data = [[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 0, 0.5], [0, 0, 1, 1], [0, 0, 1, 1]]] min_lon, max_lon, min_lat, max_lat = -90.0, 90.0, -180.0, 180.0 self.setup() image_url = 'data.png' self.map.image_overlay(data, filename=image_url) out = self.map._parent.render() imageoverlay = [val for key, val in self.map._children.items() if isinstance(val, ImageOverlay)][0] png_str = write_png(data) # with open('data.png', 'wb') as f: # f.write(png_str) png = "data:image/png;base64,{}".format inline_image_url = png(base64.b64encode(png_str).decode('utf-8')) image_tpl = self.env.get_template('image_layer.js') image_name = 'Image_Overlay' image_opacity = 0.25 image_bounds = [[min_lon, min_lat], [max_lon, max_lat]] image_rendered = image_tpl.render({'image_name': image_name, 'this': imageoverlay, 'image_url': image_url, 'image_bounds': image_bounds, 'image_opacity': image_opacity}) assert ''.join(image_rendered.split()) in ''.join(out.split()) self.setup() self.map.image_overlay(data, mercator_project=True) out = self.map._parent.render() imageoverlay = [val for key, val in self.map._children.items() if isinstance(val, ImageOverlay)][0] image_rendered = image_tpl.render({'image_name': image_name, 'this': imageoverlay, 'image_url': inline_image_url, 'image_bounds': image_bounds, 'image_opacity': image_opacity}) assert ''.join(image_rendered.split()) in ''.join(out.split()) def test_custom_icon(self): """Test CustomIcon.""" self.setup() icon_image = "http://leafletjs.com/docs/images/leaf-green.png" shadow_image = "http://leafletjs.com/docs/images/leaf-shadow.png" self.map = folium.Map([45, -100], zoom_start=4) i = folium.features.CustomIcon(icon_image, icon_size=(38, 95), icon_anchor=(22, 94), shadow_image=shadow_image, shadow_size=(50, 64), shadow_anchor=(4, 62), popup_anchor=(-3, -76),) mk = folium.map.Marker([45, -100], icon=i, popup=folium.map.Popup('Hello')) self.map.add_children(mk) self.map._parent.render()
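# --- Hedged note (not part of the original test module) ---
# The class above follows pytest conventions; a minimal programmatic entry
# point for running just this file (equivalent to `pytest test_folium.py`):
if __name__ == '__main__':
    pytest.main(['-q', __file__])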
mit
greenlin/tushare
tushare/datayes/basics.py
14
3722
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 2015-07-04
@author: JimmyLiu
@QQ:52799046
"""
from tushare.datayes import vars as vs
import pandas as pd
from pandas.compat import StringIO


class Basics():
    def __init__(self, client):
        self.client = client

    def dy_master_secID(self, ticker='000001', partyID='', cnSpell='',
                        assetClass='', field=''):
        """
        Security codes and basic listing information -- getSecID
        Given one or more security trading codes, returns the security ID
        (the unique identifier of the security in the data structure) together
        with basic listing information such as exchange, listing status,
        trading currency and ISIN code.
        """
        code, result = self.client.getData(vs.SEC_ID%(ticker, partyID, cnSpell, assetClass, field))
        return _ret_data(code, result)

    def dy_master_tradeCal(self, exchangeCD='XSHG,XSHE', beginDate='',
                           endDate='', field=''):
        """
        Exchange trading calendar -- getTradeCal
        Given an exchange and a date range, returns whether the market is
        open on each calendar date.
        """
        code, result = self.client.getData(vs.TRADE_DATE%(exchangeCD, beginDate, endDate, field))
        return _ret_data(code, result)

    def dy_master_equInfo(self, ticker='wx', pagesize='10', pagenum='1', field=''):
        """
        Keyboard wizard for Shanghai/Shenzhen stocks -- getEquInfo
        Matches stock codes and names by pinyin or by code; covers all
        currently listed Shanghai and Shenzhen stocks.
        """
        code, result = self.client.getData(vs.EQU_INFO%(ticker, pagesize, pagenum, field))
        return _ret_data(code, result)

    def dy_master_region(self, field=''):
        """
        Chinese regional classification, based on administrative divisions.
        getSecTypeRegion
        """
        code, result = self.client.getData(vs.REGION%(field))
        return _ret_data(code, result)

    def dy_master_regionRel(self, ticker='', typeID='', secID='', field=''):
        """
        Regional classification of Shanghai/Shenzhen stocks, based on the
        administrative region of the registered address.
        getSecTypeRegionRel
        """
        code, result = self.client.getData(vs.REGION_REL%(ticker, typeID, secID, field))
        return _ret_data(code, result)

    def dy_master_secType(self, field=''):
        """
        List of security classifications -- getSecType
        Top-level categories include Shanghai/Shenzhen stocks, Hong Kong
        stocks, funds, bonds, futures, options, etc.; each category is further
        subdivided, and all categories can be fetched in a single call.
        """
        code, result = self.client.getData(vs.SEC_TYPE%(field))
        return _ret_data(code, result)

    def dy_master_secTypeRel(self, ticker='', typeID='101001004001001', secID='', field=''):
        """
        Constituents of each security classification -- getSecTypeRel
        The classifications themselves can be obtained via getSecType.
        """
        code, result = self.client.getData(vs.SEC_TYPE_REL%(ticker, typeID, secID, field))
        return _ret_data(code, result)


def _ret_data(code, result):
    if code == 200:
        result = result.decode('utf-8') if vs.PY3 else result
        df = pd.read_csv(StringIO(result))
        return df
    else:
        print(result)
        return None
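# --- Hedged usage sketch (not part of the original module) ---
# Basics only needs an object exposing getData(url) that returns a
# (status_code, csv_payload) pair; DummyClient below is invented purely to
# show the call pattern and stands in for the real DataYes API client.
if __name__ == '__main__':
    class DummyClient(object):
        def getData(self, url):
            return 200, b'ticker,secShortName\n000001,PingAn Bank\n'

    bs = Basics(DummyClient())
    print(bs.dy_master_secID(ticker='000001'))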
bsd-3-clause
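A hypothetical usage sketch for the Basics wrapper above. The `client` argument is assumed to be a DataYes client object whose getData() returns a (status code, CSV payload) pair, as the methods above expect; the ticker and date strings are illustrative only.

from tushare.datayes.basics import Basics

def demo(client):
    api = Basics(client)
    # Security IDs and basic listing info for ticker 000001
    sec_df = api.dy_master_secID(ticker='000001')
    # Trading calendar for the Shanghai and Shenzhen exchanges
    # (date format assumed; adjust to whatever the DataYes service expects)
    cal_df = api.dy_master_tradeCal(exchangeCD='XSHG,XSHE',
                                    beginDate='20150101',
                                    endDate='20150201')
    return sec_df, cal_df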
TimBizeps/BachelorAP
V103_Biegung elastischer Stäbe/Auswertung.py
1
7054
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import scipy.constants as const from scipy.optimize import curve_fit def auswertung(material, querschnitt, einspannung, x, D, d, L, M): if einspannung == "einseitig": u = L*x**2 - x**3/3 g = const.g F = M*g # d = np.mean(k) # Δd = np.sqrt(1/(len(k)*(len(k)-1))*sum((d-k)**2)) if querschnitt == "kreisfoermig": I = np.pi/64*d**4 # ΔI = np.pi/16*d**3*Δd if querschnitt == "quadratisch": I = d**4/12 # ΔI = 1/3*d**3*Δd def f(x, m, b): return m*x + b params, cov = curve_fit(f, u, D) m = params[0] b = params[1] Δm = np.sqrt(cov[0][0]) Δb = np.sqrt(cov[1][1]) E = F/(2*I*m) # ΔE = np.sqrt((F/(2*I**2*m)*ΔI)**2+(F/(2*I*m**2)*Δm)**2) ΔE = np.sqrt((F/(2*I*m**2)*Δm)**2) t = np.linspace(u.min(), u.max(), 1000) plt.plot(u, 1000*D, 'rx', label='Messwerte') plt.plot(t, 1000*f(t, m, b), 'k-', label='Regressionsgerade') plt.xlim(u.min(), u.max()) plt.xlabel(r"$(Lx^2 - \frac{x^3}{3})/\mathrm{m}^3$") plt.ylabel(r"$D/\mathrm{mm}$") plt.legend(loc='best') plt.tight_layout() plt.savefig("build/plot_{}_{}_{}.pdf".format(material, querschnitt, einspannung)) plt.close() print( """ ------------------------------------------------------------------------ Material: {} Querschnitt: {} Einspannung: {} Durchmesser d: {} ± {:.5f} mm Länge L: {} cm Masse M: {} kg Flächenträgheitsmoment I: {} ± {} mm^4 Elastizitätsmodul E: {} ± {} N/m^2 Steigung m: {} ± {} Ordinatenabschnitt b: {} ± {} ------------------------------------------------------------------------ """.format(material, querschnitt, einspannung, d*1e3, 0, L*1e2, M, I*1e12, 0, E*1e0, ΔE*1e0, m, Δm, b, Δb)) if einspannung == "beidseitig": x1, x2 = np.array_split(x, 2) D1, D2 = np.array_split(D, 2) u1 = 3*L**2*x1 - 4*x1**3 u2 = 4*x2**3 - 12*L*x2**2 + 9*L**2*x2 - L**3 g = const.g F = M*g # d = np.mean(k) # Δd = np.sqrt(1/(len(k)*(len(k)-1))*sum((d-k)**2)) if querschnitt == "kreisfoermig": I = np.pi/64*d**4 # ΔI = np.pi/16*d**3*Δd if querschnitt == "quadratisch": I = d**4/12 # ΔI = 1/3*d**3*Δd def f(x, m, b): return m*x + b params1, cov1 = curve_fit(f, u1, D1) params2, cov2 = curve_fit(f, u2, D2) m1 = params1[0] m2 = params2[0] b1 = params1[1] b2 = params2[1] Δm1 = np.sqrt(cov1[0][0]) Δm2 = np.sqrt(cov2[0][0]) Δb1 = np.sqrt(cov1[1][1]) Δb2 = np.sqrt(cov2[1][1]) E1 = F/(48*I*m1) E2 = F/(48*I*m2) # ΔE1 = np.sqrt((F/(48*I**2*m1)*ΔI)**2+(F/(48*I*m1**2)*Δm1)**2) ΔE1 = np.sqrt((F/(48*I*m1**2)*Δm1)**2) # ΔE2 = np.sqrt((F/(48*I**2*m2)*ΔI)**2+(F/(48*I*m2**2)*Δm2)**2) ΔE2 = np.sqrt((F/(48*I*m2**2)*Δm2)**2) E = (E1+E2)/2 ΔE = np.sqrt(ΔE1**2+ΔE2**2)/2 t = np.linspace(u1.min(), u1.max(), 1000) plt.plot(u1, 1000*D1, 'rx', label='Messwerte') plt.plot(t, 1000*f(t, m1, b1), 'k-', label='Regressionsgerade') plt.xlim(u1.min(), u1.max()) plt.xlabel(r"$(3L^2x - 4x^3)/\mathrm{m}^3$") plt.ylabel(r"$D/\mathrm{mm}$") plt.legend(loc='best') plt.tight_layout() plt.savefig("build/plot_{}_{}_{}_1.pdf".format(material, querschnitt, einspannung)) plt.close() t = np.linspace(u2.min(), u2.max(), 1000) plt.plot(u2, 1000*D2, 'rx', label='Messwerte') plt.plot(t, 1000*f(t, m2, b2), 'k-', label='Regressionsgerade') plt.xlim(u2.min(), u2.max()) plt.xlabel(r"$(4x^3 - 12Lx^2 + 9L^2x - L^3)/\mathrm{m}^3$") plt.ylabel(r"$D/\mathrm{mm}$") plt.legend(loc='best') plt.tight_layout() plt.savefig("build/plot_{}_{}_{}_2.pdf".format(material, querschnitt, einspannung)) plt.close() print(""" ------------------------------------------------------------------------ Material: {} Querschnitt: {} Einspannung: {} Durchmesser d: {} ± {} mm Länge L: {} cm 
Masse M: {} kg Flächenträgheitsmoment I: {} ± {} mm^4 Elastizitätsmodul E1: {} ± {} N/m^2 Elastizitätsmodul E2: {} ± {} N/m^2 Elastizitätsmodul E: {} ± {} N/m^2 Steigung m1: {} ± {} Steigung m2: {} ± {} Ordinatenabschnitt b1: {} ± {} Ordinatenabschnitt b2: {} ± {} ------------------------------------------------------------------------ """.format(material, querschnitt, einspannung, d*1e3, 0, L*1e2, M, I*1e12, 0, E1*1e0, ΔE1*1e0, E2*1e0, ΔE2*1e0, E*1e0, ΔE*1e0, m1, Δm1, m2, Δm2, b1, Δb1, b2, Δb2)) ''' ############################################################################ # Test mit Messwerten von Philipp Leser # Aluminium, kreisförmiger Querschnitt, einseitig eingespannt # Daten einlesen x, D = np.loadtxt("data/daten_aluminium_quadratisch_beidseitig.txt", unpack=True) d = np.loadtxt("data/daten_aluminium_quadratisch_durchmesser.txt", unpack=True) L = 55.3 #[cm] M = 4.6944 #[kg] # Auswertung d *= 1e-3 L *= 1e-2 x *= 1e-2 D *= 1e-6 auswertung("Aluminium", "quadratisch", "beidseitig", x, D, d, L, M) ############################################################################ ''' # Aluminium, kreisförmiger Querschnitt, einseitig eingespannt # Daten einlesen x, D = np.loadtxt("Messing.txt", unpack=True) d = 10 #[mm] L = 40.70 #[cm] M = 2.3606 #[kg] # Auswertung d *= 1e-3 L *= 1e-2 x *= 1e-2 D *= 1e-6 auswertung("Messing", "quadratisch", "einseitig", x, D, d, L, M) # Messing, quadratischer Querschnitt, einseitig eingespannt # Daten einlesen x, D = np.loadtxt("alurund.txt", unpack=True) d = 10 #[mm] L = 34.8 #[cm] M = 1.1926 #[kg] # Auswertung d *= 1e-3 L *= 1e-2 x *= 1e-2 D *= 1e-6 auswertung("Aluminium", "kreisfoermig", "einseitig", x, D, d, L, M) # Stahl, quadratischer Querschnitt, beidseitig eingespannt # Daten einlesen x, D = np.loadtxt("alueckig.txt", unpack=True) d = 10 #[mm] L = 55.3 #[cm] M = 0 #[kg] # Auswertung d *= 1e-3 L *= 1e-2 x *= 1e-2 D *= 1e-6 auswertung("Aluminium", "quadratisch", "beidseitig" , x, D, d, L, M) x, D = np.loadtxt("alueckig2.txt", unpack=True) d = 10 #[mm] L = 55.3 #[cm] M = 3.5312 #[kg] # Auswertung d *= 1e-3 L *= 1e-2 x *= 1e-2 D *= 1e-6 auswertung("Aluminium", "quadratisch", "beidseitig", x, D, d, L, M)
gpl-3.0
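For reference, the regressions in Auswertung.py above linearise the standard beam-deflection relations that the code implements, with F = Mg the applied force, I the second moment of area and E the elastic modulus being determined:

\[
  D(x) = \frac{F}{2EI}\Bigl(Lx^2 - \frac{x^3}{3}\Bigr)
  \quad\Rightarrow\quad
  E = \frac{F}{2Im}
\]
for the one-sided clamping (slope m fitted against u = Lx^2 - x^3/3), and
\[
  D(x) = \frac{F}{48EI}\,(3L^2x - 4x^3), \qquad 0 \le x \le \tfrac{L}{2},
\]
\[
  D(x) = \frac{F}{48EI}\,(4x^3 - 12Lx^2 + 9L^2x - L^3), \qquad \tfrac{L}{2} \le x \le L,
\]
for the two-sided case, with \(I = \pi d^4/64\) for the circular and \(I = d^4/12\) for the square cross-section, exactly as coded.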
GbalsaC/bitnamiP
venv/share/doc/networkx-1.7/examples/drawing/labels_and_colors.py
44
1330
#!/usr/bin/env python
"""
Draw a graph with matplotlib, color by degree.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
import matplotlib.pyplot as plt
import networkx as nx

G = nx.cubical_graph()
pos = nx.spring_layout(G)  # positions for all nodes

# nodes
nx.draw_networkx_nodes(G, pos,
                       nodelist=[0, 1, 2, 3],
                       node_color='r',
                       node_size=500,
                       alpha=0.8)
nx.draw_networkx_nodes(G, pos,
                       nodelist=[4, 5, 6, 7],
                       node_color='b',
                       node_size=500,
                       alpha=0.8)

# edges
nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
nx.draw_networkx_edges(G, pos,
                       edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)],
                       width=8, alpha=0.5, edge_color='r')
nx.draw_networkx_edges(G, pos,
                       edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],
                       width=8, alpha=0.5, edge_color='b')

# some math labels
labels = {}
labels[0] = r'$a$'
labels[1] = r'$b$'
labels[2] = r'$c$'
labels[3] = r'$d$'
labels[4] = r'$\alpha$'
labels[5] = r'$\beta$'
labels[6] = r'$\gamma$'
labels[7] = r'$\delta$'
nx.draw_networkx_labels(G, pos, labels, font_size=16)

plt.axis('off')
plt.savefig("labels_and_colors.png")  # save as png
plt.show()  # display
agpl-3.0
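The module docstring above announces "color by degree", but the example then hard-codes two node lists. A hedged sketch of the degree-based variant, using only standard networkx/matplotlib arguments; a path graph is used here because the cubical graph is 3-regular, so degree-based colours would all coincide:

import matplotlib.pyplot as plt
import networkx as nx

G = nx.path_graph(8)
pos = nx.spring_layout(G)
degrees = [G.degree(n) for n in G.nodes()]   # numeric values -> colormap
nx.draw_networkx_nodes(G, pos, node_color=degrees, cmap=plt.cm.Blues,
                       node_size=500, alpha=0.8)
nx.draw_networkx_edges(G, pos, alpha=0.5)
plt.axis('off')
plt.show()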
mehdidc/scikit-learn
examples/linear_model/plot_ransac.py
250
1673
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example we see how to robustly fit a linear model to faulty data using the RANSAC algorithm. """ import numpy as np from matplotlib import pyplot as plt from sklearn import linear_model, datasets n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data model = linear_model.LinearRegression() model.fit(X, y) # Robustly fit linear model with RANSAC algorithm model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(X, y) inlier_mask = model_ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(-5, 5) line_y = model.predict(line_X[:, np.newaxis]) line_y_ransac = model_ransac.predict(line_X[:, np.newaxis]) # Compare estimated coefficients print("Estimated coefficients (true, normal, RANSAC):") print(coef, model.coef_, model_ransac.estimator_.coef_) plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers') plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers') plt.plot(line_X, line_y, '-k', label='Linear regressor') plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor') plt.legend(loc='lower right') plt.show()
bsd-3-clause
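A small optional follow-up (not part of the original example): since inlier_mask_ is a boolean array, averaging it reports the fraction of points RANSAC kept, assuming X, y and model_ransac from the script above are in scope.

import numpy as np

inlier_ratio = np.mean(model_ransac.inlier_mask_)
print("Fraction of points treated as inliers: %.2f" % inlier_ratio)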
JJGO/Parallel-Computing
4 Homework 4/3 Testing/2 Analysis OLD/13/time_analysis.py
1
3821
from matplotlib import pyplot # from mpltools import style import prettyplotlib as ppl # from mpltools import layout # style.use('ggplot') # figsize = layout.figaspect(scale=1.2) ps = [1,2,4,6,8,12,16,24,28,30,31,32,33,34] # ps = [1,2,4,8] # ps = range(1,9) best_paths = {} best_paths[13] = [0, 9, 1, 8, 7, 2, 3, 4, 11, 6, 10, 12, 5] best_paths[15] = [0, 13, 9, 1, 8, 14, 7, 2, 3, 4, 11, 6, 10, 12, 5] best_costs = {} best_costs[13] = 778.211287 best_costs[15] = 782.989290 # paths_explored = { p : [] for p in ps} n = 13 modes = ["DFS_DEBUG"] for mode in modes: times = { p : [] for p in ps} changes = { p : [] for p in ps} paths = { p : [] for p in ps} pushes = { p : [] for p in ps} print mode with open("Results_"+mode+".txt",'r') as f: # for i in range(9*8): # for j in range(8): # f.readline() while(f.readline()): n = int(f.readline()[:2]) p = int(f.readline().split()[0]) path,cost = f.readline().split(':') path = map(int,path.split(',')[:-1]) cost = float(cost[:-1]) f.readline() c,np,pu = [],[],[] for i in range(p): values = map(int,f.readline().split()) c.append(values[1]) np.append(values[2]) pu.append(values[3]) assert best_costs[n] == cost and best_paths[n] == path time = f.readline().split()[-2] times[p].append(float(time)) changes[p].append(c) paths[p].append(np) pushes[p].append(pu) f.readline() f.readline() # print times for k in times: times[k] = sorted(times[k])[:20] avs = { p : sum(times[p])/len(times[p]) for p in ps} mins = { p : min(times[p]) for p in ps } for p in ps: time = times[p] av = avs[p] print "{0} & {1} - {2:.5f} {3:.5f} {4:.5f}".format(n,p,av,min(time),max(time)) # " ".join(map(str,time)) paths_per_thread = {} total_paths = {} for proc in ps: x = [sum(p) for p in paths[proc]] total_paths[proc] = sum(x)/len(x) y = [sum(p)*1.0/len(p) for p in paths[proc]] paths_per_thread[proc] = sum(y)/len(y) print "{0} - {1:09.1f} {2:09.1f} {3:7d}".format(proc, paths_per_thread[proc], total_paths[proc]*1.0/proc, total_paths[proc]) # for k in sorted(times): # print k,len(times[k]) ideals = map(lambda x: avs[ps[0]]/x,ps) fig = pyplot.figure() ppl.plot(ps,ideals, 'go-') ppl.plot(ps,[avs[p] for p in ps], 'ro-') ppl.plot(ps,[mins[p] for p in ps], 'bo-') pyplot.xlabel('Processors') pyplot.ylabel('Time (s)') pyplot.title('Running Times for n = '+str(n)) pyplot.legend(['Ideal Case','Average Case','Best Case'],loc=3) pyplot.yscale('log') pyplot.savefig(str(n)+'_'+mode+'.png') # pyplot.show() SpeedUp = { p : avs[1]/avs[p] for p in ps } Efficiency = { p : SpeedUp[p]/p for p in ps } # for n in ns: fig = pyplot.figure() ppl.plot(ps,ps, 'go-') ppl.plot(ps,[SpeedUp[p] for p in ps], 'ro-') pyplot.xlabel('Processors') pyplot.ylabel('SpeedUp') pyplot.title('Comparison of SpeedUp') pyplot.legend(['Ideal SpeedUp','n = '+str(n)],loc=2) pyplot.savefig('SpeedUp_'+mode+'.png') # pyplot.show() fig = pyplot.figure() ppl.plot(ps,[1]*len(ps), 'go-') ppl.plot(ps,[Efficiency[p] for p in ps], 'ro-') pyplot.xlabel('Processors') pyplot.ylabel('Efficiency') axes = pyplot.gca() # axes.set_xlim([1,35]) axes.set_ylim([0,1.1]) pyplot.title('Comparison of Efficiencies') pyplot.legend(['Ideal Efficiency','n = '+str(n)],loc=3) pyplot.savefig('Efficiency_'+mode+'.png') # pyplot.show()
gpl-2.0
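The SpeedUp and Efficiency dictionaries and the 'ideal' curves in the plotting script above follow the usual strong-scaling definitions, with T(p) the averaged wall-clock time on p processors:

\[
  S(p) = \frac{T(1)}{T(p)}, \qquad
  E(p) = \frac{S(p)}{p}, \qquad
  T_{\mathrm{ideal}}(p) = \frac{T(1)}{p}.
\]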
datapythonista/pandas
pandas/tests/window/moments/test_moments_rolling_skew_kurt.py
3
5452
from functools import partial import numpy as np import pytest import pandas.util._test_decorators as td from pandas import ( DataFrame, Series, concat, isna, notna, ) import pandas._testing as tm import pandas.tseries.offsets as offsets @td.skip_if_no_scipy @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) def test_series(series, sp_func, roll_func): import scipy.stats compare_func = partial(getattr(scipy.stats, sp_func), bias=False) result = getattr(series.rolling(50), roll_func)() assert isinstance(result, Series) tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:])) @td.skip_if_no_scipy @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) def test_frame(raw, frame, sp_func, roll_func): import scipy.stats compare_func = partial(getattr(scipy.stats, sp_func), bias=False) result = getattr(frame.rolling(50), roll_func)() assert isinstance(result, DataFrame) tm.assert_series_equal( result.iloc[-1, :], frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw), check_names=False, ) @td.skip_if_no_scipy @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) def test_time_rule_series(series, sp_func, roll_func): import scipy.stats compare_func = partial(getattr(scipy.stats, sp_func), bias=False) win = 25 ser = series[::2].resample("B").mean() series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)() last_date = series_result.index[-1] prev_date = last_date - 24 * offsets.BDay() trunc_series = series[::2].truncate(prev_date, last_date) tm.assert_almost_equal(series_result[-1], compare_func(trunc_series)) @td.skip_if_no_scipy @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) def test_time_rule_frame(raw, frame, sp_func, roll_func): import scipy.stats compare_func = partial(getattr(scipy.stats, sp_func), bias=False) win = 25 frm = frame[::2].resample("B").mean() frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)() last_date = frame_result.index[-1] prev_date = last_date - 24 * offsets.BDay() trunc_frame = frame[::2].truncate(prev_date, last_date) tm.assert_series_equal( frame_result.xs(last_date), trunc_frame.apply(compare_func, raw=raw), check_names=False, ) @td.skip_if_no_scipy @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) def test_nans(sp_func, roll_func): import scipy.stats compare_func = partial(getattr(scipy.stats, sp_func), bias=False) obj = Series(np.random.randn(50)) obj[:10] = np.NaN obj[-10:] = np.NaN result = getattr(obj.rolling(50, min_periods=30), roll_func)() tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10])) # min_periods is working correctly result = getattr(obj.rolling(20, min_periods=15), roll_func)() assert isna(result.iloc[23]) assert not isna(result.iloc[24]) assert not isna(result.iloc[-6]) assert isna(result.iloc[-5]) obj2 = Series(np.random.randn(20)) result = getattr(obj2.rolling(10, min_periods=5), roll_func)() assert isna(result.iloc[3]) assert notna(result.iloc[4]) result0 = getattr(obj.rolling(20, min_periods=0), roll_func)() result1 = getattr(obj.rolling(20, min_periods=1), roll_func)() tm.assert_almost_equal(result0, result1) @pytest.mark.parametrize("minp", [0, 99, 100]) @pytest.mark.parametrize("roll_func", ["kurt", "skew"]) def test_min_periods(series, minp, roll_func): result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)() expected = getattr(series.rolling(len(series), 
min_periods=minp), roll_func)() nan_mask = isna(result) tm.assert_series_equal(nan_mask, isna(expected)) nan_mask = ~nan_mask tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) @pytest.mark.parametrize("roll_func", ["kurt", "skew"]) def test_center(roll_func): obj = Series(np.random.randn(50)) obj[:10] = np.NaN obj[-10:] = np.NaN result = getattr(obj.rolling(20, center=True), roll_func)() expected = getattr(concat([obj, Series([np.NaN] * 9)]).rolling(20), roll_func)()[ 9: ].reset_index(drop=True) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("roll_func", ["kurt", "skew"]) def test_center_reindex_series(series, roll_func): # shifter index s = [f"x{x:d}" for x in range(12)] series_xp = ( getattr( series.reindex(list(series.index) + s).rolling(window=25), roll_func, )() .shift(-12) .reindex(series.index) ) series_rs = getattr(series.rolling(window=25, center=True), roll_func)() tm.assert_series_equal(series_xp, series_rs) @pytest.mark.slow @pytest.mark.parametrize("roll_func", ["kurt", "skew"]) def test_center_reindex_frame(frame, roll_func): # shifter index s = [f"x{x:d}" for x in range(12)] frame_xp = ( getattr( frame.reindex(list(frame.index) + s).rolling(window=25), roll_func, )() .shift(-12) .reindex(frame.index) ) frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)() tm.assert_frame_equal(frame_xp, frame_rs)
bsd-3-clause
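A short standalone illustration (not taken from the test module above) of the public rolling API those tests exercise:

import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(100))
rolling_skew = s.rolling(window=25, min_periods=10).skew()
rolling_kurt = s.rolling(window=25, min_periods=10).kurt()
print(rolling_skew.tail())
print(rolling_kurt.tail())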
aminert/scikit-learn
setup.py
143
7364
#! /usr/bin/env python # # Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com> # 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr> # License: 3-clause BSD descr = """A set of python modules for machine learning and data mining""" import sys import os import shutil from distutils.command.clean import clean as Clean if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins # This is a bit (!) hackish: we are setting a global variable so that the main # sklearn __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by scikit-learn to recursively # build the compiled extensions in sub-packages is based on the Python import # machinery. builtins.__SKLEARN_SETUP__ = True DISTNAME = 'scikit-learn' DESCRIPTION = 'A set of python modules for machine learning and data mining' with open('README.rst') as f: LONG_DESCRIPTION = f.read() MAINTAINER = 'Andreas Mueller' MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de' URL = 'http://scikit-learn.org' LICENSE = 'new BSD' DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/' # We can actually import a restricted version of sklearn that # does not need the compiled code import sklearn VERSION = sklearn.__version__ # Optional setuptools features # We need to import setuptools early, if we want setuptools features, # as it monkey-patches the 'setup' function # For some commands, use setuptools SETUPTOOLS_COMMANDS = set([ 'develop', 'release', 'bdist_egg', 'bdist_rpm', 'bdist_wininst', 'install_egg_info', 'build_sphinx', 'egg_info', 'easy_install', 'upload', 'bdist_wheel', '--single-version-externally-managed', ]) if SETUPTOOLS_COMMANDS.intersection(sys.argv): import setuptools extra_setuptools_args = dict( zip_safe=False, # the package can run out of an .egg file include_package_data=True, ) else: extra_setuptools_args = dict() # Custom clean command to remove build artifacts class CleanCommand(Clean): description = "Remove build artifacts from the source tree" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sklearn'): for filename in filenames: if (filename.endswith('.so') or filename.endswith('.pyd') or filename.endswith('.dll') or filename.endswith('.pyc')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname == '__pycache__': shutil.rmtree(os.path.join(dirpath, dirname)) cmdclass = {'clean': CleanCommand} # Optional wheelhouse-uploader features # To automate release of binary packages for scikit-learn we need a tool # to download the packages generated by travis and appveyor workers (with # version number matching the current release) and upload them all at once # to PyPI at release time. # The URL of the artifact repositories are configured in the setup.cfg file. WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all']) if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv): import wheelhouse_uploader.cmd cmdclass.update(vars(wheelhouse_uploader.cmd)) def configuration(parent_package='', top_path=None): if os.path.exists('MANIFEST'): os.remove('MANIFEST') from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) # Avoid non-useful msg: # "Ignoring attempt to set 'name' (from ... 
" config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sklearn') return config def is_scipy_installed(): try: import scipy except ImportError: return False return True def is_numpy_installed(): try: import numpy except ImportError: return False return True def setup_package(): metadata = dict(name=DISTNAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, version=VERSION, download_url=DOWNLOAD_URL, long_description=LONG_DESCRIPTION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], cmdclass=cmdclass, **extra_setuptools_args) if (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean'))): # For these actions, NumPy is not required. # # They are required to succeed without Numpy for example when # pip is used to install Scikit-learn when Numpy is not yet present in # the system. try: from setuptools import setup except ImportError: from distutils.core import setup metadata['version'] = VERSION else: if is_numpy_installed() is False: raise ImportError("Numerical Python (NumPy) is not installed.\n" "scikit-learn requires NumPy.\n" "Installation instructions are available on scikit-learn website: " "http://scikit-learn.org/stable/install.html\n") if is_scipy_installed() is False: raise ImportError("Scientific Python (SciPy) is not installed.\n" "scikit-learn requires SciPy.\n" "Installation instructions are available on scikit-learn website: " "http://scikit-learn.org/stable/install.html\n") from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == "__main__": setup_package()
bsd-3-clause
andersbll/deeppy_experimental
examples/autoencoders_mnist.py
14
2777
#!/usr/bin/env python """ Autoencoder pretraining of neural networks ========================================== """ import numpy as np import matplotlib.pyplot as plt import deeppy as dp # Fetch MNIST data dataset = dp.dataset.MNIST() x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True) # Normalize pixel intensities scaler = dp.UniformScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # Prepare autoencoder input batch_size = 128 train_input = dp.Input(x_train, batch_size=batch_size) # Setup autoencoders sae = dp.StackedAutoencoder( layers=[ dp.DenoisingAutoencoder( n_out=1000, weights=dp.Parameter(dp.AutoFiller()), activation='sigmoid', corruption=0.25, ), dp.DenoisingAutoencoder( n_out=1000, weights=dp.Parameter(dp.AutoFiller()), activation='sigmoid', corruption=0.25, ), dp.DenoisingAutoencoder( n_out=1000, weights=dp.Parameter(dp.AutoFiller()), activation='sigmoid', corruption=0.25, ), ], ) # Train autoencoders layer-wise trainer = dp.StochasticGradientDescent( max_epochs=25, learn_rule=dp.Momentum(learn_rate=0.05, momentum=0.9), ) for ae in sae.ae_models(): trainer.train(ae, train_input) # Train stacked autoencoders trainer.train(sae, train_input) # Setup neural network using the stacked autoencoder layers net = dp.NeuralNetwork( layers=sae.feedforward_layers() + [ dp.FullyConnected( n_out=dataset.n_classes, weights=dp.Parameter(dp.AutoFiller()), ), ], loss=dp.SoftmaxCrossEntropy(), ) # Fine-tune neural network train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size) test_input = dp.Input(x_test) trainer = dp.StochasticGradientDescent( max_epochs=25, learn_rule=dp.Momentum(learn_rate=0.05, momentum=0.9), ) trainer.train(net, train_input) # Evaluate on test data error = np.mean(net.predict(test_input) != y_test) print('Test error rate: %.4f' % error) # Plot learned features def plot_img(img, title): plt.figure() plt.imshow(img, cmap='gray', interpolation='nearest') plt.axis('off') plt.title(title) plt.tight_layout() w = np.array(sae.layers[0].weights.array) w = np.reshape(w.T, (-1,) + dataset.img_shape) sortidx = np.argsort(np.std(w, axis=(1, 2)))[-64:] plot_img(dp.misc.img_tile(dp.misc.img_stretch(w[sortidx])), 'Autoencoder features') # Plot learned features in first layer w = np.array(net.layers[0].weights.array) w = np.reshape(w.T, (-1,) + dataset.img_shape) plot_img(dp.misc.img_tile(dp.misc.img_stretch(w[sortidx])), 'Fine-tuned features')
mit
herzog31/acn-project
3 Mininet Topology/plotter.py
1
5532
''' Created on 09.12.2014 ACN Project Plotter @author: Group 19 ''' import argparse from matplotlib import pyplot def create_boxplot_figure(ftp_measurement, http_measurement, ssh_measurement): ''' Reads the input files (which may be None) and plots the results. If None is passed, nothing will be plotted for that protocol. Returns a figure containing the box-plot. ''' # List of value-lists for the three protocols values_to_plot = [] # List of protocol labels labels = [] # Check for all three lists # Append the list of results from one protocol to the list of value-lists if ftp_measurement is not None and ftp_measurement: values_to_plot.append(ftp_measurement) labels.append("FTP") if http_measurement is not None and http_measurement: values_to_plot.append(http_measurement) labels.append("HTTP") if ssh_measurement is not None and ssh_measurement: values_to_plot.append(ssh_measurement) labels.append("SSH") # Create one plot-figure fig = pyplot.figure(1, figsize=(9, 6)) # Add values to the figure and create the box-plot ax = fig.add_subplot(111) ax.boxplot(values_to_plot) # Set the labels of the x-axis ax.set_xticklabels(labels) return fig def save_plot_to_file(fig, filename): ''' Stores the plot into the file. If None is passed this function does nothing. ''' # Do nothing if no file is specified if filename is None: return # Nothing to store if fig is None if fig is None: return # Save the figure to file; the extensions determines the file-type fig.savefig(filename) def read_values_from_file(filename): ''' Reads the values (separated by newlines) and returns them as a list of float-values. If None is passed, the function returns an empty list. ''' # Return empty list if None is passed as filename if filename is None: return [] # Open the file, read the content and store every line in a list # Every line has to contain one measurement value values = open(filename, 'r').read().splitlines() # Values are read as strings; cast them to floats for plotting values = [float(s) for s in values] return values def main(): ''' This is the main function that first parses the passed arguments (the files containing the values) and then saves the result as a file ''' # Initialize argument parser parser = argparse.ArgumentParser() # Add argument for reading the measurement results of the FTP message # exchange parser.add_argument( "--ftp_measurement", help="Path to the file containing the measurement values for the FTP" + " message exchange. If no file is specified, this protocol will be" + " ignored. The values are floating point values and have to be" + " separated by line-breaks.", type=str, default=None) # Add argument for reading the measurement results of the HTTP message # exchange parser.add_argument( "--http_measurement", help="Path to the file containing the measurement values for the HTTP" + " message exchange. If no file is specified, this protocol will be" + " ignored. The values are floating point values and have to be" + " separated by line-breaks.", type=str, default=None) # Add argument for reading the measurement results of the SSH message # exchange parser.add_argument( "--ssh_measurement", help="Path to the file containing the measurement values for the SSH" + " message exchange. If no file is specified, this protocol will be" + " ignored. The values are floating point values and have to be" + " separated by line-breaks.", type=str, default=None) # Add argument for the file the plot should be stored in parser.add_argument( "--output_filename", help="Writes the plot to the file specified. 
The format has to be" + " determined by appending the corresponding extension to the filename." + " If no filename was specified, then no file will be written." + " Existing files will be overwritten. " + " Supported formats (any other will lead to an error!):" + " eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff", type=str, default=None) # Choice whether to show the plot parser.add_argument( "--show_plot", help="Shows the plot on the screen.", action="store_true") args = parser.parse_args() # Read values from file and store them in a list # Do that for all three protocols (FTP, HTTP, SSH) ftp_measurement_values = read_values_from_file( args.ftp_measurement) http_measurement_values = read_values_from_file( args.http_measurement) ssh_measurement_values = read_values_from_file( args.ssh_measurement) # Create and store the figure containing the box-plot fig = create_boxplot_figure( ftp_measurement_values, http_measurement_values, ssh_measurement_values) # If the output filename is not None, save the plot to file if args.output_filename is not None: save_plot_to_file(fig, args.output_filename) # If show_plot is true, show the plot on the screen if args.show_plot: pyplot.show() if __name__ == '__main__': main()
mit
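A hypothetical programmatic use of the helpers defined in plotter.py above, bypassing argparse. The file names are placeholders and the module is assumed to be importable as plotter:

from plotter import (read_values_from_file, create_boxplot_figure,
                     save_plot_to_file)

ftp_values = read_values_from_file('ftp.txt')   # newline-separated floats
http_values = read_values_from_file(None)       # None -> empty list, protocol skipped
fig = create_boxplot_figure(ftp_values, http_values, None)
save_plot_to_file(fig, 'latency_boxplot.png')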
quheng/scikit-learn
sklearn/feature_extraction/tests/test_dict_vectorizer.py
276
3790
# Authors: Lars Buitinck <L.J.Buitinck@uva.nl> # Dan Blanchard <dblanchard@ets.org> # License: BSD 3 clause from random import Random import numpy as np import scipy.sparse as sp from numpy.testing import assert_array_equal from sklearn.utils.testing import (assert_equal, assert_in, assert_false, assert_true) from sklearn.feature_extraction import DictVectorizer from sklearn.feature_selection import SelectKBest, chi2 def test_dictvectorizer(): D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}] for sparse in (True, False): for dtype in (int, np.float32, np.int16): for sort in (True, False): for iterable in (True, False): v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) X = v.fit_transform(iter(D) if iterable else D) assert_equal(sp.issparse(X), sparse) assert_equal(X.shape, (3, 5)) assert_equal(X.sum(), 14) assert_equal(v.inverse_transform(X), D) if sparse: # CSR matrices can't be compared for equality assert_array_equal(X.A, v.transform(iter(D) if iterable else D).A) else: assert_array_equal(X, v.transform(iter(D) if iterable else D)) if sort: assert_equal(v.feature_names_, sorted(v.feature_names_)) def test_feature_selection(): # make two feature dicts with two useful features and a bunch of useless # ones, in terms of chi2 d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20) d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1) for indices in (True, False): v = DictVectorizer().fit([d1, d2]) X = v.transform([d1, d2]) sel = SelectKBest(chi2, k=2).fit(X, [0, 1]) v.restrict(sel.get_support(indices=indices), indices=indices) assert_equal(v.get_feature_names(), ["useful1", "useful2"]) def test_one_of_k(): D_in = [{"version": "1", "ham": 2}, {"version": "2", "spam": .3}, {"version=3": True, "spam": -1}] v = DictVectorizer() X = v.fit_transform(D_in) assert_equal(X.shape, (3, 5)) D_out = v.inverse_transform(X) assert_equal(D_out[0], {"version=1": 1, "ham": 2}) names = v.get_feature_names() assert_true("version=2" in names) assert_false("version" in names) def test_unseen_or_no_features(): D = [{"camelot": 0, "spamalot": 1}] for sparse in [True, False]: v = DictVectorizer(sparse=sparse).fit(D) X = v.transform({"push the pram a lot": 2}) if sparse: X = X.toarray() assert_array_equal(X, np.zeros((1, 2))) X = v.transform({}) if sparse: X = X.toarray() assert_array_equal(X, np.zeros((1, 2))) try: v.transform([]) except ValueError as e: assert_in("empty", str(e)) def test_deterministic_vocabulary(): # Generate equal dictionaries with different memory layouts items = [("%03d" % i, i) for i in range(1000)] rng = Random(42) d_sorted = dict(items) rng.shuffle(items) d_shuffled = dict(items) # check that the memory layout does not impact the resulting vocabulary v_1 = DictVectorizer().fit([d_sorted]) v_2 = DictVectorizer().fit([d_shuffled]) assert_equal(v_1.vocabulary_, v_2.vocabulary_)
bsd-3-clause
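A compact standalone illustration of the DictVectorizer behaviour the tests above verify: feature dicts become matrix rows, and string-valued features are expanded one-of-K:

from sklearn.feature_extraction import DictVectorizer

D = [{"foo": 1, "bar": 3},
     {"bar": 4, "baz": 2},
     {"bar": 1, "version": "2"}]
v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
print(v.get_feature_names())   # e.g. ['bar', 'baz', 'foo', 'version=2']
print(X)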
ishank08/scikit-learn
sklearn/preprocessing/data.py
15
68211
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings from itertools import combinations_with_replacement as combinations_w_r import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils.extmath import row_norms from ..utils.extmath import _incremental_mean_and_var from ..utils.fixes import bincount from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, incr_mean_variance_axis, min_max_axis) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] def _handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. See also -------- StandardScaler: Performs scaling to unit variance using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). 
""" # noqa X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* See also -------- minmax_scale: Equivalent function without the object oriented API. 
""" def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. " "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. 
versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MinMaxScaler: Performs scaling to a given range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. 
Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- scale: Equivalent function without the object oriented API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. """ # noqa def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. 
""" check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. .. versionadded:: 0.17 Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. max_abs_ : ndarray, shape (n_features,) Per feature maximum absolute value. n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- maxabs_scale: Equivalent function without the object oriented API. """ def __init__(self, copy=True): self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.max_abs_ def fit(self, X, y=None): """Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. 
""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = np.abs(X).max(axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next passes else: max_abs = np.maximum(self.max_abs_, max_abs) self.n_samples_seen_ += X.shape[0] self.max_abs_ = max_abs self.scale_ = _handle_zeros_in_scale(max_abs) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : {array-like, sparse matrix} The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : {array-like, sparse matrix} The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. .. 
versionadded:: 0.17 Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- robust_scale: Equivalent function without the object oriented API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. https://en.wikipedia.org/wiki/Median_(statistics) https://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.quantile_range = quantile_range self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q_min, q_max = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) q = np.percentile(X, self.quantile_range, axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like The data used to scale along the specified axis. 
""" if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- RobustScaler: Performs centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, quantile_range=quantile_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). 
include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0., 0., 1.], [ 1., 2., 3., 4., 6., 9.], [ 1., 4., 5., 16., 20., 25.]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0.], [ 1., 2., 3., 6.], [ 1., 4., 5., 20.]]) Attributes ---------- powers_ : array, shape (n_output_features, n_input_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(bincount(c, minlength=self.n_input_features_) for c in combinations) def get_feature_names(self, input_features=None): """ Return feature names for output features Parameters ---------- input_features : list of string, length n_features, optional String names for input features if available. By default, "x0", "x1", ... "xn_features" is used. Returns ------- output_feature_names : list of string, length n_output_features """ powers = self.powers_ if input_features is None: input_features = ['x%d' % i for i in range(powers.shape[1])] feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join("%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds])) else: name = "1" feature_names.append(name) return feature_names def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. 
""" check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : boolean, default False whether to return the computed norms Returns ------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Normalized input X. norms : array, shape [n_samples] if axis=1 else [n_features] An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See also -------- Normalizer: Performs normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in ('l1', 'l2'): raise NotImplementedError("return_norm=True is not implemented " "for sparse matrices with norm 'l1' " "or norm 'l2'") if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). 
Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- normalize: Equivalent function without the object oriented API. """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- Binarizer: Performs binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. 
modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- binarize: Equivalent function without the object oriented API. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K, dtype=FLOAT_DTYPES) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FLOAT_DTYPES) K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K @property def _pairwise(self): return True def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : {array, sparse matrix}, shape [n_samples, n_features + 1] Same data with dummy feature added as first column. 
Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES) if isinstance(selected, six.string_types) and selected == "all": return transform(X) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Note: a one-hot encoding of y labels should use a LabelBinarizer instead. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : number of categorical values per feature. 
Each feature value should be in ``range(n_values)`` - array : ``n_values[i]`` is the number of categorical values in ``X[:, i]``. Each feature value should be in ``range(n_values[i])`` categorical_features : "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and four samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all fashion. sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of iterables and a multilabel format, e.g. a (samples x classes) binary matrix indicating the presence of a class label. sklearn.preprocessing.LabelEncoder : encodes labels with values between 0 and n_classes-1. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape [n_samples, n_feature] Input array of type int. 
Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those categorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X.ravel()[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. 
Parameters ---------- X : array-like, shape [n_samples, n_features] Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
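# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how the transformers defined above can be
# composed: two integer-coded categorical columns are one-hot encoded and the
# result is rescaled with MaxAbsScaler. The array X_cat and all variable names
# below are assumptions chosen for this example, not identifiers from this file.
import numpy as np
from sklearn.preprocessing import MaxAbsScaler, OneHotEncoder

X_cat = np.array([[0, 1], [1, 0], [0, 2]])          # two categorical features coded as ints
enc = OneHotEncoder(sparse=False)                   # n_values='auto' infers 2 and 3 categories
X_onehot = enc.fit_transform(X_cat)                 # dense array of shape (3, 5)
X_scaled = MaxAbsScaler().fit_transform(X_onehot)   # columns already lie in [0, 1], so scale_ is all ones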
bsd-3-clause
Akshay0724/scikit-learn
sklearn/utils/tests/test_extmath.py
19
24513
# Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis Engemann <denis-alexander.engemann@inria.fr> # # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import linalg from scipy import stats from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import skip_if_32bit from sklearn.utils.testing import SkipTest from sklearn.utils.fixes import np_version from sklearn.utils.extmath import density from sklearn.utils.extmath import logsumexp from sklearn.utils.extmath import norm, squared_norm from sklearn.utils.extmath import randomized_svd from sklearn.utils.extmath import row_norms from sklearn.utils.extmath import weighted_mode from sklearn.utils.extmath import cartesian from sklearn.utils.extmath import log_logistic from sklearn.utils.extmath import fast_dot, _fast_dot from sklearn.utils.extmath import svd_flip from sklearn.utils.extmath import _incremental_mean_and_var from sklearn.utils.extmath import _deterministic_vector_sign_flip from sklearn.utils.extmath import softmax from sklearn.utils.extmath import stable_cumsum from sklearn.datasets.samples_generator import make_low_rank_matrix def test_density(): rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 5)) X[1, 2] = 0 X[5, 3] = 0 X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_coo = sparse.coo_matrix(X) X_lil = sparse.lil_matrix(X) for X_ in (X_csr, X_csc, X_coo, X_lil): assert_equal(density(X_), density(X)) def test_uniform_weights(): # with uniform weights, results should be identical to stats.mode rng = np.random.RandomState(0) x = rng.randint(10, size=(10, 5)) weights = np.ones(x.shape) for axis in (None, 0, 1): mode, score = stats.mode(x, axis) mode2, score2 = weighted_mode(x, weights, axis) assert_array_equal(mode, mode2) assert_array_equal(score, score2) def test_random_weights(): # set this up so that each row should have a weighted mode of 6, # with a score that is easily reproduced mode_result = 6 rng = np.random.RandomState(0) x = rng.randint(mode_result, size=(100, 10)) w = rng.random_sample(x.shape) x[:, :5] = mode_result w[:, :5] += 1 mode, score = weighted_mode(x, w, axis=1) assert_array_equal(mode, mode_result) assert_array_almost_equal(score.ravel(), w[:, :5].sum(1)) def test_logsumexp(): # Try to add some smallish numbers in logspace x = np.array([1e-40] * 1000000) logx = np.log(x) assert_almost_equal(np.exp(logsumexp(logx)), x.sum()) X = np.vstack([x, x]) logX = np.vstack([logx, logx]) assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) def test_randomized_svd_low_rank(): # Check that extmath.randomized_svd is consistent with linalg.svd n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X of approximate effective rank `rank` and no noise # component (very structured signal): X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow 
exact method U, s, V = linalg.svd(X, full_matrices=False) for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable # compute the singular values of X using the fast approximate method Ua, sa, Va = \ randomized_svd(X, k, power_iteration_normalizer=normalizer, random_state=0) assert_equal(Ua.shape, (n_samples, k)) assert_equal(sa.shape, (k,)) assert_equal(Va.shape, (k, n_features)) # ensure that the singular values of both methods are equal up to the # real rank of the matrix assert_almost_equal(s[:k], sa) # check the singular vectors too (while not checking the sign) assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va)) # check the sparse matrix representation X = sparse.csr_matrix(X) # compute the singular values of X using the fast approximate method Ua, sa, Va = \ randomized_svd(X, k, power_iteration_normalizer=normalizer, random_state=0) assert_almost_equal(s[:rank], sa[:rank]) def test_norm_squared_norm(): X = np.random.RandomState(42).randn(50, 63) X *= 100 # check stability X += 200 assert_almost_equal(np.linalg.norm(X.ravel()), norm(X)) assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6) assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6) def test_row_norms(): X = np.random.RandomState(42).randn(100, 100) for dtype in (np.float32, np.float64): if dtype is np.float32: precision = 4 else: precision = 5 X = X.astype(dtype) sq_norm = (X ** 2).sum(axis=1) assert_array_almost_equal(sq_norm, row_norms(X, squared=True), precision) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision) Xcsr = sparse.csr_matrix(X, dtype=dtype) assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), precision) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision) def test_randomized_svd_low_rank_with_noise(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X wity structure approximate rank `rank` and an # important noisy component X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.1, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) for normalizer in ['auto', 'none', 'LU', 'QR']: # compute the singular values of X using the fast approximate # method without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0, power_iteration_normalizer=normalizer, random_state=0) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.01) # compute the singular values of X using the fast approximate # method with iterated power method _, sap, _ = randomized_svd(X, k, power_iteration_normalizer=normalizer, random_state=0) # the iterated power method is helping getting rid of the noise: assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_infinite_rank(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # let us try again without 'low_rank component': just regularly but slowly # decreasing singular values: the rank of the data matrix is infinite X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=1.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) for normalizer in ['auto', 'none', 
'LU', 'QR']: # compute the singular values of X using the fast approximate method # without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0, power_iteration_normalizer=normalizer) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.1) # compute the singular values of X using the fast approximate method # with iterated power method _, sap, _ = randomized_svd(X, k, n_iter=5, power_iteration_normalizer=normalizer) # the iterated power method is still managing to get most of the # structure at the requested rank assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_transpose_consistency(): # Check that transposing the design matrix has limited impact n_samples = 100 n_features = 500 rank = 4 k = 10 X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.5, random_state=0) assert_equal(X.shape, (n_samples, n_features)) U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0) U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0) U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto', random_state=0) U4, s4, V4 = linalg.svd(X, full_matrices=False) assert_almost_equal(s1, s4[:k], decimal=3) assert_almost_equal(s2, s4[:k], decimal=3) assert_almost_equal(s3, s4[:k], decimal=3) assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]), decimal=2) assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]), decimal=2) # in this case 'auto' is equivalent to transpose assert_almost_equal(s2, s3) def test_randomized_svd_power_iteration_normalizer(): # randomized_svd with power_iteration_normalized='none' diverges for # large number of power iterations on this dataset rng = np.random.RandomState(42) X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng) X += 3 * rng.randint(0, 2, size=X.shape) n_components = 50 # Check that it diverges with many (non-normalized) power iterations U, s, V = randomized_svd(X, n_components, n_iter=2, power_iteration_normalizer='none') A = X - U.dot(np.diag(s).dot(V)) error_2 = linalg.norm(A, ord='fro') U, s, V = randomized_svd(X, n_components, n_iter=20, power_iteration_normalizer='none') A = X - U.dot(np.diag(s).dot(V)) error_20 = linalg.norm(A, ord='fro') assert_greater(np.abs(error_2 - error_20), 100) for normalizer in ['LU', 'QR', 'auto']: U, s, V = randomized_svd(X, n_components, n_iter=2, power_iteration_normalizer=normalizer, random_state=0) A = X - U.dot(np.diag(s).dot(V)) error_2 = linalg.norm(A, ord='fro') for i in [5, 10, 50]: U, s, V = randomized_svd(X, n_components, n_iter=i, power_iteration_normalizer=normalizer, random_state=0) A = X - U.dot(np.diag(s).dot(V)) error = linalg.norm(A, ord='fro') assert_greater(15, np.abs(error_2 - error)) def test_svd_flip(): # Check that svd_flip works in both situations, and reconstructs input. 
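    # Added note (descriptive comment, not in the original test): svd_flip only
    # adjusts signs. For each singular vector pair it picks the sign that makes
    # the largest-magnitude entry of the deciding matrix positive (columns of U
    # when u_based_decision=True, rows of V otherwise) and applies that sign to
    # the U column and the matching V row together, so the product U * S * V
    # checked in the reconstructions below is unchanged.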
rs = np.random.RandomState(1999) n_samples = 20 n_features = 10 X = rs.randn(n_samples, n_features) # Check matrix reconstruction U, S, V = linalg.svd(X, full_matrices=False) U1, V1 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6) # Check transposed matrix reconstruction XT = X.T U, S, V = linalg.svd(XT, full_matrices=False) U2, V2 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6) # Check that different flip methods are equivalent under reconstruction U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6) U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6) def test_randomized_svd_sign_flip(): a = np.array([[2.0, 0.0], [0.0, 1.0]]) u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41) for seed in range(10): u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed) assert_almost_equal(u1, u2) assert_almost_equal(v1, v2) assert_almost_equal(np.dot(u2 * s2, v2), a) assert_almost_equal(np.dot(u2.T, u2), np.eye(2)) assert_almost_equal(np.dot(v2.T, v2), np.eye(2)) def test_randomized_svd_sign_flip_with_transpose(): # Check if the randomized_svd sign flipping is always done based on u # irrespective of transpose. # See https://github.com/scikit-learn/scikit-learn/issues/5608 # for more details. def max_loading_is_positive(u, v): """ returns bool tuple indicating if the values maximising np.abs are positive across all rows for u and across all columns for v. """ u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all() v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all() return u_based, v_based mat = np.arange(10 * 8).reshape(10, -1) # Without transpose u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True) u_based, v_based = max_loading_is_positive(u_flipped, v_flipped) assert_true(u_based) assert_false(v_based) # With transpose u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd( mat, 3, flip_sign=True, transpose=True) u_based, v_based = max_loading_is_positive( u_flipped_with_transpose, v_flipped_with_transpose) assert_true(u_based) assert_false(v_based) def test_cartesian(): # Check if cartesian product delivers the right results axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7])) true_out = np.array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) out = cartesian(axes) assert_array_equal(true_out, out) # check single axis x = np.arange(3) assert_array_equal(x[:, np.newaxis], cartesian((x,))) def test_logistic_sigmoid(): # Check correctness and robustness of logistic sigmoid implementation def naive_log_logistic(x): return np.log(1 / (1 + np.exp(-x))) x = np.linspace(-2, 2, 50) assert_array_almost_equal(log_logistic(x), naive_log_logistic(x)) extreme_x = np.array([-100., 100.]) assert_array_almost_equal(log_logistic(extreme_x), [-100, 0]) def test_fast_dot(): # Check fast dot blas wrapper function if fast_dot is np.dot: return rng = np.random.RandomState(42) A = rng.random_sample([2, 10]) B = rng.random_sample([2, 10]) try: linalg.get_blas_funcs(['gemm'])[0] has_blas = True except (AttributeError, ValueError): has_blas = False if has_blas: # Test _fast_dot for invalid input. # Maltyped data. 
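        # Added note (descriptive comment, not in the original test): the
        # assertions below pin down _fast_dot's contract as exercised here --
        # it accepts only 2-D floating point operands with matching dtypes,
        # more than one row and column, and compatible shapes, and it raises
        # ValueError for everything else instead of silently computing a wrong
        # result.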
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]: assert_raises(ValueError, _fast_dot, A.astype(dt1), B.astype(dt2).T) # Malformed data. # ndim == 0 E = np.empty(0) assert_raises(ValueError, _fast_dot, E, E) # ndim == 1 assert_raises(ValueError, _fast_dot, A, A[0]) # ndim > 2 assert_raises(ValueError, _fast_dot, A.T, np.array([A, A])) # min(shape) == 1 assert_raises(ValueError, _fast_dot, A, A[0, :][None, :]) # test for matrix mismatch error assert_raises(ValueError, _fast_dot, A, A) # Test cov-like use case + dtypes. for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) # col < row C = np.dot(A.T, A) C_ = fast_dot(A.T, A) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A, B.T) C_ = fast_dot(A, B.T) assert_almost_equal(C, C_, decimal=5) # Test square matrix * rectangular use case. A = rng.random_sample([2, 2]) for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) C = np.dot(A, B) C_ = fast_dot(A, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) if has_blas: for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]: assert_raises(ValueError, _fast_dot, x, x.T) def test_incremental_variance_update_formulas(): # Test Youngs and Cramer incremental variance formulas. # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html A = np.array([[600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300]]).T idx = 2 X1 = A[:idx, :] X2 = A[idx:, :] old_means = X1.mean(axis=0) old_variances = X1.var(axis=0) old_sample_count = X1.shape[0] final_means, final_variances, final_count = \ _incremental_mean_and_var(X2, old_means, old_variances, old_sample_count) assert_almost_equal(final_means, A.mean(axis=0), 6) assert_almost_equal(final_variances, A.var(axis=0), 6) assert_almost_equal(final_count, A.shape[0]) @skip_if_32bit def test_incremental_variance_numerical_stability(): # Test Youngs and Cramer incremental variance formulas. def np_var(A): return A.var(axis=0) # Naive one pass variance computation - not numerically stable # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance def one_pass_var(X): n = X.shape[0] exp_x2 = (X ** 2).sum(axis=0) / n expx_2 = (X.sum(axis=0) / n) ** 2 return exp_x2 - expx_2 # Two-pass algorithm, stable. # We use it as a benchmark. It is not an online algorithm # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm def two_pass_var(X): mean = X.mean(axis=0) Y = X.copy() return np.mean((Y - mean)**2, axis=0) # Naive online implementation # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm # This works only for chunks for size 1 def naive_mean_variance_update(x, last_mean, last_variance, last_sample_count): updated_sample_count = (last_sample_count + 1) samples_ratio = last_sample_count / float(updated_sample_count) updated_mean = x / updated_sample_count + last_mean * samples_ratio updated_variance = last_variance * samples_ratio + \ (x - last_mean) * (x - updated_mean) / updated_sample_count return updated_mean, updated_variance, updated_sample_count # We want to show a case when one_pass_var has error > 1e-3 while # _batch_mean_variance_update has less. 
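    # Added note (descriptive comment, not in the original test): the data
    # constructed below mixes a block of huge values (x1 = 1e8) with a block of
    # tiny ones (x2 = log(1e-5)), so the naive single-pass formula
    # E[x^2] - E[x]^2 subtracts two numbers of order 1e15-1e16 and the rounding
    # error left over exceeds `tol`, while the two-pass and incremental
    # (Youngs and Cramer) updates avoid that cancellation and stay below it.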
tol = 200 n_features = 2 n_samples = 10000 x1 = np.array(1e8, dtype=np.float64) x2 = np.log(1e-5, dtype=np.float64) A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64) A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64) A = np.vstack((A0, A1)) # Older versions of numpy have different precision # In some old version, np.var is not stable if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6: stable_var = np_var else: stable_var = two_pass_var # Naive one pass var: >tol (=1063) assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol) # Starting point for online algorithms: after A0 # Naive implementation: >tol (436) mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2 for i in range(A1.shape[0]): mean, var, n = \ naive_mean_variance_update(A1[i, :], mean, var, n) assert_equal(n, A.shape[0]) # the mean is also slightly unstable assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6) assert_greater(np.abs(stable_var(A) - var).max(), tol) # Robust implementation: <tol (177) mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2 for i in range(A1.shape[0]): mean, var, n = \ _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])), mean, var, n) assert_equal(n, A.shape[0]) assert_array_almost_equal(A.mean(axis=0), mean) assert_greater(tol, np.abs(stable_var(A) - var).max()) def test_incremental_variance_ddof(): # Test that degrees of freedom parameter for calculations are correct. rng = np.random.RandomState(1999) X = rng.randn(50, 10) n_samples, n_features = X.shape for batch_size in [11, 20, 37]: steps = np.arange(0, X.shape[0], batch_size) if steps[-1] != X.shape[0]: steps = np.hstack([steps, n_samples]) for i, j in zip(steps[:-1], steps[1:]): batch = X[i:j, :] if i == 0: incremental_means = batch.mean(axis=0) incremental_variances = batch.var(axis=0) # Assign this twice so that the test logic is consistent incremental_count = batch.shape[0] sample_count = batch.shape[0] else: result = _incremental_mean_and_var( batch, incremental_means, incremental_variances, sample_count) (incremental_means, incremental_variances, incremental_count) = result sample_count += batch.shape[0] calculated_means = np.mean(X[:j], axis=0) calculated_variances = np.var(X[:j], axis=0) assert_almost_equal(incremental_means, calculated_means, 6) assert_almost_equal(incremental_variances, calculated_variances, 6) assert_equal(incremental_count, sample_count) def test_vector_sign_flip(): # Testing that sign flip is working & largest value has positive sign data = np.random.RandomState(36).randn(5, 5) max_abs_rows = np.argmax(np.abs(data), axis=1) data_flipped = _deterministic_vector_sign_flip(data) max_rows = np.argmax(data_flipped, axis=1) assert_array_equal(max_abs_rows, max_rows) signs = np.sign(data[range(data.shape[0]), max_abs_rows]) assert_array_equal(data, data_flipped * signs[:, np.newaxis]) def test_softmax(): rng = np.random.RandomState(0) X = rng.randn(3, 5) exp_X = np.exp(X) sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1)) assert_array_almost_equal(softmax(X), exp_X / sum_exp_X) def test_stable_cumsum(): if np_version < (1, 9): raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9") assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3])) r = np.random.RandomState(0).rand(100000) assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0) # test axis parameter A = np.random.RandomState(36).randint(1000, size=(5, 5, 5)) assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0)) assert_array_equal(stable_cumsum(A, axis=1), 
np.cumsum(A, axis=1)) assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
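# --- Illustrative sketch (not part of the original test file) ---
# A minimal example of driving the incremental update exercised by the tests
# above, feeding the data in fixed-size chunks. The array shape and chunk size
# are arbitrary choices for this example.
import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
mean, var, count = X[:10].mean(axis=0), X[:10].var(axis=0), 10
for start in range(10, 100, 10):
    mean, var, count = _incremental_mean_and_var(X[start:start + 10],
                                                 mean, var, count)
# (mean, var, count) now match X.mean(axis=0), X.var(axis=0) and X.shape[0]
# up to floating point round-off.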
bsd-3-clause
evgchz/scikit-learn
sklearn/ensemble/tests/test_forest.py
7
30960
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import product import numpy as np from scipy.sparse import csr_matrix, csc_matrix, coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.grid_search import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.validation import check_random_state from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): """Check consistency on dataset iris.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = 
clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): """Check consistency on dataset boston house prices.""" ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", )): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): """Regression models should not have a classes_ attribute.""" r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): """Predict probabilities.""" ForestClassifier = FOREST_CLASSIFIERS[name] with np.errstate(divide="ignore"): clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_probability(): for name in FOREST_CLASSIFIERS: yield check_probability, name def check_importances(name, X, y): """Check variable importances.""" ForestClassifier = FOREST_CLASSIFIERS[name] for n_jobs in [1, 2]: clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) X_new = clf.transform(X, threshold="mean") assert_less(0 < X_new.shape[1], X.shape[1]) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=sample_weight) importances = clf.feature_importances_ assert_true(np.all(importances >= 0.0)) clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=3 * sample_weight) importances_bis = clf.feature_importances_ assert_almost_equal(importances, importances_bis) def test_importances(): X, y = datasets.make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name in FOREST_CLASSIFIERS: yield check_importances, name, X, y def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): """Check 
that oob prediction is a good estimation of the generalization error.""" # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): """Check that base trees can be grid-searched.""" for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): """Check pickability.""" ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): """Check estimators on multi-output problems.""" X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], 
[1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): """Test that n_classes_ and classes_ have proper shape.""" ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): ''' Test that the `sparse_output` parameter of RandomTreesEmbedding works by returning a dense array. ''' # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): ''' Test that the `sparse_output` parameter of RandomTreesEmbedding works by returning the same array for both argument values. ''' # Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) 
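# A standalone restatement of the pipeline that test_random_hasher above
# exercises, for readers skimming this module: hash with totally random trees,
# project the sparse leaf codes with TruncatedSVD, then separate the circles
# with a linear SVM. The names and seeds below are illustrative and this sketch
# is not an extra test case.
def demo_random_hasher_pipeline():
    X_demo, y_demo = datasets.make_circles(factor=0.5, random_state=0)

    hasher_demo = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X_coded = hasher_demo.fit_transform(X_demo)    # sparse one-hot leaf codes

    svd_demo = TruncatedSVD(n_components=2)
    X_2d = svd_demo.fit_transform(X_coded)         # dense 2-D projection

    clf_demo = LinearSVC().fit(X_2d, y_demo)
    return clf_demo.score(X_2d, y_demo)            # close to 1.0 for these seeds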
def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name, X, y): """Test precedence of max_leaf_nodes over max_depth. 
""" ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name, X, y def check_min_samples_leaf(name, X, y): """Test if leaves contain more than leaf_count training examples""" ForestEstimator = FOREST_ESTIMATORS[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): est = ForestEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def test_min_samples_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name, X, y def check_min_weight_fraction_leaf(name, X, y): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" ForestEstimator = FOREST_ESTIMATORS[name] rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) if isinstance(est, (RandomForestClassifier, RandomForestRegressor)): est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name, X, y def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0, n_samples=40) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield 
check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): """Check that it works no matter the memory layout""" est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y) est = ForestEstimator(random_state=0) est.fit(X_2d, y) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_raises(ValueError, est.predict, X) def test_1d_input(): X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target for name in FOREST_ESTIMATORS: yield check_1d_input, name, X, X_2d, y def check_warm_start(name, random_state=42): """Test if fitting incrementally with warm start gives a forest of the right size and the same results as a normal fit.""" X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): """Test if fit clears state and grows a new forest when warm_start==False. 
""" X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): """Test if warm start second fit with smaller n_estimators raises error.""" X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): """Test if warm start with equal n_estimators does nothing and returns the same forest and raises a warning.""" X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): """Test that the warm start computes oob score when asked.""" X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name if __name__ == "__main__": import nose nose.runmodule()
bsd-3-clause
apeyser/nest-simulator
extras/ConnPlotter/examples/connplotter_tutorial.py
18
27730
# -*- coding: utf-8 -*- # # connplotter_tutorial.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. # !======================== # ! ConnPlotter: A Tutorial # !======================== # ! # ! :Author: Hans Ekkehard Plesser # ! :Institution: Norwegian University of Life Sciences, Simula # ! Research Laboratory, RIKEN Brain Sciences Institute # ! :Version: 0.7 # ! :Date: 1 December 2009 # ! :Copyright: Hans Ekkehard Plesser # ! :License: Creative Commons Attribution-Noncommercial-Share Alike License # ! v 3.0 # ! # ! :Note: For best results, you should run this script with PyReport by # ! Gael Varoquaux, available from # ! http://gael-varoquaux.info/computers/pyreport/ # ! # ! Please set using_pyreport to True if you want to run the # ! script through pyreport. Otherwise, figures will not be captured # ! correctly. using_pyreport = False # ! Introduction # !============= # ! This tutorial gives a brief introduction to the ConnPlotter # ! toolbox. It is by no means complete. # ! Avoid interactive backend when using pyreport if using_pyreport: import matplotlib matplotlib.use("Agg") # ! Import pylab to call pylab.show() so that pyreport # ! can capture figures created. Must come before import # ! ConnPlotter so we get the correct show(). import pylab # ! If not using pyreport, disable pylab.show() until we reach end of script if not using_pyreport: pylab_show = pylab.show def nop(s=None): pass pylab.show = nop # ! Import ConnPlotter and its examples import ConnPlotter as cpl import ConnPlotter.examples as ex # ! Turn of warnings about resized figure windows import warnings warnings.simplefilter("ignore") # ! Define a helper function to show LaTeX tables on the fly def showTextTable(connPattern, fileTrunk): """ Shows a Table of Connectivity as textual table. Arguments: connPattern ConnectionPattern instance fileTrunk Eventual PNG image will be fileTrunk.png """ import subprocess as subp # to call LaTeX etc import os # to remove files # Write to LaTeX file so we get a nice textual representation # We want a complete LaTeX document, so we set ``standalone`` # to ``True``. connPattern.toLaTeX(file=fileTrunk + '.tex', standalone=True, enumerate=True) # Create PDF, crop, and convert to PNG try: devnull = open('/dev/null', 'w') subp.call(['pdflatex', fileTrunk], stdout=devnull, stderr=subp.STDOUT) # need wrapper, since pdfcrop does not begin with #! subp.call(['pdfcrop ' + fileTrunk + '.pdf ' + fileTrunk + '-crop.pdf'], shell=True, stdout=devnull, stderr=subp.STDOUT) devnull.close() os.rename(fileTrunk + '-crop.pdf', fileTrunk + '.pdf') for suffix in ('.tex', '-crop.pdf', '.png', '.aux', '.log'): if os.path.exists(fileTrunk + suffix): os.remove(fileTrunk + suffix) except: raise Exception('Could not create PDF Table.') # ! Simple network # ! ============== # ! This is a simple network with two layers A and B; layer B has two # ! populations, E and I. 
On the NEST side, we use only synapse type # ! ``static_synapse``. ConnPlotter then infers that synapses with positive # ! weights should have type ``exc``, those with negative weight type ``inh``. # ! Those two types are know to ConnPlotter. # ! Obtain layer, connection and model list from the example set s_layer, s_conn, s_model = ex.simple() # ! Create Connection Pattern representation s_cp = cpl.ConnectionPattern(s_layer, s_conn) # ! Show pattern as textual table (we cheat a little and include PDF directly) showTextTable(s_cp, 'simple_tt') # $ \centerline{\includegraphics{simple_tt.pdf}} # ! Show pattern in full detail # ! --------------------------- # ! A separate patch is shown for each pair of populations. # ! # ! - Rows represent senders, columns targets. # ! - Layer names are given to the left/above, population names to the right # ! and below. # ! - Excitatory synapses shown in blue, inhibitory in red. # ! - Each patch has its own color scale. s_cp.plot() pylab.show() # ! Let us take a look at what this connection pattern table shows: # ! # ! - The left column, with header "A", is empty: The "A" layer receives # ! no input. # ! - The right column shows input to layer "B" # ! # ! * The top row, labeled "A", has two patches in the "B" column: # ! # ! + The left patch shows relatively focused input to the "E" population # ! in layer "B" (first row of "Connectivity" table). # ! + The right patch shows wider input to the "I" population in layer # ! "B" (second row of "Connectivity" table). # ! + Patches are red, indicating excitatory connections. # ! + In both cases, mask are circular, and the product of connection # ! weight and probability is independent of the distance between sender # ! and target neuron. # ! # ! * The grey rectangle to the bottom right shows all connections from # ! layer "B" populations to layer "B" populations. It is subdivided into # ! two rows and two columns: # ! # ! + Left column: inputs to the "E" population. # ! + Right column: inputs to the "I" population. # ! + Top row: projections from the "E" population. # ! + Bottom row: projections from the "I" population. # ! + There is only one type of synapse for each sender-target pair, # ! so there is only a single patch per pair. # ! + Patches in the top row, from population "E" show excitatory # ! connections, thus they are red. # ! + Patches in the bottom row, from population "I" show inhibitory # ! connections, thus they are blue. # ! + The patches in detail are: # ! # ! - **E to E** (top-left, row 3+4 in table): two rectangular # ! projections at 90 degrees. # ! - **E to I** (top-right, row 5 in table): narrow gaussian projection. # ! - **I to E** (bottom-left, row 6 in table): wider gaussian projection # ! - **I to I** (bottom-right, row 7 in table): circular projection # ! covering entire layer. # ! # ! - **NB:** Color scales are different, so one **cannot** compare connection # ! strengths between patches. # ! Full detail, common color scale # ! ------------------------------- s_cp.plot(globalColors=True) pylab.show() # ! This figure shows the same data as the one above, but now all patches use # ! a common color scale, so full intensity color (either red or blue) # ! indicates the strongest connectivity. From this we see that # ! # ! - A to B/E is stronger than A to B/I # ! - B/E to B/I is the strongest of all connections at the center # ! - B/I to B/E is stronger than B/I to B/I # ! Aggregate by groups # ! ------------------- # ! For each pair of population groups, sum connections of the same type # ! 
across populations. s_cp.plot(aggrGroups=True) pylab.show() # ! In the figure above, all excitatory connections from B to B layer have been # ! combined into one patch, as have all inhibitory connections from B to B. # ! In the upper-right corner, all connections from layer A to layer B have # ! been combined; the patch for inhibitory connections is missing, as there # ! are none. # ! Aggregate by groups and synapse models # ! -------------------------------------- s_cp.plot(aggrGroups=True, aggrSyns=True) pylab.show() # ! When aggregating across synapse models, excitatory and inhibitory # ! connections are combined. By default, excitatory connections are weights # ! with +1, inhibitory connections with -1 in the sum. This may yield kernels # ! with positive and negative values. They are shown on a red-white-blue scale # ! as follows: # ! # ! - White always represents 0. # ! - Positive values are represented by increasingly saturated red. # ! - Negative values are represented by increasingly saturated blue. # ! - Colorscales are separate for red and blue: # ! # ! * largest positive value: fully saturated red # ! * largest negative value: fully saturated blue # ! # ! - Each patch has its own colorscales. # ! - When ``aggrSyns=True`` is combined with ``globalColors=True``, # ! all patches use the same minimum and maximum in their red and blue # ! color scales. The the minimum is the negative of the maximum, so that # ! blue and red intesities can be compared. s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True) pylab.show() # ! - We can explicitly set the limits of the color scale; if values exceeding # ! the limits are present, this is indicated by an arrowhead at the end of # ! the colorbar. User-defined color limits need not be symmetric about 0. s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True, colorLimits=[-2, 3]) pylab.show() # ! Save pattern to file # ! -------------------- # s_cp.plot(file='simple_example.png') # ! This saves the detailed diagram to the given file. If you want to save # ! the pattern in several file formats, you can pass a tuple of file names, # ! e.g., ``s_cp.plot(file=('a.eps', 'a.png'))``. # ! # ! **NB:** Saving directly to PDF may lead to files with artifacts. We # ! recommend to save to EPS and the convert to PDF. # ! Build network in NEST # ! --------------------- import nest import nest.topology as topo # ! Create models for model in s_model: nest.CopyModel(model[0], model[1], model[2]) # ! Create layers, store layer info in Python variable for layer in s_layer: exec ('%s = topo.CreateLayer(layer[1])' % layer[0]) # ! Create connections, need to insert variable names for conn in s_conn: eval('topo.ConnectLayers(%s,%s,conn[2])' % (conn[0], conn[1])) nest.Simulate(10) # ! **Ooops:*** Nothing happened? Well, it did, but pyreport cannot capture the # ! output directly generated by NEST. The absence of an error message in this # ! place shows that network construction and simulation went through. # ! Inspecting the connections actually created # ! ::::::::::::::::::::::::::::::::::::::::::: # ! The following block of messy and makeshift code plots the targets of the # ! center neuron of the B/E population in the B/E and the B/I populations. 
B_top = nest.GetStatus(RG, 'topology')[0] ctr_id = topo.GetElement(RG, [int(B_top['rows'] / 2), int(B_top['columns'] / 2)]) # find excitatory element in B E_id = [gid for gid in ctr_id if nest.GetStatus([gid], 'model')[0] == 'E'] # get all targets, split into excitatory and inhibitory alltgts = nest.GetStatus( nest.GetConnections(E_id, synapse_model='static_synapse'), 'target') Etgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'E'] Itgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'I'] # obtain positions of targets Etpos = tuple(zip(*topo.GetPosition(Etgts))) Itpos = tuple(zip(*topo.GetPosition(Itgts))) # plot excitatory pylab.clf() pylab.subplot(121) pylab.scatter(Etpos[0], Etpos[1]) ctrpos = pylab.array(topo.GetPosition(E_id)[0]) ax = pylab.gca() ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99, fc='r', alpha=0.4, ec='none')) ax.add_patch( pylab.Rectangle(ctrpos + pylab.array((-0.4, -0.2)), 0.8, 0.4, zorder=1, fc='none', ec='r', lw=3)) ax.add_patch( pylab.Rectangle(ctrpos + pylab.array((-0.2, -0.4)), 0.4, 0.8, zorder=1, fc='none', ec='r', lw=3)) ax.add_patch( pylab.Rectangle(ctrpos + pylab.array((-0.5, -0.5)), 1.0, 1.0, zorder=1, fc='none', ec='k', lw=3)) ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5], xticks=[], yticks=[]) # plot inhibitory pylab.subplot(122) pylab.scatter(Itpos[0], Itpos[1]) ctrpos = topo.GetPosition(E_id)[0] ax = pylab.gca() ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99, fc='r', alpha=0.4, ec='none')) ax.add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=2, fc='none', ec='r', lw=2, ls='dashed')) ax.add_patch(pylab.Circle(ctrpos, radius=0.2, zorder=2, fc='none', ec='r', lw=2, ls='dashed')) ax.add_patch(pylab.Circle(ctrpos, radius=0.3, zorder=2, fc='none', ec='r', lw=2, ls='dashed')) ax.add_patch(pylab.Circle(ctrpos, radius=0.5, zorder=2, fc='none', ec='r', lw=3)) ax.add_patch(pylab.Rectangle((-0.5, -0.5), 1.0, 1.0, zorder=1, fc='none', ec='k', lw=3)) ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5], xticks=[], yticks=[]) pylab.show() # ! Thick red lines mark the mask, dashed red lines to the right one, two and # ! three standard deviations. The sender location is marked by the red spot # ! in the center. Layers are 40x40 in size. # ! A more complex network # ! ====================== # ! # ! This network has layers A and B, with E and I populations in B. The added # ! complexity comes from the fact that we now have four synapse types: AMPA, # ! NMDA, GABA_A and GABA_B. These synapse types are known to ConnPlotter. # ! Setup and tabular display c_layer, c_conn, c_model = ex.complex() c_cp = cpl.ConnectionPattern(c_layer, c_conn) showTextTable(c_cp, 'complex_tt') # $ \centerline{\includegraphics{complex_tt.pdf}} # ! Pattern in full detail # ! ---------------------- c_cp.plot() pylab.show() # ! Note the following differences to the simple pattern case: # ! # ! - For each pair of populations, e.g., B/E as sender and B/E as target, # ! we now have two patches representing AMPA and NMDA synapse for the E # ! population, GABA_A and _B for the I population. # ! - Colors are as follows: # ! # ! :AMPA: red # ! :NMDA: orange # ! :GABA_A: blue # ! :GABA_B: purple # ! - Note that the horizontal rectangular pattern (table line 3) describes # ! AMPA synapses, while the vertical rectangular pattern (table line 4) # ! describes NMDA synapses. # ! Full detail, common color scale # ! ------------------------------- c_cp.plot(globalColors=True) pylab.show() # ! As above, but now with a common color scale. # ! 
**NB:** The patch for the B/I to B/I connection may look empty, but it # ! actually shows a very light shade of red. Rules are as follows: # ! # ! - If there is no connection between two populations, show the grey layer # ! background. # ! - All parts of the target layer that are outside the mask or strictly zero # ! are off-white. # ! - If it looks bright white, it is a very diluted shade of the color for the # ! pertaining synpase type. # ! Full detail, explicit color limits # ! ---------------------------------- c_cp.plot(colorLimits=[0, 1]) pylab.show() # ! As above, but the common color scale is now given explicitly. # ! The arrow at the right end of the color scale indicates that the values # ! in the kernels extend beyond +1. # ! Aggregate by synapse models # ! ----------------------------- # ! For each population pair, connections are summed across # ! synapse models. # ! # ! - Excitatory kernels are weighted with +1, inhibitory kernels with -1. # ! - The resulting kernels are shown on a color scale ranging from red # ! (inhibitory) via white (zero) to blue (excitatory). # ! - Each patch has its own color scale c_cp.plot(aggrSyns=True) pylab.show() # ! # ! - AMPA and NMDA connections from B/E to B/E are now combined to form a # ! cross. # ! - GABA_A and GABA_B connections from B/I to B/E are two concentric spots. # ! Aggregate by population group # ! ------------------------------ c_cp.plot(aggrGroups=True) pylab.show() # ! This is in many ways orthogonal to aggregation by synapse model: # ! We keep synapse types separat, while we combine across populations. Thus, # ! we have added the horizonal bar (B/E to B/E, row 3) with the spot # ! (B/E to B/I, row 5). # ! Aggregate by population group and synapse model # ! ----------------------------------------------------------------- c_cp.plot(aggrGroups=True, aggrSyns=True) pylab.show() # ! All connection are combined for each pair of sender/target layer. # ! CPTs using the total charge deposited (TCD) as intensity # ! ----------------------------------------------------------- # ! TCD-based CPTs are currently only available for the ht_neuron, since # ! ConnPlotter does not know how to obtain \int g(t) dt from NEST for other # ! conductance-based model neurons. # ! We need to create a separate ConnectionPattern instance for each membrane # ! potential we want to use in the TCD computation c_cp_75 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd', mList=c_model, Vmem=-75.0) c_cp_45 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd', mList=c_model, Vmem=-45.0) # ! In order to obtain a meaningful comparison between both membrane # ! potentials, we use the same global color scale. # ! V_m = -75 mV # ! :::::::::::::: c_cp_75.plot(colorLimits=[0, 150]) pylab.show() # ! V_m = -45 mV # ! :::::::::::::: c_cp_45.plot(colorLimits=[0, 150]) pylab.show() # ! Note that the NMDA projection virtually vanishes for V_m=-75mV, but is very # ! strong for V_m=-45mV. GABA_A and GABA_B projections are also stronger, # ! while AMPA is weaker for V_m=-45mV. # ! Non-Dale network model # ! ====================== # ! By default, ConnPlotter assumes that networks follow Dale's law, i.e., # ! either make excitatory or inhibitory connections. If this assumption # ! is violated, we need to inform ConnPlotter how synapse types are grouped. # ! We look at a simple example here. # ! Load model nd_layer, nd_conn, nd_model = ex.non_dale() # ! We specify the synapse configuration using the synTypes argument: # ! # ! - synTypes is a tuple. # ! 
- Each element in the tuple represents a group of synapse models # ! - Any sender can make connections with synapses from **one group only**. # ! - Each synapse model is specified by a ``SynType``. # ! - The SynType constructor takes three arguments: # ! # ! * The synapse model name # ! * The weight to apply then aggregating across synapse models # ! * The color to use for the synapse type # ! # ! - Synapse names must be unique, and must form a superset of all synapse # ! models in the network. nd_cp = cpl.ConnectionPattern(nd_layer, nd_conn, synTypes=( (cpl.SynType('exc', 1.0, 'b'), cpl.SynType('inh', -1.0, 'r')),)) showTextTable(nd_cp, 'non_dale_tt') # $ \centerline{\includegraphics{non_dale_tt.pdf}} nd_cp.plot() pylab.show() # ! Note that we now have red and blue patches side by side, as the same # ! population can make excitatory and inhibitory connections. # ! Configuring the ConnectionPattern display # ! ========================================= # ! I will now show you a few ways in which you can configure how ConnPlotter # ! shows connection patterns. # ! User defined synapse types # ! -------------------------- # ! # ! By default, ConnPlotter knows two following sets of synapse types. # ! # ! exc/inh # ! - Used automatically when all connections have the same synapse_model. # ! - Connections with positive weight are assigned model exc, those with # ! negative weight model inh. # ! - When computing totals, exc has weight +1, inh weight -1 # ! - Exc is colored blue, inh red. # ! # ! AMPA/NMDA/GABA_A/GABA_B # ! - Used if the set of ``synapse_model`` s in the network is a subset of # ! those four types. # ! - AMPA/NMDA carry weight +1, GABA_A/GABA_B weight -1. # ! - Colors are as follows: # ! # ! :AMPA: blue # ! :NMDA: green # ! :GABA_A: red # ! :GABA_B: magenta # ! # ! # ! We saw a first example of user-defined synapse types in the non-Dale # ! example above. In that case, we only changed the grouping. Here, I will # ! demonstrate the effect of different ordering, weighting, and color # ! specifications. We use the complex model from above as example. # ! # ! *NOTE*: It is most likey a *bad idea* to change the colors or placement of # ! synapse types. If everyone uses the same design rules, we will all be able # ! to read each others figures much more easily. # ! Placement of synapse types # ! :::::::::::::::::::::::::: # ! # ! The ``synTypes`` nested tuple defines the placement of patches for # ! different synapse models. Default layout is # ! # ! ====== ====== # ! AMPA NMDA # ! GABA_A GABA_B # ! ====== ====== # ! # ! All four matrix elements are shown in this layout only when using # ! ``mode='layer'`` display. Otherwise, one or the other row is shown. # ! Note that synapses that can arise from a layer simultaneously, must # ! always be placed on one matrix row, i.e., in one group. As an example, # ! we now invert placement, without any other changes: cinv_syns = ((cpl.SynType('GABA_B', -1, 'm'), cpl.SynType('GABA_A', -1, 'r')), (cpl.SynType('NMDA', 1, 'g'), cpl.SynType('AMPA', 1, 'b'))) cinv_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cinv_syns) cinv_cp.plot() pylab.show() # ! Notice that on each row the synapses are exchanged compared to the original # ! figure above. When displaying by layer, also the rows have traded place: cinv_cp.plot(aggrGroups=True) pylab.show() # ! Totals are not affected: cinv_cp.plot(aggrGroups=True, aggrSyns=True) pylab.show() # ! Weighting of synapse types in ``totals`` mode # ! ::::::::::::::::::::::::::::::::::::::::::::: # ! # ! 
Different synapses may have quite different efficacies, so weighting them # ! all with +-1 when computing totals may give a wrong impression. Different # ! weights can be supplied as second argument to SynTypes(). We return to the # ! normal placement of synapses and # ! create two examples with very different weights: cw1_syns = ((cpl.SynType('AMPA', 10, 'b'), cpl.SynType('NMDA', 1, 'g')), (cpl.SynType('GABA_A', -2, 'g'), cpl.SynType('GABA_B', -10, 'b'))) cw1_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw1_syns) cw2_syns = ((cpl.SynType('AMPA', 1, 'b'), cpl.SynType('NMDA', 10, 'g')), (cpl.SynType('GABA_A', -20, 'g'), cpl.SynType('GABA_B', -1, 'b'))) cw2_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw2_syns) # ! We first plot them both in population mode cw1_cp.plot(aggrSyns=True) pylab.show() cw2_cp.plot(aggrSyns=True) pylab.show() # ! Finally, we plot them aggregating across groups and synapse models cw1_cp.plot(aggrGroups=True, aggrSyns=True) pylab.show() cw2_cp.plot(aggrGroups=True, aggrSyns=True) pylab.show() # ! Alternative colors for synapse patches # ! :::::::::::::::::::::::::::::::::::::: # ! Different colors can be specified using any legal color specification. # ! Colors should be saturated, as they will be mixed with white. You may # ! also provide a colormap explicitly. For this example, we use once more # ! normal placement and weights. As all synapse types are shown in layer # ! mode, we use that mode for display here. cc_syns = ( (cpl.SynType('AMPA', 1, 'maroon'), cpl.SynType('NMDA', 1, (0.9, 0.5, 0))), (cpl.SynType('GABA_A', -1, '0.7'), cpl.SynType('GABA_B', 1, pylab.cm.hsv))) cc_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cc_syns) cc_cp.plot(aggrGroups=True) pylab.show() # ! We get the following colors: # ! # ! AMPA brownish # ! NMDA golden orange # ! GABA_A jet colormap from red (max) to blue (0) # ! GABA_B grey # ! # ! **NB:** When passing an explicit colormap, parts outside the mask will be # ! shown to the "bad" color of the colormap, usually the "bottom" color in the # ! map. To let points outside the mask appear in white, set the bad color of # ! the colormap; unfortunately, this modifies the colormap. pylab.cm.hsv.set_bad(cpl.colormaps.bad_color) ccb_syns = ( (cpl.SynType('AMPA', 1, 'maroon'), cpl.SynType('NMDA', 1, (0.9, 0.5, 0.1))), (cpl.SynType('GABA_A', -1, '0.7'), cpl.SynType('GABA_B', 1, pylab.cm.hsv))) ccb_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=ccb_syns) ccb_cp.plot(aggrGroups=True) pylab.show() # ! Other configuration options # ! --------------------------- # ! # ! Some more adjustments are possible by setting certain module properties. # ! Some of these need to be set before ConnectionPattern() is constructed. # ! # ! Background color for masked parts of each patch cpl.colormaps.bad_color = 'cyan' # ! Background for layers cpl.plotParams.layer_bg = (0.8, 0.8, 0.0) # ! Resolution for patch computation cpl.plotParams.n_kern = 5 # ! Physical size of patches: longest egde of largest patch, in mm cpl.plotParams.patch_size = 40 # ! Margins around the figure (excluding labels) cpl.plotParams.margins.left = 40 cpl.plotParams.margins.top = 30 cpl.plotParams.margins.bottom = 15 cpl.plotParams.margins.right = 30 # ! Fonts for layer and population labels import matplotlib.font_manager as fmgr cpl.plotParams.layer_font = fmgr.FontProperties(family='serif', weight='bold', size='xx-large') cpl.plotParams.pop_font = fmgr.FontProperties('small') # ! 
Orientation for layer and population label cpl.plotParams.layer_orientation = {'sender': 'vertical', 'target': 60} cpl.plotParams.pop_orientation = {'sender': 'horizontal', 'target': -45} # ! Font for legend titles and ticks, tick placement, and tick format cpl.plotParams.legend_title_font = fmgr.FontProperties(family='serif', weight='bold', size='large') cpl.plotParams.legend_tick_font = fmgr.FontProperties(family='sans-serif', weight='light', size='xx-small') cpl.plotParams.legend_ticks = [0, 1, 2] cpl.plotParams.legend_tick_format = '%.1f pA' cx_cp = cpl.ConnectionPattern(c_layer, c_conn) cx_cp.plot(colorLimits=[0, 2]) pylab.show() # ! Several more options are available to control the format of the color bars # ! (they all are members of plotParams): # ! * legend_location : if 'top', place synapse name atop color bar # ! * cbwidth : width of single color bar relative to figure # ! * margins.colbar : height of lower margin set aside for color bar, in mm # ! * cbheight : height of single color bar relative to margins.colbar # ! * cbwidth : width of single color bar relative to figure width # ! * cbspace : spacing between color bars, relative to figure width # ! * cboffset : offset of first color bar from left margin, relative to # ! figure width # ! You can also specify the width of the final figure, but this may not work # ! well with on-screen display or here in pyreport. Width is in mm. # ! Note that left and right margin combined are 70mm wide, so only 50mm are # ! left for the actual CPT. cx_cp.plot(fixedWidth=120) pylab.show() # ! If not using pyreport, we finally show and block if not using_pyreport: print("") print("The connplotter_tutorial script is done. " + "Call pylab.show() and enjoy the figures!") print( "You may need to close all figures manually " + "to get the Python prompt back.") print("") pylab.show = pylab_show
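# ! Quick reference
# ! ---------------
# ! The commented-out sketch below condenses the core ConnPlotter calls
# ! exercised in this tutorial into one place. It assumes the imports and the
# ! ``ex.simple()`` example network introduced above; the variable and file
# ! names are placeholders, not part of the toolbox API.
# qr_layers, qr_conns, qr_models = ex.simple()
# qr_cp = cpl.ConnectionPattern(qr_layers, qr_conns)
# qr_cp.plot()                                  # one patch per population pair
# qr_cp.plot(globalColors=True)                 # common colour scale
# qr_cp.plot(aggrGroups=True, aggrSyns=True)    # totals per layer pair
# qr_cp.toLaTeX(file='quick_reference.tex', standalone=True, enumerate=True)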
gpl-2.0
mikolajsacha/tweetsclassification
src/features/sentence_embeddings/isentence_embedding.py
1
2193
""" Contains basic interface (abstract base class) for sentence embeddings. """ from abc import ABCMeta, abstractmethod from sklearn.decomposition import PCA class ISentenceEmbedding(object): """ Abstract base class for sentece embeddings. Sentence embedding creates vectors representing sentences (word lists) using a specified word embedding. """ __metaclass__ = ABCMeta def __init__(self, target_sentence_vector_length): self.use_pca = target_sentence_vector_length is not None if self.use_pca: self.target_vector_length = target_sentence_vector_length self.pca = PCA(n_components=self.target_vector_length) def build(self, word_embedding, sentences = None): """ A wrapper for build_raw which performs further preprocessing on embedding """ self.build_raw(word_embedding) if self.use_pca: if sentences is None: raise AttributeError("When using PCA you must provide training sentences to sentence embedding") self.pca.fit([self.get_raw_vector(sentence) for sentence in sentences]) @abstractmethod def build_raw(self, word_embedding): """ Generates sentence embedding for a given word embedding :param word_embedding: word embedding, for which sentence embedding will be built :type word_embedding: an instance of class implementing IWordEmbedding interface """ raise NotImplementedError def __getitem__(self, sentence): """ A wrapper for get_raw_vector which returns vector after preprocessing """ if self.use_pca: return self.pca.transform([self.get_raw_vector(sentence)])[0] else: return self.get_raw_vector(sentence) @abstractmethod def get_raw_vector(self, sentence): """ Returns vector representation for a given sentence based on current model :param sentence: sentence to be vectorized :type sentence: list of strings (words) :return: vector representation of the sentence, formatted as numpy vector of doubles """ raise NotImplementedError
mit
IntelLabs/hpat
examples/series/str/series_str_casefold.py
1
1838
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************

import pandas as pd
from numba import njit


@njit
def series_str_casefold():
    series = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
    out_series = series.str.casefold()

    return out_series


# Expect series of 'lower', 'capitals', 'this is a sentence', 'swapcase'
print(series_str_casefold())
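# Why casefold rather than lower: casefold applies the more aggressive Unicode
# case folding, e.g. the German sharp s 'ß' folds to 'ss' while lower() keeps
# it. The comparison below uses plain pandas (not compiled with @njit) and is
# only meant to illustrate the semantics of the method demonstrated above.
cmp_series = pd.Series(['Straße', 'SwApCaSe'])
print(cmp_series.str.casefold())  # 'strasse', 'swapcase'
print(cmp_series.str.lower())     # 'straße', 'swapcase'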
bsd-2-clause
OpenMined/PySyft
packages/syft/docs/conf.py
1
10687
# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # stdlib import datetime import inspect import os from pathlib import Path import shutil import sys from typing import Any from typing import Dict dir_name = inspect.getfile(inspect.currentframe()) # type: ignore __location__ = os.path.join(os.getcwd(), os.path.dirname(dir_name)) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(__location__, "../src")) # -- Run sphinx-apidoc ------------------------------------------------------ # This hack is necessary since RTD does not issue `sphinx-apidoc` before running # `sphinx-build -b html . _build/html`. See Issue: # https://github.com/rtfd/readthedocs.org/issues/1139 # DON'T FORGET: Check the box "Install your project inside a virtualenv using # setup.py install" in the RTD Advanced Settings. # Additionally it helps us to avoid running apidoc manually try: # for Sphinx >= 1.7 # third party from sphinx.ext import apidoc except ImportError: # third party from sphinx import apidoc output_dir = os.path.join(__location__, "api") module_dir = os.path.join(__location__, "../src/syft") try: shutil.rmtree(output_dir) except FileNotFoundError: pass try: # third party from pkg_resources import parse_version import sphinx # found this --module-first here shorturl.at/iDKNW cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir} --module-first" cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) args = cmd_line.split(" ") if parse_version(sphinx.__version__) >= parse_version("1.7"): args = args[1:] apidoc.main(args) except Exception as e: print(f"Running `sphinx-apidoc` failed!\n{e}") # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.autosummary", "sphinx.ext.viewcode", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.ifconfig", "sphinx.ext.mathjax", "sphinx.ext.napoleon", ] extensions.append("recommonmark") # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] # override autodoc defaults to skip/not skip certain methods def skip( app: Any, what: Any, name: str, obj: Any, would_skip: bool, options: Any ) -> bool: if name == "__init__": return False if name == "__hash__": return False if name == "__eq__": return False if name == "_proto2object": return False if name == "_object2proto": return False if name == "_serialize": return False if name == "_deserialize": return False return would_skip # To configure AutoStructify def setup(app: Any) -> None: # third party from recommonmark.transform import AutoStructify app.add_config_value( "recommonmark_config", { "auto_toc_tree_section": "Contents", "enable_eval_rst": True, "enable_math": True, "enable_inline_math": True, }, True, ) app.add_transform(AutoStructify) app.connect("autodoc-skip-member", skip) # The suffix of source filenames. source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The main toctree document. master_doc = "index" # General information about the project. now = datetime.datetime.now() project = "syft" copyright = f"{now.year}, OpenMined Core Contributors" # The version info for the project you're documenting, acts as a replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. default_version = "" # Is set by calling `setup.py docs` # The full version, including alpha/beta/rc tags. release = "" # Is set by calling `setup.py docs` # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to the source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The rest default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, section_author and module_author directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx-theme-graphite' # html_theme = "alabaster" html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {"sidebar_width": "300px"} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_themes/"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". 
try: # syft absolute from syft import __version__ as version except ImportError: pass else: release = default_version # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "" # The name of an image file (within the static path) to use as the favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # sort methods by source order autodoc_member_order = "bysource" # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = "syft-doc" # -- Options for LaTeX output -------------------------------------------------- latex_elements: Dict[str, str] = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ("index", "user_guide.tex", "syft Documentation", "Andrew Trask", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = "" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_domain_indices = True

# -- External mapping ------------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    "sphinx": ("http://www.sphinx-doc.org/en/stable", None),
    "python": ("https://docs.python.org/" + python_version, None),
    "matplotlib": ("https://matplotlib.org", None),
    "numpy": ("https://docs.scipy.org/doc/numpy", None),
    "sklearn": ("http://scikit-learn.org/stable", None),
    "pandas": ("http://pandas.pydata.org/pandas-docs/stable", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
}
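# --- Editor's illustrative sketch (not part of the original conf.py) ---------
# python_version is just "<major>.<minor>" of the interpreter building the
# docs (e.g. "3.9"), which keeps the "python" inventory URL above in sync with
# that interpreter, so cross-references such as :class:`pandas.DataFrame` or
# :func:`numpy.mean` resolve against the inventories registered via intersphinx.
assert python_version == f"{sys.version_info[0]}.{sys.version_info[1]}"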
apache-2.0
lukauskas/scipy
scipy/interpolate/ndgriddata.py
45
7161
""" Convenience interface to N-D interpolation .. versionadded:: 0.9 """ from __future__ import division, print_function, absolute_import import numpy as np from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \ CloughTocher2DInterpolator, _ndim_coords_from_arrays from scipy.spatial import cKDTree __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator', 'CloughTocher2DInterpolator'] #------------------------------------------------------------------------------ # Nearest-neighbour interpolation #------------------------------------------------------------------------------ class NearestNDInterpolator(NDInterpolatorBase): """ NearestNDInterpolator(points, values) Nearest-neighbour interpolation in N dimensions. .. versionadded:: 0.9 Methods ------- __call__ Parameters ---------- x : (Npoints, Ndims) ndarray of floats Data point coordinates. y : (Npoints,) ndarray of float or complex Data values. rescale : boolean, optional Rescale points to unit cube before performing interpolation. This is useful if some of the input dimensions have incommensurable units and differ by many orders of magnitude. .. versionadded:: 0.14.0 Notes ----- Uses ``scipy.spatial.cKDTree`` """ def __init__(self, x, y, rescale=False): NDInterpolatorBase.__init__(self, x, y, rescale=rescale, need_contiguous=False, need_values=False) self.tree = cKDTree(self.points) self.values = y def __call__(self, *args): """ Evaluate interpolator at given points. Parameters ---------- xi : ndarray of float, shape (..., ndim) Points where to interpolate data at. """ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1]) xi = self._check_call_shape(xi) xi = self._scale_x(xi) dist, i = self.tree.query(xi) return self.values[i] #------------------------------------------------------------------------------ # Convenience interface function #------------------------------------------------------------------------------ def griddata(points, values, xi, method='linear', fill_value=np.nan, rescale=False): """ Interpolate unstructured D-dimensional data. Parameters ---------- points : ndarray of floats, shape (n, D) Data point coordinates. Can either be an array of shape (n, D), or a tuple of `ndim` arrays. values : ndarray of float or complex, shape (n,) Data values. xi : ndarray of float, shape (M, D) Points at which to interpolate data. method : {'linear', 'nearest', 'cubic'}, optional Method of interpolation. One of ``nearest`` return the value at the data point closest to the point of interpolation. See `NearestNDInterpolator` for more details. ``linear`` tesselate the input point set to n-dimensional simplices, and interpolate linearly on each simplex. See `LinearNDInterpolator` for more details. ``cubic`` (1-D) return the value determined from a cubic spline. ``cubic`` (2-D) return the value determined from a piecewise cubic, continuously differentiable (C1), and approximately curvature-minimizing polynomial surface. See `CloughTocher2DInterpolator` for more details. fill_value : float, optional Value used to fill in for requested points outside of the convex hull of the input points. If not provided, then the default is ``nan``. This option has no effect for the 'nearest' method. rescale : bool, optional Rescale points to unit cube before performing interpolation. This is useful if some of the input dimensions have incommensurable units and differ by many orders of magnitude. .. versionadded:: 0.14.0 Notes ----- .. 
versionadded:: 0.9 Examples -------- Suppose we want to interpolate the 2-D function >>> def func(x, y): ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 on a grid in [0, 1]x[0, 1] >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] but we only know its values at 1000 data points: >>> points = np.random.rand(1000, 2) >>> values = func(points[:,0], points[:,1]) This can be done with `griddata` -- below we try out all of the interpolation methods: >>> from scipy.interpolate import griddata >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest') >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') One can see that the exact result is reproduced by all of the methods to some degree, but for this smooth function the piecewise cubic interpolant gives the best results: >>> import matplotlib.pyplot as plt >>> plt.subplot(221) >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1) >>> plt.title('Original') >>> plt.subplot(222) >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Nearest') >>> plt.subplot(223) >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Linear') >>> plt.subplot(224) >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Cubic') >>> plt.gcf().set_size_inches(6, 6) >>> plt.show() """ points = _ndim_coords_from_arrays(points) if points.ndim < 2: ndim = points.ndim else: ndim = points.shape[-1] if ndim == 1 and method in ('nearest', 'linear', 'cubic'): from .interpolate import interp1d points = points.ravel() if isinstance(xi, tuple): if len(xi) != 1: raise ValueError("invalid number of dimensions in xi") xi, = xi # Sort points/values together, necessary as input for interp1d idx = np.argsort(points) points = points[idx] values = values[idx] ip = interp1d(points, values, kind=method, axis=0, bounds_error=False, fill_value=fill_value) return ip(xi) elif method == 'nearest': ip = NearestNDInterpolator(points, values, rescale=rescale) return ip(xi) elif method == 'linear': ip = LinearNDInterpolator(points, values, fill_value=fill_value, rescale=rescale) return ip(xi) elif method == 'cubic' and ndim == 2: ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value, rescale=rescale) return ip(xi) else: raise ValueError("Unknown interpolation method %r for " "%d dimensional data" % (method, ndim))
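# --- Editor's illustrative sketch (not part of the original module) ----------
# Minimal usage of NearestNDInterpolator defined above; guarded so it never
# runs on a plain import (run it via `python -m scipy.interpolate.ndgriddata`).
# Nearest-neighbour interpolation reproduces the data values exactly at the
# sample points, since the closest data point to a sample is the sample itself.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    pts = rng.rand(50, 2)                  # scattered points in the unit square
    vals = np.hypot(pts[:, 0], pts[:, 1])  # values to interpolate
    interp = NearestNDInterpolator(pts, vals)
    assert np.allclose(interp(pts), vals)  # exact at the data points
    print(interp([[0.5, 0.5]]))            # value of the nearest sample point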
bsd-3-clause