Dataset schema (one record per source file):
  repo_name : string (length 7-90)
  path      : string (length 5-191)
  copies    : string (length 1-3)
  size      : string (length 4-6)
  content   : string (length 976-581k)
  license   : string (15 classes)
repo_name: samuelgarcia/HearingLossSimulator
path: hearinglosssimulator/tests/find_good_chunksize.py
copies: 1
size: 4416
content:
""" chunksize and backward_chunksize variables have a strong impact on the quality of backward filtering. Normally the backward stage pgc2 shoudl be done offline for the whole buffer. For online it is done chunk by chunksize. For low frequency this lead to bias the result because of side effect, so the chunksize and backward_chunksize should be choosen carefully. The compute the error bewteen the offline and online backward filter for some backward_chunksize """ import hearinglosssimulator as hls import numpy as np import scipy.signal import matplotlib.pyplot as plt def plot_residual(): nb_channel = 1 sample_rate =44100. chunksize = 256 #~ chunksize = 512 #~ chunksize = 1024 #~ chunksize = #~ nloop = 200 nloop = 200 nb_freq_band=10 length = int(chunksize*nloop) in_buffer = hls.whitenoise(length, sample_rate=sample_rate,) in_buffer = np.tile(in_buffer[:, None],(1, nb_channel)) #~ lost_chunksize = np.linspace(0,1024, 5).astype(int) lost_chunksize = np.arange(7).astype(int) * chunksize #~ backward_chunksizes = [512,1024,1536,2048] #~ backward_chunksizes = [1024,1536,2048] #~ backward_chunksizes = np.linspace(1024,2048, 5).astype(int) backward_chunksizes = lost_chunksize + chunksize all_mean_residuals = np.zeros((len(backward_chunksizes), nb_freq_band)) all_max_residuals = np.zeros((len(backward_chunksizes), nb_freq_band)) for i, backward_chunksize in enumerate(backward_chunksizes): print('backward_chunksize', backward_chunksize) loss_params = { 'left' : {'freqs' : [125., 250., 500., 1000., 2000., 4000., 8000.], 'compression_degree': [0., 0., 0., 0., 0., 0., 0.], 'passive_loss_db' : [0., 0., 0., 0., 0., 0., 0.], }} processing_conf = dict(nb_freq_band=nb_freq_band, low_freq = 40., high_freq = 500., level_max=100, level_step=100, debug_mode=True, chunksize=chunksize, backward_chunksize=backward_chunksize, loss_params=loss_params) processing = hls.InvCGC(nb_channel=nb_channel, sample_rate=sample_rate, dtype='float32', **processing_conf) online_arrs = hls.run_instance_offline(processing, in_buffer, chunksize, sample_rate, dtype='float32', buffersize_margin=backward_chunksize) #~ processing, online_arrs = hls.run_one_class_offline(hls.InvCGC, in_buffer, chunksize, sample_rate, processing_conf=processing_conf, buffersize_margin=backward_chunksize) #~ freq_band = 2 online_hpaf = online_arrs['hpaf'] online_pgc2 = online_arrs['pgc2'] offline_pgc2 = online_pgc2.copy() n = processing.nb_freq_band for b in range(n): offline_pgc2[:, b] = scipy.signal.sosfilt(processing.coefficients_pgc[b, :,:], online_hpaf[::-1,b])[::-1] online_pgc2 = online_pgc2[:-backward_chunksize] offline_pgc2 = offline_pgc2[:-backward_chunksize] residual = np.abs((online_pgc2.astype('float64')-offline_pgc2.astype('float64'))/np.mean(np.abs(offline_pgc2.astype('float64')), axis=0)) all_mean_residuals[i, :] = np.mean(residual, axis=0) all_max_residuals[i, :] = np.max(residual, axis=0) def my_imshow(m, ax): im = ax.imshow(m, interpolation='nearest', origin ='lower', aspect = 'auto', cmap = 'viridis')#, extent = extent, cmap=cmap) im.set_clim(0,0.05) ax.set_xticks(np.arange(processing.freqs.size)) ax.set_xticklabels(['{:0.0f}'.format(f) for f in processing.freqs]) ax.set_yticks(np.arange(len(backward_chunksizes))) ax.set_yticklabels(['{}'.format(f) for f in lost_chunksize]) ax.set_xlabel('freq') ax.set_ylabel('lost_chunksize') return im print(all_max_residuals) fig, axs = plt.subplots(nrows = 2, sharex=True) im1 = my_imshow(all_mean_residuals, axs[0]) im2 = my_imshow(all_max_residuals, axs[1]) cax = fig.add_axes([0.92 , 0.05 , .02, 0.9 ] 
) fig.colorbar(im1, ax=axs[0], cax=cax, orientation='vertical') plt.show() if __name__ =='__main__': plot_residual()
license: mit
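The comparison at the heart of the script above is between a full-buffer backward pass and a chunked backward pass with limited future context. A minimal self-contained sketch of that idea, assuming a generic Butterworth second-order-sections filter in place of the simulator's pgc coefficients (which are not reproduced here):

import numpy as np
import scipy.signal

sos = scipy.signal.butter(4, 0.1, output='sos')  # hypothetical filter, stands in for coefficients_pgc
x = np.random.randn(8192)                        # hypothetical signal, stands in for the hpaf output

# Offline reference: filter the time-reversed signal, then reverse the result.
offline = scipy.signal.sosfilt(sos, x[::-1])[::-1]

# Online-style approximation: backward-filter the chunk at [start, start + chunksize)
# using only backward_chunksize samples of "future" context.
chunksize, backward_chunksize = 256, 1024
start = 4096
window = x[start:start + backward_chunksize]
online_chunk = scipy.signal.sosfilt(sos, window[::-1])[::-1][:chunksize]

# The mismatch against the full-context reference shrinks as backward_chunksize grows.
error = np.max(np.abs(online_chunk - offline[start:start + chunksize]))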
repo_name: lukebarnard1/bokeh
path: examples/charts/file/scatter.py
copies: 37
size: 1607
content:
from collections import OrderedDict

import pandas as pd

from bokeh.charts import Scatter, output_file, show, vplot
from bokeh.sampledata.iris import flowers

setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]

xyvalues = OrderedDict([("setosa", setosa.values),
                        ("versicolor", versicolor.values),
                        ("virginica", virginica.values)])

scatter1 = Scatter(xyvalues, title="iris dataset, dict_input", xlabel="petal_length",
                   ylabel="petal_width", legend='top_left', marker="triangle")

groupped_df = flowers[["petal_length", "petal_width", "species"]].groupby("species")

scatter2 = Scatter(groupped_df, title="iris dataset, dict_input", xlabel="petal_length",
                   ylabel="petal_width", legend='top_left')

pdict = OrderedDict()
for i in groupped_df.groups.keys():
    labels = groupped_df.get_group(i).columns
    xname = labels[0]
    yname = labels[1]
    x = getattr(groupped_df.get_group(i), xname)
    y = getattr(groupped_df.get_group(i), yname)
    pdict[i] = list(zip(x, y))

df = pd.DataFrame(pdict)
scatter3 = Scatter(df, title="iris dataset, dict_input", xlabel="petal_length",
                   ylabel="petal_width", legend='top_left')

scatter4 = Scatter(list(xyvalues.values()), title="iris dataset, dict_input",
                   xlabel="petal_length", ylabel="petal_width", legend='top_left')

output_file("scatter.html")

show(vplot(scatter1, scatter2, scatter3, scatter4))
license: bsd-3-clause
repo_name: guillermo-carrasco/bcbio-nextgen
path: bcbio/utils.py
copies: 1
size: 20334
content:
"""Helpful utilities for building analysis pipelines. """ import gzip import os import tempfile import time import shutil import contextlib import itertools import functools import random import ConfigParser import collections import fnmatch import subprocess import sys import subprocess import toolz as tz import yaml try: from concurrent import futures except ImportError: try: import futures except ImportError: futures = None @contextlib.contextmanager def cpmap(cores=1): """Configurable parallel map context manager. Returns appropriate map compatible function based on configuration: - Local single core (the default) - Multiple local cores """ if int(cores) == 1: yield itertools.imap else: if futures is None: raise ImportError("concurrent.futures not available") pool = futures.ProcessPoolExecutor(cores) yield pool.map pool.shutdown() def map_wrap(f): """Wrap standard function to easily pass into 'map' processing. """ @functools.wraps(f) def wrapper(*args, **kwargs): return apply(f, *args, **kwargs) return wrapper def transform_to(ext): """ Decorator to create an output filename from an output filename with the specified extension. Changes the extension, in_file is transformed to a new type. Takes functions like this to decorate: f(in_file, out_dir=None, out_file=None) or, f(in_file=in_file, out_dir=None, out_file=None) examples: @transform(".bam") f("the/input/path/file.sam") -> f("the/input/path/file.sam", out_file="the/input/path/file.bam") @transform(".bam") f("the/input/path/file.sam", out_dir="results") -> f("the/input/path/file.sam", out_file="results/file.bam") """ def decor(f): @functools.wraps(f) def wrapper(*args, **kwargs): out_file = kwargs.get("out_file", None) if not out_file: in_path = kwargs.get("in_file", args[0]) out_dir = kwargs.get("out_dir", os.path.dirname(in_path)) safe_makedir(out_dir) out_name = replace_suffix(os.path.basename(in_path), ext) out_file = os.path.join(out_dir, out_name) kwargs["out_file"] = out_file if not file_exists(out_file): out_file = f(*args, **kwargs) return out_file return wrapper return decor def filter_to(word): """ Decorator to create an output filename from an input filename by adding a word onto the stem. in_file is filtered by the function and the results are written to out_file. You would want to use this over transform_to if you don't know the extension of the file going in. This also memoizes the output file. Takes functions like this to decorate: f(in_file, out_dir=None, out_file=None) or, f(in_file=in_file, out_dir=None, out_file=None) examples: @filter_to(".foo") f("the/input/path/file.sam") -> f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam") @filter_to(".foo") f("the/input/path/file.sam", out_dir="results") -> f("the/input/path/file.sam", out_file="results/file.foo.bam") """ def decor(f): @functools.wraps(f) def wrapper(*args, **kwargs): out_file = kwargs.get("out_file", None) if not out_file: in_path = kwargs.get("in_file", args[0]) out_dir = kwargs.get("out_dir", os.path.dirname(in_path)) safe_makedir(out_dir) out_name = append_stem(os.path.basename(in_path), word) out_file = os.path.join(out_dir, out_name) kwargs["out_file"] = out_file if not file_exists(out_file): out_file = f(*args, **kwargs) return out_file return wrapper return decor def memoize_outfile(ext=None, stem=None): """ Memoization decorator. See docstring for transform_to and filter_to for details. 
""" if ext: return transform_to(ext) if stem: return filter_to(stem) def safe_makedir(dname): """Make a directory if it doesn't exist, handling concurrent race conditions. """ if not dname: return dname num_tries = 0 max_tries = 5 while not os.path.exists(dname): # we could get an error here if multiple processes are creating # the directory at the same time. Grr, concurrency. try: os.makedirs(dname) except OSError: if num_tries > max_tries: raise num_tries += 1 time.sleep(2) return dname @contextlib.contextmanager def chdir(new_dir): """Context manager to temporarily change to a new directory. http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/ """ cur_dir = os.getcwd() safe_makedir(new_dir) os.chdir(new_dir) try: yield finally: os.chdir(cur_dir) @contextlib.contextmanager def tmpfile(*args, **kwargs): """Make a tempfile, safely cleaning up file descriptors on completion. """ (fd, fname) = tempfile.mkstemp(*args, **kwargs) try: yield fname finally: os.close(fd) if os.path.exists(fname): os.remove(fname) def file_exists(fname): """Check if a file exists and is non-empty. """ try: return fname and os.path.exists(fname) and os.path.getsize(fname) > 0 except OSError: return False def file_uptodate(fname, cmp_fname): """Check if a file exists, is non-empty and is more recent than cmp_fname. """ try: return (file_exists(fname) and file_exists(cmp_fname) and os.path.getmtime(fname) >= os.path.getmtime(cmp_fname)) except OSError: return False def create_dirs(config, names=None): if names is None: names = config["dir"].keys() for dname in names: d = config["dir"][dname] safe_makedir(d) def save_diskspace(fname, reason, config): """Overwrite a file in place with a short message to save disk. This keeps files as a sanity check on processes working, but saves disk by replacing them with a short message. """ if config["algorithm"].get("save_diskspace", False): with open(fname, "w") as out_handle: out_handle.write("File removed to save disk space: %s" % reason) def read_galaxy_amqp_config(galaxy_config, base_dir): """Read connection information on the RabbitMQ server from Galaxy config. """ galaxy_config = add_full_path(galaxy_config, base_dir) config = ConfigParser.ConfigParser() config.read(galaxy_config) amqp_config = {} for option in config.options("galaxy_amqp"): amqp_config[option] = config.get("galaxy_amqp", option) return amqp_config def add_full_path(dirname, basedir=None): if basedir is None: basedir = os.getcwd() if not dirname.startswith("/"): dirname = os.path.join(basedir, dirname) return dirname def splitext_plus(f): """Split on file extensions, allowing for zipped extensions. """ base, ext = os.path.splitext(f) if ext in [".gz", ".bz2", ".zip"]: base, ext2 = os.path.splitext(base) ext = ext2 + ext return base, ext def remove_safe(f): try: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) except OSError: pass def file_plus_index(fname): """Convert a file name into the file plus required indexes. """ exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi", ".fq.gz": ".gbi"} ext = splitext_plus(fname)[-1] if ext in exts: return [fname, fname + exts[ext]] else: return [fname] def copy_plus(orig, new): """Copy a fils, including biological index files. 
""" for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]: if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)): shutil.copyfile(orig + ext, new + ext) def symlink_plus(orig, new): """Create relative symlinks and handle associated biological index files. """ for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]: if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)): with chdir(os.path.dirname(new)): remove_safe(new + ext) # Work around symlink issues on some filesystems. Randomly # fail to symlink. try: os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext)) except OSError: if not os.path.exists(new + ext) or not os.path.lexists(new + ext): remove_safe(new + ext) shutil.copyfile(orig + ext, new + ext) orig_noext = splitext_plus(orig)[0] new_noext = splitext_plus(new)[0] for sub_ext in [".bai"]: if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext): with chdir(os.path.dirname(new_noext)): os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext)) def open_gzipsafe(f): return gzip.open(f) if f.endswith(".gz") else open(f) def append_stem(to_transform, word): """ renames a filename or list of filenames with 'word' appended to the stem of each one: example: append_stem("/path/to/test.sam", "_filtered") -> "/path/to/test_filtered.sam" """ if is_sequence(to_transform): return [append_stem(f, word) for f in to_transform] elif is_string(to_transform): (base, ext) = splitext_plus(to_transform) return "".join([base, word, ext]) else: raise ValueError("append_stem takes a single filename as a string or " "a list of filenames to transform.") def replace_suffix(to_transform, suffix): """ replaces the suffix on a filename or list of filenames example: replace_suffix("/path/to/test.sam", ".bam") -> "/path/to/test.bam" """ if is_sequence(to_transform): transformed = [] for f in to_transform: (base, _) = os.path.splitext(f) transformed.append(base + suffix) return transformed elif is_string(to_transform): (base, _) = os.path.splitext(to_transform) return base + suffix else: raise ValueError("replace_suffix takes a single filename as a string or " "a list of filenames to transform.") # ## Functional programming def partition_all(n, iterable): """Partition a list into equally sized pieces, including last smaller parts http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all """ it = iter(iterable) while True: chunk = list(itertools.islice(it, n)) if not chunk: break yield chunk def robust_partition_all(n, iterable): """ replaces partition_all with a more robust version. Workaround for a segfault in pybedtools when using a BedTool as an iterator: https://github.com/daler/pybedtools/issues/88 for the discussion """ it = iter(iterable) while True: x = [] for _ in range(n): try: x.append(it.next()) except StopIteration: yield x # Omitting this StopIteration results in a segfault! raise StopIteration yield x def partition(pred, iterable, tolist=False): 'Use a predicate to partition entries into false entries and true entries' # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 t1, t2 = itertools.tee(iterable) ifalse = itertools.ifilterfalse(pred, t1) itrue = itertools.ifilter(pred, t2) if tolist: return list(ifalse), list(itrue) else: return ifalse, itrue # ## Dealing with configuration files def merge_config_files(fnames): """Merge configuration files, preferring definitions in latter files. 
""" def _load_yaml(fname): with open(fname) as in_handle: config = yaml.load(in_handle) return config out = _load_yaml(fnames[0]) for fname in fnames[1:]: cur = _load_yaml(fname) for k, v in cur.iteritems(): if k in out and isinstance(out[k], dict): out[k].update(v) else: out[k] = v return out def deepish_copy(org): """Improved speed deep copy for dictionaries of simple python types. Thanks to Gregg Lind: http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/ """ out = dict().fromkeys(org) for k, v in org.iteritems(): if isinstance(v, dict): out[k] = deepish_copy(v) else: try: out[k] = v.copy() # dicts, sets except AttributeError: try: out[k] = v[:] # lists, tuples, strings, unicode except TypeError: out[k] = v # ints return out def get_in(d, t, default=None): """ look up if you can get a tuple of values from a nested dictionary, each item in the tuple a deeper layer example: get_in({1: {2: 3}}, (1, 2)) -> 3 example: get_in({1: {2: 3}}, (2, 3)) -> {} """ return tz.get_in(t, d, default) def flatten(l): """ flatten an irregular list of lists example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6] lifted from: http://stackoverflow.com/questions/2158395/ """ for el in l: if isinstance(el, collections.Iterable) and not isinstance(el, basestring): for sub in flatten(el): yield sub else: yield el def is_sequence(arg): """ check if 'arg' is a sequence example: arg([]) -> True example: arg("lol") -> False """ return (not hasattr(arg, "strip") and hasattr(arg, "__getitem__") or hasattr(arg, "__iter__")) def is_pair(arg): """ check if 'arg' is a two-item sequence """ return is_sequence(arg) and len(arg) == 2 def is_string(arg): return isinstance(arg, basestring) def locate(pattern, root=os.curdir): '''Locate all files matching supplied filename pattern in and below supplied root directory.''' for path, dirs, files in os.walk(os.path.abspath(root)): for filename in fnmatch.filter(files, pattern): yield os.path.join(path, filename) def itersubclasses(cls, _seen=None): """ snagged from: http://code.activestate.com/recipes/576949/ itersubclasses(cls) Generator over all subclasses of a given class, in depth first order. >>> list(itersubclasses(int)) == [bool] True >>> class A(object): pass >>> class B(A): pass >>> class C(A): pass >>> class D(B,C): pass >>> class E(D): pass >>> >>> for cls in itersubclasses(A): ... print(cls.__name__) B D E C >>> # get ALL (new-style) classes currently defined >>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS ['type', ...'tuple', ...] 
""" if not isinstance(cls, type): raise TypeError('itersubclasses must be called with ' 'new-style classes, not %.100r' % cls) if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in subs: if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub def replace_directory(out_files, dest_dir): """ change the output directory to dest_dir can take a string (single file) or a list of files """ if is_sequence(out_files): filenames = map(os.path.basename, out_files) return [os.path.join(dest_dir, x) for x in filenames] elif is_string(out_files): return os.path.join(dest_dir, os.path.basename(out_files)) else: raise ValueError("in_files must either be a sequence of filenames " "or a string") def which(program): """ returns the path to an executable or None if it can't be found""" def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def reservoir_sample(stream, num_items, item_parser=lambda x: x): """ samples num_items from the stream keeping each with equal probability """ kept = [] for index, item in enumerate(stream): if index < num_items: kept.append(item_parser(item)) else: r = random.randint(0, index) if r < num_items: kept[r] = item_parser(item) return kept def compose(f, g): return lambda x: f(g(x)) def dictapply(d, fn): """ apply a function to all non-dict values in a dictionary """ for k, v in d.items(): if isinstance(v, dict): v = dictapply(v, fn) else: d[k] = fn(v) return d def Rscript_cmd(): """Retrieve path to locally installed Rscript or first in PATH. Prefers Rscript version installed via conda to a system version. """ rscript = which(os.path.join(os.path.dirname(sys.executable), "Rscript")) if rscript: return rscript else: return which("Rscript") def R_sitelib(): """Retrieve the R site-library installed with the bcbio installer. 
""" from bcbio import install return os.path.join(install.get_defaults().get("tooldir", "/usr/local"), "lib", "R", "site-library") def R_package_path(package): """ return the path to an installed R package """ local_sitelib = R_sitelib() rscript = Rscript_cmd() cmd = """{rscript} -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'""" try: output = subprocess.check_output(cmd.format(**locals()), shell=True) except subprocess.CalledProcessError, e: return None for line in output.split("\n"): if "[1]" not in line: continue dirname = line.split("[1]")[1].replace("\"", "").strip() if os.path.exists(dirname): return dirname return None def is_gzipped(fname): _, ext = os.path.splitext(fname) return ext in [".gz", "gzip"] def is_bzipped(fname): _, ext = os.path.splitext(fname) return ext in [".bz2", "bzip2"] def open_possible_gzip(fname, flag="r"): if is_gzipped(fname): if "b" not in flag: flag += "b" return gzip.open(fname, flag) else: return open(fname, flag) def filter_missing(xs): """ remove items from a list if they evaluate to False """ return filter(lambda x: x, xs) def rbind(dfs): """ acts like rbind for pandas dataframes """ if len(dfs) == 1: return dfs[0] df = dfs[0] for d in dfs[1:]: df = df.append(d) return df def max_command_length(): """ get the maximum length of the command line, in bytes, defaulting to a conservative number if not set http://www.in-ulm.de/~mascheck/various/argmax/ """ DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k try: arg_max = os.sysconf('SC_ARG_MAX') env_lines = len(os.environ) * 4 env_chars = sum([len(x) + len(y) for x, y in os.environ.iteritems()]) arg_length = arg_max - env_lines - 2048 except ValueError: arg_length = DEFAULT_MAX_LENGTH return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH
license: mit
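The transform_to and filter_to decorators documented in the bcbio/utils.py record above both derive an output path from the input path, create the output directory, and skip the wrapped function when a non-empty output file already exists. A hedged usage sketch of that behavior; the sam_to_bam function below is hypothetical and not part of bcbio:

from bcbio import utils

@utils.transform_to(".bam")
def sam_to_bam(in_file, out_dir=None, out_file=None):
    # Do the actual conversion here; this body only runs when out_file is missing or empty.
    with open(out_file, "w") as out_handle:
        out_handle.write("converted from %s\n" % in_file)
    return out_file

# "data/example.sam" -> "results/example.bam"; a second call returns the
# memoized path without re-running the conversion.
bam_path = sam_to_bam("data/example.sam", out_dir="results")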
repo_name: ssorgatem/qiime
path: qiime/group.py
copies: 15
size: 35019
content:
#!/usr/bin/env python """This module contains functions useful for obtaining groupings.""" __author__ = "Jai Ram Rideout" __copyright__ = "Copyright 2011, The QIIME project" __credits__ = ["Jai Ram Rideout", "Greg Caporaso", "Jeremy Widmann"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Jai Ram Rideout" __email__ = "jai.rideout@gmail.com" from collections import defaultdict from functools import partial import pandas as pd import numpy as np from qiime.stats import is_symmetric_and_hollow from qiime.parse import group_by_field, parse_mapping_file from qiime.filter import filter_mapping_file def get_grouped_distances(dist_matrix_header, dist_matrix, mapping_header, mapping, field, within=True, suppress_symmetry_and_hollowness_check=False): """Returns a list of distance groupings for the specified field. The return value is a list that contains tuples of three elements: the first two elements are the field values being compared, and the third element is a list of the distances. WARNING: Only symmetric, hollow distance matrices may be used as input. Asymmetric distance matrices, such as those obtained by the UniFrac Gain metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input. Arguments: - dist_matrix_header: The distance matrix header, obtained from parse.parse_distmat() - dist_matrix: The distance matrix, obtained from parse.parse_distmat(). - mapping_header: The mapping file header, obtained from parse.parse_mapping_file() - mapping: The mapping file's contents, obtained from parse.parse_mapping_file() - field: A field in the mapping file to do the grouping on. - within: If True, distances are grouped within a field value. If False, distances are grouped between field values. - suppress_symmetry_and_hollowness_check: By default, the input distance matrix will be checked for symmetry and hollowness. It is recommended to leave this check in place for safety, as the check is fairly fast. However, if you *know* you have a symmetric and hollow distance matrix, you can disable this check for small performance gains on extremely large distance matrices """ _validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping, field) mapping_data = [mapping_header] mapping_data.extend(mapping) groups = group_by_field(mapping_data, field) return _get_groupings(dist_matrix_header, dist_matrix, groups, within, suppress_symmetry_and_hollowness_check) def get_all_grouped_distances(dist_matrix_header, dist_matrix, mapping_header, mapping, field, within=True, suppress_symmetry_and_hollowness_check=False): """Returns a list of distances for either samples within each of the field values or between each of the field values for the specified field. WARNING: Only symmetric, hollow distance matrices may be used as input. Asymmetric distance matrices, such as those obtained by the UniFrac Gain metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input. Arguments: - dist_matrix_header: The distance matrix header, obtained from parse.parse_distmat() - dist_matrix: The distance matrix, obtained from parse.parse_distmat(). - mapping_header: The mapping file header, obtained from parse.parse_mapping_file() - mapping: The mapping file's contents, obtained from parse.parse_mapping_file() - field: A field in the mapping file to do the grouping on. - within: If True, distances are grouped within a field value. If False, distances are grouped between field values. 
- suppress_symmetry_and_hollowness_check: By default, the input distance matrix will be checked for symmetry and hollowness. It is recommended to leave this check in place for safety, as the check is fairly fast. However, if you *know* you have a symmetric and hollow distance matrix, you can disable this check for small performance gains on extremely large distance matrices """ distances = get_grouped_distances(dist_matrix_header, dist_matrix, mapping_header, mapping, field, within, suppress_symmetry_and_hollowness_check) results = [] for group in distances: for distance in group[2]: results.append(distance) return results def get_field_state_comparisons(dist_matrix_header, dist_matrix, mapping_header, mapping, field, comparison_field_states, suppress_symmetry_and_hollowness_check=False): """Returns a 2D dictionary relating distances between field states. The 2D dictionary is constructed such that each top-level key is a field state other than the field states in comparison_field_states. The second-level key is a field state from comparison_field_states, and the value at the (key, key) index is a list of distances between those two field states. Thus, given a field, this function will create comparisons between the specified comparison_field_states and all other field states. WARNING: Only symmetric, hollow distance matrices may be used as input. Asymmetric distance matrices, such as those obtained by the UniFrac Gain metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input. Arguments: - dist_matrix_header: The distance matrix header, obtained from parse.parse_distmat() - dist_matrix: The distance matrix, obtained from parse.parse_distmat(). - mapping_header: The mapping file header, obtained from parse.parse_mapping_file() - mapping: The mapping file's contents, obtained from parse.parse_mapping_file() - field: A field in the mapping file to do the comparisons on. - comparison_field_states: A list of strings specifying the field states to compare to all other field states. Cannot be an empty list. - suppress_symmetry_and_hollowness_check: By default, the input distance matrix will be checked for symmetry and hollowness. It is recommended to leave this check in place for safety, as the check is fairly fast. However, if you *know* you have a symmetric and hollow distance matrix, you can disable this check for small performance gains on extremely large distance matrices """ _validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping, field) # avoid empty groups of distances mapping_header, mapping = filter_mapping_file(mapping, mapping_header, dist_matrix_header) # Make sure each comparison group field state is in the specified field. if not comparison_field_states: raise ValueError("You must provide at least one field state to " "compare to all of the other field states.") mapping_data = [mapping_header] mapping_data.extend(mapping) groups = group_by_field(mapping_data, field) for field_state in comparison_field_states: if field_state not in groups: raise ValueError("The comparison group field state '%s' is not in " "the provided mapping file's field '%s'." % (field_state, field)) # Grab a list of all other field states (besides the ones in # comparison_field_states). These will be the field states that the states # in comparison_field_states will be compared against. field_states = [group for group in groups.keys() if group not in comparison_field_states] # Get between distance groupings for the field of interest. 
between_groupings = get_grouped_distances(dist_matrix_header, dist_matrix, mapping_header, mapping, field, within=False, suppress_symmetry_and_hollowness_check= suppress_symmetry_and_hollowness_check) # Build up our 2D dictionary giving the distances between a field state and # a comparison group field state by filtering out the between_groupings # list to include only the comparisons that we want. result = {} for field_state in field_states: result[field_state] = {} for comp_field_state in comparison_field_states: result[field_state][comp_field_state] = [] for group in between_groupings: if ((group[0] == field_state or group[1] == field_state) and (group[0] == comp_field_state or group[1] == comp_field_state)): # We've found a group of distances between our comparison # field state and the current field state, so keep the # data. result[field_state][comp_field_state] = group[2] return result def get_ordered_coordinates(coordinate_header, coordinate_matrix, order, strict=False): """ Return coordinate vectors in order coordinate_header: ids corresponding to vectors in coordinate_matrix (element 0 of output of qiime.parse.parse_coords) coordinate_matrix: the coordinate vectors (element 1 of output of qiime.parse.parse_coords) order: ordered ids from coordinate_header (usually sample ids) for coordinates that should be extracted strict: raise an error if an id from order is not present in coordinate_header (default: that id is ignored) The output of this function will be a tuple of the coordinate vectors corresponding to each id in order, and the id order: (ordered_coordinates, ordered_ids) Note that the output order can be a subset of the input order if some ids from order are not present in coordinate_header and strict == False. This function can be used in a way analogous to get_adjacent_distances to get a set of coordinates that might be connected by a line, for example. """ ordered_coordinates = [] ordered_ids = [] for o in order: try: coordinate_idx = coordinate_header.index(o) except ValueError: if strict: raise ValueError( "ID (%s) is not present in coordinate matrix" % o) else: pass else: ordered_coordinates.append(coordinate_matrix[coordinate_idx]) ordered_ids.append(o) return ordered_coordinates, ordered_ids def get_adjacent_distances(dist_matrix_header, dist_matrix, sample_ids, strict=False): """Return the distances between the adjacent sample_ids as a list dist_matrix_header: distance matrix headers, e.g. the output of qiime.parse.parse_distmat (element 0) dist_matrix: distance matrix, e.g., the output of qiime.parse.parse_distmat (element 1) sample_ids: a list of sample ids strict: boolean indicating whether to raise ValueError if a sample_id is not in dm (default: False; sample_ids not in dm are ignored) The output of this function will be a list of the distances between the adjacent sample_ids, and a list of the pair of sample ids corresponding to each distance. This could subsequently be used, for example, to plot unifrac distances between days in a timeseries, as d1 to d2, d2 to d3, d3 to d4, and so on. The list of pairs of sample ids are useful primarily in labeling axes when strict=False WARNING: Only symmetric, hollow distance matrices may be used as input. Asymmetric distance matrices, such as those obtained by the UniFrac Gain metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input. 
""" filtered_idx = [] filtered_sids = [] for sid in sample_ids: try: idx = dist_matrix_header.index(sid) except ValueError: if strict: raise ValueError( "Sample ID (%s) is not present in distance matrix" % sid) else: pass else: filtered_idx.append(idx) filtered_sids.append(sid) if len(filtered_idx) < 2: raise ValueError("At least two of your sample_ids must be present in the" " distance matrix. %d are present." % len(filtered_idx)) distance_results = [] header_results = [] for i in range(len(filtered_idx) - 1): distance_results.append( dist_matrix[filtered_idx[i]][filtered_idx[i + 1]]) header_results.append( (filtered_sids[i], filtered_sids[i + 1])) return distance_results, header_results def _group_by_sample_metadata(collapsed_md, sample_id_field="SampleID"): """Group sample identifiers by one or more metadata fields Parameters ---------- collapsed_md : pd.DataFrame The result of collapsing a sample metadata DataFrame, for example with collapse_metadata. sample_id_field : str, optional The sample id field in the mapping_f. Returns ------- dict Mapping of group id to set of input sample ids in that group. dict Mapping of input sample id to new group id. pd.DataFrame Sample metadata resulting from the collapse operation. Raises ------ KeyError If sample_id_field or any of the collapse fields are not column headers in mapping_f. """ new_index_to_group = {} old_index_to_new_index = {} for i in collapsed_md.index: old_indices = collapsed_md[sample_id_field][i] # this is a little ugly, but we need to handle single and multi-index # values here, and we always want to result to be a tuple if isinstance(i, tuple): new_index = i else: new_index = (i, ) new_index_to_group[new_index] = set(old_indices) for old_index in old_indices: old_index_to_new_index[old_index] = new_index return new_index_to_group, old_index_to_new_index def get_collapse_fns(): """ Return lookup of functions that can be used with biom.Table.collapse """ return {'median': _collapse_to_median, 'first': _collapse_to_first, 'random': _collapse_to_random, 'sum': _collapse_to_sum, 'mean': _collapse_to_mean} def collapse_samples(table, mapping_f, collapse_fields, collapse_mode): """ Collapse samples in a biom table and sample metadata Parameters ---------- table : biom.Table The biom table to be collapsed. mapping_f : file handle or filepath The sample metadata mapping file. collapse_fields : iterable The fields to combine when collapsing samples. For each sample in the mapping_f, the ordered values from these columns will be tuplized and used as the group identfier. Samples whose tuplized values in these fields are identical will be grouped. collapse_mode : str {sum, mean, median, random, first} The strategy to use for collapsing counts in the table. Returns ------- biom.Table The collapsed biom table. pd.DataFrame Sample metadata resulting from the collapse operation. Raises ------ KeyError If sample_id_field or any of the collapse fields are not column headers in mapping_f. """ collapsed_metadata = _collapse_metadata(mapping_f, collapse_fields) new_index_to_group, old_index_to_new_index = \ _group_by_sample_metadata(collapsed_metadata) partition_f = partial(_sample_id_from_group_id, sid_to_group_id=old_index_to_new_index) collapse_fns = get_collapse_fns() try: collapse_f = collapse_fns[collapse_mode] except KeyError: raise KeyError( "Unknown collapse function %s. Valid choices are: " "%s." 
% (collapse_mode, ', '.join(collapse_fns.keys()))) output_table = table.collapse( partition_f, collapse_f=collapse_f, norm=False, axis='sample') return collapsed_metadata, output_table def mapping_lines_from_collapsed_df(collapsed_df): """ Formats a multi-index DataFrame as lines of a QIIME mapping file Parameters ---------- collapsed_df : pd.DataFrame Sample metadata resulting from the collapse operation. Returns ------- list of strings Lines representing the text of a QIIME mapping file. """ lines = [] lines.append('\t'.join(['#SampleID', 'original-sample-ids'] +\ list(collapsed_df.columns)[1:])) for r in collapsed_df.iterrows(): # this is a little ugly, but we need to handle single and multi-index # values here if isinstance(r[0], tuple): new_idx = '.'.join(map(str, r[0])) else: new_idx = str(r[0]) new_values = [] for e in r[1]: if len(set(e)) == 1: # if all samples in the replicate group have the same # value for this column, just store that value new_values.append(str(e[0])) else: # if any samples in the replicate group differ in the value # in this column, store all of the values in the same order # as the ids in the new "original-sample-ids" column new_values.append('(%s)' % ', '.join(map(str,e))) lines.append('\t'.join([new_idx] + new_values)) return lines def _collapse_metadata(mapping_f, collapse_fields): """ Load a mapping file into a DataFrame and then collapse rows Parameters ---------- mapping_f : file handle or filepath The sample metadata mapping file. collapse_fields : iterable The fields to combine when collapsing samples. For each sample in the mapping_f, the ordered values from these columns will be tuplized and used as the group identfier. Samples whose tuplized values in these fields are identical will be grouped. Returns ------- pd.DataFrame Sample metadata resulting from the collapse operation. Raises ------ KeyError If sample_id_field or any of the collapse fields are not column headers in mapping_f. """ mapping_data, header, _ = parse_mapping_file(mapping_f) sample_md = pd.DataFrame(mapping_data, columns=header) grouped = sample_md.groupby(collapse_fields) collapsed_md = grouped.agg(lambda x: tuple(x)) return collapsed_md def _sample_id_from_group_id(id_, md, sid_to_group_id): try: group_id = sid_to_group_id[id_] except KeyError: raise KeyError("Sample id %s doesn't map to a group id." % id_) return '.'.join(map(str, group_id)) def _collapse_to_first(t, axis): return np.asarray([e[0] for e in t.iter_data(axis=axis, dense=True)]) def _collapse_to_median(t, axis): return np.asarray([np.median(e) for e in t.iter_data(axis=axis, dense=True)]) def _collapse_to_sum(t, axis): return np.asarray([np.sum(e) for e in t.iter_data(axis=axis)]) def _collapse_to_mean(t, axis): return np.asarray([np.mean(e) for e in t.iter_data(axis=axis)]) def _collapse_to_random(t, axis): if axis == 'sample': length = t.length("observation") elif axis == 'observation': length = t.length("sample") else: raise UnknownAxisError(axis) n = np.random.randint(length) return np.asarray([e[n] for e in t.iter_data(axis=axis, dense=True)]) def _validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping, field): """Validates the input data to make sure it can be used and makes sense. The headers, distance matrix, and mapping input should be iterable, and all data should not be None. The field must exist in the mapping header. 
""" if (dist_matrix_header is None or dist_matrix is None or mapping_header is None or mapping is None or field is None): raise ValueError("The input(s) cannot be 'None'.") # Make sure the appropriate input is iterable. for input_arg in (dist_matrix_header, dist_matrix, mapping_header, mapping): try: iter(input_arg) except: raise ValueError("The headers, distance matrix, and mapping data " "must be iterable.") # The field must be a string. if not isinstance(field, str): raise ValueError("The field must be a string.") # Make sure the field is in the mapping header. if field not in mapping_header: raise ValueError("The field '%s' is not in the mapping file header." % field) # check that we share sample identifiers between th mf and the dm if not set(zip(*mapping)[0]) & set(dist_matrix_header): raise ValueError('The mapping file does not share at least one sample' ' with the distance matrix.') def _get_indices(input_items, wanted_items): """Returns indices of the wanted items in the input items if present. input_items must be iterable, and wanted_items may be either a single value or a list. The return value will always be a list of indices, and an empty list if none were found. If wanted_items is a single string, it is treated as a scalar, not an iterable. """ # Note: Some of this code is taken from Jeremy Widmann's # get_valid_indices() function, part of make_distance_histograms.py from QIIME 1.8.0. try: iter(input_items) except: raise ValueError("The input_items to search must be iterable.") try: len(wanted_items) except: # We have a scalar value, so put it in a list. wanted_items = [wanted_items] if isinstance(wanted_items, basestring): wanted_items = [wanted_items] return [input_items.index(item) for item in wanted_items if item in input_items] def _get_groupings(dist_matrix_header, dist_matrix, groups, within=True, suppress_symmetry_and_hollowness_check=False): """Returns a list of distance groupings. The return value is a list that contains tuples of three elements: the first two elements are the field values being compared, and the third element is a list of the distances. WARNING: Only symmetric, hollow distance matrices may be used as input. Asymmetric distance matrices, such as those obtained by the UniFrac Gain metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input. Arguments: - dist_matrix_header: The distance matrix header. - dist_matrix: The distance matrix. - groups: A dictionary mapping field value to sample IDs, obtained by calling group_by_field(). - within: If True, distances are grouped within a field value. If False, distances are grouped between field values. - suppress_symmetry_and_hollowness_check: By default, the input distance matrix will be checked for symmetry and hollowness. It is recommended to leave this check in place for safety, as the check is fairly fast. However, if you *know* you have a symmetric and hollow distance matrix, you can disable this check for small performance gains on extremely large distance matrices If within is True, the zeros along the diagonal of the distance matrix are omitted. """ # Note: Much of this code is taken from Jeremy Widmann's # distances_by_groups() function, part of make_distance_histograms.py from QIIME 1.8.0. 
if not suppress_symmetry_and_hollowness_check: if not is_symmetric_and_hollow(dist_matrix): raise ValueError("The distance matrix must be symmetric and " "hollow.") result = [] group_items = groups.items() for i, (row_group, row_ids) in enumerate(group_items): row_indices = _get_indices(dist_matrix_header, row_ids) if within: # Handle the case where indices are the same so we need to omit # the diagonal. block = dist_matrix[row_indices][:, row_indices] size = len(row_indices) indices = [] for i in range(size): for j in range(i, size): if i != j: indices.append(block[i][j]) if indices: result.append((row_group, row_group, indices)) else: # Handle the case where indices are separate: just return blocks. for j in range(i + 1, len(groups)): col_group, col_ids = group_items[j] col_indices = _get_indices(dist_matrix_header, col_ids) vals = dist_matrix[row_indices][:, col_indices] # Flatten the array into a single-level list. vals = map(None, vals.flat) if vals: result.append((row_group, col_group, vals)) return result def extract_per_individual_states_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, filter_missing_data=True): """ sample_metadata : 2d dictionary mapping sample ids to metadata (as returned from qiime.parse.parse_mapping_file_to_dict) state_category: metadata category name describing state of interest (usually something like 'TreatmentState') as a string state_values: ordered list of values of interest in the state_category metadata entry (usually something like ['PreTreatment','PostTreatment']) individual_identifier_category: metadata category name describing the individual (usually something like 'PersonalID') as a string filter_missing_data: if True, an individual is excluded from the result object if any of it's values are None. This can occur when there is no sample for one or more of the state values for an individual. This is True by default. returns {'individual-identifier': [sample-id-at-state-value1, sample-id-at-state-value2, sample-id-at-state-value3, ...], ... } """ # prep the result object, which will be a dict of lists len_state_values = len(state_values) def inner_dict_constructor(): return [None] * len_state_values results = defaultdict(inner_dict_constructor) for sample_id, metadata in sample_metadata.items(): try: individual_id = metadata[individual_identifier_category] except KeyError: raise KeyError("%s is not a sample metadata category." % individual_identifier_category) try: state_value = metadata[state_category] except KeyError: raise KeyError("%s is not a sample metadata category." 
% state_category) try: state_index = state_values.index(state_value) except ValueError: # hit a state that is in the mapping file but not in # state_values - this is silently ignored continue results[individual_id][state_index] = sample_id if filter_missing_data: # delete individual results if sample ids corresponding to # any of the states are missing for individual_id, sample_ids in results.items(): if None in sample_ids: del results[individual_id] return results def extract_per_individual_state_metadatum_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, metadata_category, process_f=float): """ sample_metadata : 2d dictionary mapping sample ids to metadata (as returned from qiime.parse.parse_mapping_file_to_dict) state_category: metadata category name describing state of interest (usually something like 'TreatmentState') as a string state_values: ordered list of values of interest in the state_category metadata entry (usually something like ['PreTreatment','PostTreatment']) individual_identifier_category: metadata category name describing the individual (usually something like 'PersonalID') as a string metadata_category: metadata category to extract from sample_metadata process_f: function to apply to metadata values (default: float) returns {'individual-identifier': [state-1-metadata-value, state-2-metadata-value, ...], ... } """ per_individual_states = extract_per_individual_states_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, filter_missing_data=True) results = {} for individual_id, sample_ids in per_individual_states.items(): per_state_metadata_values = [] for sample_id in sample_ids: try: sample_metadata_value = sample_metadata[ sample_id][ metadata_category] except KeyError: raise KeyError( "%s is not a sample metadata category." % metadata_category) try: v = process_f(sample_metadata_value) except ValueError as e: v = None per_state_metadata_values.append(v) results[individual_id] = per_state_metadata_values return results def extract_per_individual_state_metadata_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, metadata_categories, process_f=float): """ sample_metadata : 2d dictionary mapping sample ids to metadata (as returned from qiime.parse.parse_mapping_file_to_dict) state_category: metadata category name describing state of interest (usually something like 'TreatmentState') as a string state_values: ordered list of values of interest in the state_category metadata entry (usually something like ['PreTreatment','PostTreatment']) individual_identifier_category: metadata category name describing the individual (usually something like 'PersonalID') as a string metadata_categories: metadata categories to extract from sample_metadata process_f: function to apply to metadata values (default: float) returns {'metadata-category-1': {'individual-identifier-1': [difference-in-metadata-value-bw-states-2-and-1, difference-in-metadata-value-bw-states-3-and-2, ...], 'individual-identifier-2: [difference-in-metadata-value-bw-states-2-and-1, difference-in-metadata-value-bw-states-3-and-2, ...], } ... 
} """ results = {} for metadata_category in metadata_categories: results[metadata_category] = \ extract_per_individual_state_metadatum_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, metadata_category, process_f) return results def extract_per_individual_state_metadata_from_sample_metadata_and_biom( sample_metadata, biom_table, state_category, state_values, individual_identifier_category, observation_ids=None): """ sample_metadata : 2d dictionary mapping sample ids to metadata (as returned from qiime.parse.parse_mapping_file_to_dict) biom_table: biom table object containing observation counts for samples in sample_metadata state_category: metadata category name describing state of interest (usually something like 'TreatmentState') as a string state_values: ordered list of values of interest in the state_category metadata entry (usually something like ['PreTreatment','PostTreatment']) individual_identifier_category: metadata category name describing the individual (usually something like 'PersonalID') as a string observation_ids: observations (usually OTUs) to extract from biom_table (default is all) returns {'otu1': {'individual-identifier-1: [difference-in-otu1-abundance-bw-states-2-and-1, difference-in-otu1-abundance-bw-states-3-and-2, ...], 'individual-identifier-2: [difference-in-otu1-abundance-bw-states-2-and-1, difference-in-otu1-abundance-bw-states-3-and-2, ...], } ... } """ per_individual_states = extract_per_individual_states_from_sample_metadata( sample_metadata, state_category, state_values, individual_identifier_category, filter_missing_data=True) results = {} if observation_ids is None: observation_ids = biom_table.ids(axis='observation') for observation_id in observation_ids: observation_data = biom_table.data(observation_id, 'observation') results[observation_id] = {} for individual_id, sample_ids in per_individual_states.items(): per_state_metadata_values = [] for sample_id in sample_ids: sample_index = biom_table.index(sample_id, 'sample') per_state_metadata_values.append( observation_data[sample_index]) results[observation_id][individual_id] = per_state_metadata_values return results
license: gpl-2.0
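The docstrings in qiime/group.py above describe grouping the entries of a symmetric, hollow distance matrix either within a field value (diagonal omitted) or between two field values. A small numpy sketch of that indexing; the sample ids, field states, and distances below are made up for illustration:

import numpy as np

header = ['s1', 's2', 's3', 's4']                          # hypothetical sample ids
dm = np.array([[0.0, 0.3, 0.7, 0.8],
               [0.3, 0.0, 0.6, 0.9],
               [0.7, 0.6, 0.0, 0.2],
               [0.8, 0.9, 0.2, 0.0]])
groups = {'Control': ['s1', 's2'], 'Fast': ['s3', 's4']}   # field value -> sample ids

idx = {state: [header.index(s) for s in ids] for state, ids in groups.items()}

# Within a field value: upper-triangle entries of the group's block, diagonal omitted.
i = idx['Control']
block = dm[np.ix_(i, i)]
within = block[np.triu_indices_from(block, k=1)].tolist()            # [0.3]

# Between two field values: the full cross block, flattened.
between = dm[np.ix_(idx['Control'], idx['Fast'])].ravel().tolist()   # [0.7, 0.8, 0.6, 0.9]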
repo_name: ashhher3/pylearn2
path: pylearn2/scripts/datasets/browse_small_norb.py
copies: 44
size: 6901
content:
#!/usr/bin/env python

import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
    from matplotlib import pyplot
except ImportError as import_error:
    warnings.warn("Can't use this script without matplotlib.")
    pyplot = None

from pylearn2.datasets import norb

warnings.warn("This script is deprecated. Please use ./browse_norb.py "
              "instead. It is kept around as a tester for deprecated class "
              "datasets.norb.SmallNORB", exceptions.DeprecationWarning)


def main():
    def parse_args():
        parser = argparse.ArgumentParser(
            description="Browser for SmallNORB dataset.")

        parser.add_argument('--which_set',
                            default='train',
                            help="'train', 'test', or the path to a .pkl file")

        parser.add_argument('--zca',
                            default=None,
                            help=("if --which_set points to a .pkl "
                                  "file storing a ZCA-preprocessed "
                                  "NORB dataset, you can optionally "
                                  "enter the preprocessor's .pkl "
                                  "file path here to undo the "
                                  "ZCA'ing for visualization "
                                  "purposes."))

        return parser.parse_args()

    def get_data(args):
        if args.which_set in ('train', 'test'):
            dataset = norb.SmallNORB(args.which_set, True)
        else:
            with open(args.which_set) as norb_file:
                dataset = pickle.load(norb_file)

            if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
                print("This viewer does not support NORB datasets that "
                      "only have classification labels.")
                sys.exit(1)

            if args.zca is not None:
                with open(args.zca) as zca_file:
                    zca = pickle.load(zca_file)
                dataset.X = zca.inverse(dataset.X)

        num_examples = dataset.X.shape[0]

        topo_shape = ((num_examples, ) +
                      tuple(dataset.view_converter.shape))
        assert topo_shape[-1] == 1
        topo_shape = topo_shape[:-1]
        values = dataset.X.reshape(topo_shape)
        labels = numpy.array(dataset.y, 'int')
        return values, labels, dataset.which_set

    args = parse_args()
    values, labels, which_set = get_data(args)

    # For programming convenience, internally remap the instance labels to be
    # 0-4, and the azimuth labels to be 0-17. The user will still only see the
    # original, unmodified label values.

    instance_index = norb.SmallNORB.label_type_to_index['instance']

    def remap_instances(which_set, labels):
        if which_set == 'train':
            new_to_old_instance = [4, 6, 7, 8, 9]
        elif which_set == 'test':
            new_to_old_instance = [0, 1, 2, 3, 5]

        num_instances = len(new_to_old_instance)
        old_to_new_instance = numpy.ndarray(10, 'int')
        old_to_new_instance.fill(-1)
        old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)

        instance_slice = numpy.index_exp[:, instance_index]
        old_instances = labels[instance_slice]

        new_instances = old_to_new_instance[old_instances]
        labels[instance_slice] = new_instances

        azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
        azimuth_slice = numpy.index_exp[:, azimuth_index]
        labels[azimuth_slice] = labels[azimuth_slice] / 2

        return new_to_old_instance

    new_to_old_instance = remap_instances(which_set, labels)

    def get_new_azimuth_degrees(scalar_label):
        return 20 * scalar_label

    # Maps a label vector to the corresponding index in <values>
    num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
    num_labels_by_type[instance_index] = len(new_to_old_instance)

    label_to_index = numpy.ndarray(num_labels_by_type, 'int')
    label_to_index.fill(-1)

    for i, label in enumerate(labels):
        label_to_index[tuple(label)] = i

    assert not numpy.any(label_to_index == -1)  # all elements have been set

    figure, axes = pyplot.subplots(1, 2, squeeze=True)

    figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
                                   which_set)

    # shift subplots down to make more room for the text
    figure.subplots_adjust(bottom=0.05)

    num_label_types = len(norb.SmallNORB.num_labels_by_type)
    current_labels = numpy.zeros(num_label_types, 'int')
    current_label_type = [0, ]

    label_text = figure.suptitle("title text",
                                 x=0.1,
                                 horizontalalignment="left")

    def redraw(redraw_text, redraw_images):
        if redraw_text:
            cl = current_labels

            lines = [
                'category: %s' % norb.SmallNORB.get_category(cl[0]),
                'instance: %d' % new_to_old_instance[cl[1]],
                'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
                'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
                'lighting: %d' % cl[4]]

            lt = current_label_type[0]
            lines[lt] = '==> ' + lines[lt]
            text = ('Up/down arrows choose label, left/right arrows change it'
                    '\n\n' +
                    '\n'.join(lines))
            label_text.set_text(text)

        if redraw_images:
            index = label_to_index[tuple(current_labels)]
            image_pair = values[index, :, :, :]
            for i in range(2):
                axes[i].imshow(image_pair[i, :, :], cmap='gray')

        figure.canvas.draw()

    def on_key_press(event):
        def add_mod(arg, step, size):
            return (arg + size + step) % size

        def incr_label_type(step):
            current_label_type[0] = add_mod(current_label_type[0],
                                            step,
                                            num_label_types)

        def incr_label(step):
            lt = current_label_type[0]
            num_labels = num_labels_by_type[lt]
            current_labels[lt] = add_mod(current_labels[lt],
                                         step,
                                         num_labels)

        if event.key == 'up':
            incr_label_type(-1)
            redraw(True, False)
        elif event.key == 'down':
            incr_label_type(1)
            redraw(True, False)
        elif event.key == 'left':
            incr_label(-1)
            redraw(True, True)
        elif event.key == 'right':
            incr_label(1)
            redraw(True, True)
        elif event.key == 'q':
            sys.exit(0)

    figure.canvas.mpl_connect('key_press_event', on_key_press)

    redraw(True, True)

    pyplot.show()


if __name__ == '__main__':
    main()
license: bsd-3-clause
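The remap_instances helper above builds an inverse lookup table and applies it to an entire label column with numpy fancy indexing. A stripped-down sketch of just that trick, using the training-set instance labels from the branch above (input label values are illustrative):

import numpy

new_to_old_instance = [4, 6, 7, 8, 9]        # training-set instance labels
old_to_new_instance = numpy.full(10, -1, dtype=int)
old_to_new_instance[new_to_old_instance] = numpy.arange(len(new_to_old_instance))

old_instances = numpy.array([4, 9, 6, 6, 8])
new_instances = old_to_new_instance[old_instances]   # -> array([0, 4, 1, 1, 3])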
repo_name: ndchorley/scipy
path: scipy/stats/_binned_statistic.py
copies: 17
size: 17622
content:
from __future__ import division, print_function, absolute_import import warnings import numpy as np from scipy._lib.six import callable from collections import namedtuple def binned_statistic(x, values, statistic='mean', bins=10, range=None): """ Compute a binned statistic for a set of data. This is a generalization of a histogram function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- x : array_like A sequence of values to be binned. values : array_like The values on which the statistic will be computed. This must be the same shape as `x`. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : int or sequence of scalars, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10 by default). If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. Values in `x` that are smaller than lowest bin edge are assigned to bin number 0, values beyond the highest bin are assigned to ``bins[-1]``. range : (float, float) or [(float, float)], optional The lower and upper range of the bins. If not provided, range is simply ``(x.min(), x.max())``. Values outside the range are ignored. Returns ------- statistic : array The values of the selected statistic in each bin. bin_edges : array of dtype float Return the bin edges ``(length(statistic)+1)``. binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as values. See Also -------- numpy.histogram, binned_statistic_2d, binned_statistic_dd Notes ----- All but the last (righthand-most) bin is half-open. In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 0.11.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt First a basic example: >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', ... bins=3) (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3])) As a second example, we now generate some random data of sailing boat speed as a function of wind speed, and then determine how fast our boat is for certain wind speeds: >>> windspeed = 8 * np.random.rand(500) >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, ... 
boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) >>> plt.figure() >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, ... label='binned statistic of data') >>> plt.legend() Now we can use ``binnumber`` to select all datapoints with a windspeed below 1: >>> low_boatspeed = boatspeed[binnumber == 0] As a final example, we will use ``bin_edges`` and ``binnumber`` to make a plot of a distribution that shows the mean and distribution around that mean per bin, on top of a regular histogram and the probability distribution function: >>> x = np.linspace(0, 5, num=500) >>> x_pdf = stats.maxwell.pdf(x) >>> samples = stats.maxwell.rvs(size=10000) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, ... statistic='mean', bins=25) >>> bin_width = (bin_edges[1] - bin_edges[0]) >>> bin_centers = bin_edges[1:] - bin_width/2 >>> plt.figure() >>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2, ... label='histogram of data') >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, ... label='binned statistic of data') >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) >>> plt.legend(fontsize=10) >>> plt.show() """ try: N = len(bins) except TypeError: N = 1 if N != 1: bins = [np.asarray(bins, float)] if range is not None: if len(range) == 2: range = [range] medians, edges, xy = binned_statistic_dd([x], values, statistic, bins, range) BinnedStatisticResult = namedtuple('BinnedStatisticResult', ('statistic', 'bin_edges', 'binnumber')) return BinnedStatisticResult(medians, edges[0], xy) def binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None): """ Compute a bidimensional binned statistic for a set of data. This is a generalization of a histogram2d function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- x : (N,) array_like A sequence of values to be binned along the first dimension. y : (M,) array_like A sequence of values to be binned along the second dimension. values : (N,) array_like The values on which the statistic will be computed. This must be the same shape as `x`. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. 
bins : int or [int, int] or array_like or [array, array], optional The bin specification: * the number of bins for the two dimensions (nx=ny=bins), * the number of bins in each dimension (nx, ny = bins), * the bin edges for the two dimensions (x_edges = y_edges = bins), * the bin edges in each dimension (x_edges, y_edges = bins). range : (2,2) array_like, optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be considered outliers and not tallied in the histogram. Returns ------- statistic : (nx, ny) ndarray The values of the selected statistic in each two-dimensional bin x_edges : (nx + 1) ndarray The bin edges along the first dimension. y_edges : (ny + 1) ndarray The bin edges along the second dimension. binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as `values`. See Also -------- numpy.histogram2d, binned_statistic, binned_statistic_dd Notes ----- .. versionadded:: 0.11.0 """ # This code is based on np.histogram2d try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = np.asarray(bins, float) bins = [xedges, yedges] medians, edges, xy = binned_statistic_dd([x, y], values, statistic, bins, range) BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', ('statistic', 'x_edge', 'y_edge', 'binnumber')) return BinnedStatistic2dResult(medians, edges[0], edges[1], xy) def binned_statistic_dd(sample, values, statistic='mean', bins=10, range=None): """ Compute a multidimensional binned statistic for a set of data. This is a generalization of a histogramdd function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- sample : array_like Data to histogram passed as a sequence of D arrays of length N, or as an (N,D) array. values : array_like The values on which the statistic will be computed. This must be the same shape as x. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitely in `bins`. Defaults to the minimum and maximum values along each dimension. 
Returns ------- statistic : ndarray, shape(nx1, nx2, nx3,...) The values of the selected statistic in each two-dimensional bin bin_edges : list of ndarrays A list of D arrays describing the (nxi + 1) bin edges for each dimension binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as values. See Also -------- np.histogramdd, binned_statistic, binned_statistic_2d Notes ----- .. versionadded:: 0.11.0 """ known_stats = ['mean', 'median', 'count', 'sum', 'std'] if not callable(statistic) and statistic not in known_stats: raise ValueError('invalid statistic %r' % (statistic,)) # This code is based on np.histogramdd try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = np.atleast_2d(sample).T N, D = sample.shape nbin = np.empty(D, int) edges = D * [None] dedges = D * [None] try: M = len(bins) if M != D: raise AttributeError('The dimension of bins must be equal ' 'to the dimension of the sample x.') except TypeError: bins = D * [bins] # Select range for each dimension # Used only if number of bins is given. if range is None: smin = np.atleast_1d(np.array(sample.min(0), float)) smax = np.atleast_1d(np.array(sample.max(0), float)) else: smin = np.zeros(D) smax = np.zeros(D) for i in np.arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in np.arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # Create edge arrays for i in np.arange(D): if np.isscalar(bins[i]): nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1) else: edges[i] = np.asarray(bins[i], float) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = np.diff(edges[i]) nbin = np.asarray(nbin) # Compute the bin number each sample falls into. Ncount = {} for i in np.arange(D): Ncount[i] = np.digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right # edge to be counted in the last bin, and not as an outlier. for i in np.arange(D): # Rounding precision decimal = int(-np.log10(dedges[i].min())) + 6 # Find which points are on the rightmost edge. on_edge = np.where(np.around(sample[:, i], decimal) == np.around(edges[i][-1], decimal))[0] # Shift these points one bin to the left. Ncount[i][on_edge] -= 1 # Compute the sample indices in the flattened statistic matrix. 
ni = nbin.argsort() xy = np.zeros(N, int) for i in np.arange(0, D - 1): xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod() xy += Ncount[ni[-1]] result = np.empty(nbin.prod(), float) if statistic == 'mean': result.fill(np.nan) flatcount = np.bincount(xy, None) flatsum = np.bincount(xy, values) a = flatcount.nonzero() result[a] = flatsum[a] / flatcount[a] elif statistic == 'std': result.fill(0) flatcount = np.bincount(xy, None) flatsum = np.bincount(xy, values) flatsum2 = np.bincount(xy, values ** 2) a = flatcount.nonzero() result[a] = np.sqrt(flatsum2[a] / flatcount[a] - (flatsum[a] / flatcount[a]) ** 2) elif statistic == 'count': result.fill(0) flatcount = np.bincount(xy, None) a = np.arange(len(flatcount)) result[a] = flatcount elif statistic == 'sum': result.fill(0) flatsum = np.bincount(xy, values) a = np.arange(len(flatsum)) result[a] = flatsum elif statistic == 'median': result.fill(np.nan) for i in np.unique(xy): result[i] = np.median(values[xy == i]) elif callable(statistic): with warnings.catch_warnings(): # Numpy generates a warnings for mean/std/... with empty list warnings.filterwarnings('ignore', category=RuntimeWarning) old = np.seterr(invalid='ignore') try: null = statistic([]) except: null = np.nan np.seterr(**old) result.fill(null) for i in np.unique(xy): result[i] = statistic(values[xy == i]) # Shape into a proper matrix result = result.reshape(np.sort(nbin)) for i in np.arange(nbin.size): j = ni.argsort()[i] result = result.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D * [slice(1, -1)] result = result[core] if (result.shape != nbin - 2).any(): raise RuntimeError('Internal Shape Error') BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', ('statistic', 'bin_edges', 'binnumber')) return BinnedStatisticddResult(result, edges, xy)
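As a quick complement to the docstring examples above, here is a small usage sketch of `binned_statistic_2d` on synthetic data (the data and bin count are made up for illustration):

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, size=1000)
y = rng.uniform(0, 10, size=1000)
values = x + y + rng.normal(scale=0.5, size=1000)

# Mean of `values` on a 5x5 grid over (x, y); a callable such as np.ptp
# could be passed as `statistic` instead of 'mean'.
stat, x_edges, y_edges, binnumber = stats.binned_statistic_2d(
    x, y, values, statistic='mean', bins=5)

print(stat.shape)       # (5, 5) grid of per-bin means
print(x_edges)          # 6 bin edges along x
print(binnumber[:10])   # flattened bin index of the first 10 samples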
bsd-3-clause
PatrickOReilly/scikit-learn
examples/manifold/plot_swissroll.py
330
1446
""" =================================== Swiss Roll reduction with LLE =================================== An illustration of Swiss Roll reduction with locally linear embedding """ # Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr> # License: BSD 3 clause (C) INRIA 2011 print(__doc__) import matplotlib.pyplot as plt # This import is needed to modify the way figure behaves from mpl_toolkits.mplot3d import Axes3D Axes3D #---------------------------------------------------------------------- # Locally linear embedding of the swiss roll from sklearn import manifold, datasets X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500) print("Computing LLE embedding") X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12, n_components=2) print("Done. Reconstruction error: %g" % err) #---------------------------------------------------------------------- # Plot result fig = plt.figure() try: # compatibility matplotlib < 1.0 ax = fig.add_subplot(211, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral) except: ax = fig.add_subplot(211) ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral) ax.set_title("Original data") ax = fig.add_subplot(212) ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral) plt.axis('tight') plt.xticks([]), plt.yticks([]) plt.title('Projected data') plt.show()
bsd-3-clause
fbagirov/scikit-learn
examples/linear_model/plot_ard.py
248
2622
""" ================================================== Automatic Relevance Determination Regression (ARD) ================================================== Fit regression model with Bayesian Ridge Regression. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. The histogram of the estimated weights is very peaked, as a sparsity-inducing prior is implied on the weights. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import ARDRegression, LinearRegression ############################################################################### # Generating simulated data with Gaussian weights # Parameters of the example np.random.seed(0) n_samples, n_features = 100, 100 # Create Gaussian data X = np.random.randn(n_samples, n_features) # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noite with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the ARD Regression clf = ARDRegression(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot the true weights, the estimated weights and the histogram of the # weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="ARD estimate") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.plot(w, 'g-', label="Ground truth") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
espenhgn/nest-simulator
pynest/examples/twoneurons.py
3
1260
# -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.

"""Two neuron example
----------------------------

See Also
~~~~~~~~~~

:doc:`one_neuron`

"""

import nest
import nest.voltage_trace
import matplotlib.pyplot as plt

weight = 20.0
delay = 1.0
stim = 1000.0

neuron1 = nest.Create("iaf_psc_alpha")
neuron2 = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")

neuron1.I_e = stim
nest.Connect(neuron1, neuron2, syn_spec={'weight': weight, 'delay': delay})
nest.Connect(voltmeter, neuron2)

nest.Simulate(100.0)

nest.voltage_trace.from_device(voltmeter)
plt.show()
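One way to see the role of the synaptic weight is to rerun the same two-neuron circuit for several weights and overlay the membrane traces. This is only a sketch, assuming the same NEST version as the example above (attribute-style parameter access and nest.ResetKernel between runs); the chosen weights are arbitrary:

import nest
import nest.voltage_trace
import matplotlib.pyplot as plt

for weight in (10.0, 20.0, 40.0):
    nest.ResetKernel()  # start each run from a clean kernel state

    neuron1 = nest.Create("iaf_psc_alpha")
    neuron2 = nest.Create("iaf_psc_alpha")
    voltmeter = nest.Create("voltmeter")

    neuron1.I_e = 1000.0
    nest.Connect(neuron1, neuron2, syn_spec={'weight': weight, 'delay': 1.0})
    nest.Connect(voltmeter, neuron2)

    nest.Simulate(100.0)
    nest.voltage_trace.from_device(voltmeter)

plt.show()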
gpl-2.0
jorik041/scikit-learn
examples/cluster/plot_lena_segmentation.py
271
2444
""" ========================================= Segmenting the picture of Lena in regions ========================================= This example uses :ref:`spectral_clustering` on a graph created from voxel-to-voxel difference on an image to break this image into multiple partly-homogeneous regions. This procedure (spectral clustering on an image) is an efficient approximate solution for finding normalized graph cuts. There are two options to assign labels: * with 'kmeans' spectral clustering will cluster samples in the embedding space using a kmeans algorithm * whereas 'discrete' will iteratively search for the closest partition space to the embedding space. """ print(__doc__) # Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung # License: BSD 3 clause import time import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn.feature_extraction import image from sklearn.cluster import spectral_clustering lena = sp.misc.lena() # Downsample the image by a factor of 4 lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2] lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2] # Convert the image into a graph with the value of the gradient on the # edges. graph = image.img_to_graph(lena) # Take a decreasing function of the gradient: an exponential # The smaller beta is, the more independent the segmentation is of the # actual image. For beta=1, the segmentation is close to a voronoi beta = 5 eps = 1e-6 graph.data = np.exp(-beta * graph.data / lena.std()) + eps # Apply spectral clustering (this step goes much faster if you have pyamg # installed) N_REGIONS = 11 ############################################################################### # Visualize the resulting regions for assign_labels in ('kmeans', 'discretize'): t0 = time.time() labels = spectral_clustering(graph, n_clusters=N_REGIONS, assign_labels=assign_labels, random_state=1) t1 = time.time() labels = labels.reshape(lena.shape) plt.figure(figsize=(5, 5)) plt.imshow(lena, cmap=plt.cm.gray) for l in range(N_REGIONS): plt.contour(labels == l, contours=1, colors=[plt.cm.spectral(l / float(N_REGIONS)), ]) plt.xticks(()) plt.yticks(()) plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))) plt.show()
bsd-3-clause
sgkang/GeophysicsToy
seismic/EOSC350widget.py
4
7381
import scipy.io import numpy as np import matplotlib.pyplot as plt def ViewWiggle(syndata, obsdata): dx = 20 fig, ax = plt.subplots(1, 2, figsize=(14, 8)) kwargs = { 'skipt':1, 'scale': 0.05, 'lwidth': 1., 'dx': dx, 'sampr': 0.004, 'clip' : dx*10., } extent = [0., 38*dx, 1.0, 0.] ax[0].invert_yaxis() ax[1].invert_yaxis() wiggle(syndata, ax = ax[0], **kwargs) wiggle(obsdata, ax = ax[1], **kwargs) ax[0].set_xlabel("Offset (m)") ax[0].set_ylabel("Time (s)") ax[0].set_title("Clean CMP gather") ax[1].set_xlabel("Offset (m)") ax[1].set_ylabel("Time (s)") ax[1].set_title("Noisy CMP gather") def NoisyNMOWidget(t0, v1, v2, v3): syndata = np.load('obsdata1.npy') np.random.randn() dx = 20 xorig = np.arange(38)*dx time1 = HyperbolicFun(t0, xorig, v1) time2 = HyperbolicFun(t0, xorig, v2) time3 = HyperbolicFun(t0, xorig, v3) fig, ax = plt.subplots(1, 2, figsize=(14, 8)) kwargs = { 'skipt':1, 'scale': 0.05, 'lwidth': 1., 'dx': dx, 'sampr': 0.004, 'clip' : dx*10., } extent = [0., 38*dx, 1.0, 0.] ax[0].invert_yaxis() ax[1].invert_yaxis() wiggle(syndata, ax = ax[0], **kwargs) toffset = np.sqrt(xorig**2/v2**2+t0**2)-t0 wiggle(syndata, ax = ax[1], manthifts=toffset, **kwargs) ax[0].axis(extent) ax[1].axis(extent) ax[0].plot(xorig, time1, 'b', lw=2) ax[0].plot(xorig, time2, 'r', lw=2) ax[0].plot(xorig, time3, 'g', lw=2) ax[0].set_xlabel("Offset (m)") ax[1].set_xlabel("Offset (m)") ax[0].set_ylabel("Time (s)") ax[1].set_ylabel("Time (s)") ax[0].set_title("CMP gather") ax[1].set_title("NMO corrected CMP gather") def CleanNMOWidget(t0, v): syndata = np.load('syndata1.npy') np.random.randn() dx = 20 xorig = np.arange(38)*dx time = HyperbolicFun(t0, xorig, v) fig, ax = plt.subplots(1, 2, figsize=(14, 8)) kwargs = { 'skipt':1, 'scale': 0.05, 'lwidth': 1., 'dx': dx, 'sampr': 0.004, 'clip' : dx*10., } extent = [0., 38*dx, 1.0, 0.] ax[0].invert_yaxis() ax[1].invert_yaxis() wiggle(syndata, ax = ax[0], **kwargs) toffset = np.sqrt(xorig**2/v**2+t0**2)-t0 wiggle(syndata, ax = ax[1], manthifts=toffset, **kwargs) ax[0].axis(extent) ax[1].axis(extent) ax[0].plot(xorig, time, 'b', lw=2) ax[0].set_xlabel("Offset (m)") ax[1].set_xlabel("Offset (m)") ax[0].set_ylabel("Time (s)") ax[1].set_ylabel("Time (s)") ax[0].set_title("CMP gather") ax[1].set_title("NMO corrected CMP gather") def HyperbolicFun(t0, x, velocity): time = np.sqrt(x**2/velocity**2+t0**2) return time def NMOstackthree(data, tintercept, v1, v2, v3): dx = 20. 
xorig = np.arange(38)*dx time = np.load('time1.npy') traces = np.zeros((3,time.size)) vtemp = np.r_[v1, v2, v3] for itry in range(3): traces[itry,:] = NMOstack(data, xorig, time, vtemp[itry]) fig, ax = plt.subplots(1, 3, figsize=(10, 8)) kwargs = { 'skipt':1, 'scale': 2., 'lwidth': 1., 'sampr': 0.004, 'clip' : 10, } for i in range(3): extent = [traces[i,:].min(), traces[i,:].max(), time.max(), time.min()] ax[i].invert_yaxis() ax[i].axis(extent) wiggle(traces[i,:].reshape([1,-1]), ax=ax[i], **kwargs) ax[i].set_xlabel("Amplitude") ax[i].set_ylabel("Time (s)") ax[i].set_title(("Velocity = %6.1f")%(vtemp[i])) def NMOstack(data, xorig, time, v): if np.isscalar(v): v = np.ones_like(time)*v Time = (time.reshape([1,-1])).repeat(data.shape[0], axis=0) singletrace = np.zeros(data.shape[1]) for i in range(time.size): toffset = np.sqrt(xorig**2/v[i]**2+time[i]**2) Time = (time.reshape([1,-1])).repeat(data.shape[0], axis=0) Toffset = (toffset.reshape([-1,1])).repeat(data.shape[1], axis=1) indmin = np.argmin(abs(Time-Toffset), axis=1) singletrace[i] = (mkvc(data)[sub2ind(data.shape, np.c_[np.arange(data.shape[0]), indmin])]).sum() return singletrace def NMOstackSingle(data, tintercept, v): dx = 20. xorig = np.arange(38)*dx time = np.load('time1.npy') singletrace = NMOstack(data, xorig, time, v) fig, ax = plt.subplots(1, 1, figsize=(7, 8)) kwargs = { 'skipt':1, 'scale': 2., 'lwidth': 1., 'sampr': 0.004, 'ax': ax, 'clip' : 10, } extent = [singletrace.min(), singletrace.max(), time.max(), time.min()] ax.invert_yaxis() ax.axis(extent) wiggle(singletrace.reshape([1,-1]), **kwargs) ax.set_xlabel("Amplitude") ax.set_ylabel("Time (s)") def clipsign (value, clip): clipthese = abs(value) > clip return value * ~clipthese + np.sign(value)*clip*clipthese def wiggle (traces, skipt=1,scale=1.,lwidth=.1,offsets=None,redvel=0., manthifts=None, tshift=0.,sampr=1.,clip=10., dx=1., color='black',fill=True,line=True, ax=None): ns = traces.shape[1] ntr = traces.shape[0] t = np.arange(ns)*sampr timereduce = lambda offsets, redvel, shift: [float(offset) / redvel + shift for offset in offsets] if (offsets is not None): shifts = timereduce(offsets, redvel, tshift) elif (manthifts is not None): shifts = manthifts else: shifts = np.zeros((ntr,)) for i in range(0, ntr, skipt): trace = traces[i].copy() trace[0] = 0 trace[-1] = 0 if ax == None: if (line): plt.plot(i*dx + clipsign(trace / scale, clip), t - shifts[i], color=color, linewidth=lwidth) if (fill): for j in range(ns): if (trace[j] < 0): trace[j] = 0 plt.fill(i*dx + clipsign(trace / scale, clip), t - shifts[i], color=color, linewidth=0) else: if (line): ax.plot(i*dx + clipsign(trace / scale, clip), t - shifts[i], color=color, linewidth=lwidth) if (fill): for j in range(ns): if (trace[j] < 0): trace[j] = 0 ax.fill(i*dx + clipsign(trace / scale, clip), t - shifts[i], color=color, linewidth=0) def sub2ind(shape, subs): """ Extracted from SimPEG for temporary use (https://github.com/simpeg) From the given shape, returns the index of the given subscript """ if len(shape) == 1: return subs if type(subs) is not np.ndarray: subs = np.array(subs) if len(subs.shape) == 1: subs = subs[np.newaxis,:] assert subs.shape[1] == len(shape), 'Indexing must be done as a column vectors. e.g. 
[[3,6],[6,2],...]' inds = np.ravel_multi_index(subs.T, shape, order='F') return mkvc(inds) def mkvc(x, numDims=1): """ Extracted from SimPEG for temporary use (https://github.com/simpeg) Creates a vector with the number of dimension specified e.g.:: a = np.array([1, 2, 3]) mkvc(a, 1).shape > (3, ) mkvc(a, 2).shape > (3, 1) mkvc(a, 3).shape > (3, 1, 1) """ if type(x) == np.matrix: x = np.array(x) if hasattr(x, 'tovec'): x = x.tovec() assert isinstance(x, np.ndarray), "Vector must be a numpy array" if numDims == 1: return x.flatten(order='F') elif numDims == 2: return x.flatten(order='F')[:, np.newaxis] elif numDims == 3: return x.flatten(order='F')[:, np.newaxis, np.newaxis]
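The NMO widgets above all rely on the hyperbolic moveout relation t(x) = sqrt(t0**2 + x**2/v**2) and shift each trace up by t(x) - t0. A small worked sketch of that relation, independent of the module (the t0, velocity and geometry values are illustrative only):

import numpy as np

def hyperbolic_traveltime(t0, x, velocity):
    # Two-way traveltime of a reflection with zero-offset time t0 (s),
    # at offset x (m), for a stacking velocity in m/s.
    return np.sqrt(x ** 2 / velocity ** 2 + t0 ** 2)

t0 = 0.4           # s
velocity = 1500.0  # m/s
offsets = np.arange(38) * 20.0  # same 20 m receiver spacing as above

t = hyperbolic_traveltime(t0, offsets, velocity)
nmo_shift = t - t0  # amount each trace is shifted up during NMO correction

print("far-offset traveltime: %.3f s" % t[-1])
print("far-offset NMO shift:  %.3f s" % nmo_shift[-1])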
mit
andyh616/mne-python
mne/viz/misc.py
13
19748
"""Functions to make simple plots with M/EEG data """ from __future__ import print_function # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Denis Engemann <denis.engemann@gmail.com> # Martin Luessi <mluessi@nmr.mgh.harvard.edu> # Eric Larson <larson.eric.d@gmail.com> # Cathy Nangini <cnangini@gmail.com> # Mainak Jas <mainak@neuro.hut.fi> # # License: Simplified BSD import copy import warnings from glob import glob import os.path as op from itertools import cycle import numpy as np from scipy import linalg from ..surface import read_surface from ..io.proj import make_projector from ..utils import logger, verbose, get_subjects_dir from ..io.pick import pick_types from .utils import tight_layout, COLORS, _prepare_trellis @verbose def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True, show=True, verbose=None): """Plot Covariance data Parameters ---------- cov : instance of Covariance The covariance matrix. info: dict Measurement info. exclude : list of string | str List of channels to exclude. If empty do not exclude any channel. If 'bads', exclude info['bads']. colorbar : bool Show colorbar or not. proj : bool Apply projections or not. show_svd : bool Plot also singular values of the noise covariance for each sensor type. We show square roots ie. standard deviations. show : bool Show figure if True. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- fig_cov : instance of matplotlib.pyplot.Figure The covariance plot. fig_svd : instance of matplotlib.pyplot.Figure | None The SVD spectra plot of the covariance. """ if exclude == 'bads': exclude = info['bads'] ch_names = [n for n in cov.ch_names if n not in exclude] ch_idx = [cov.ch_names.index(n) for n in ch_names] info_ch_names = info['ch_names'] sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=exclude) sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False, exclude=exclude) sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False, exclude=exclude) idx_eeg = [ch_names.index(info_ch_names[c]) for c in sel_eeg if info_ch_names[c] in ch_names] idx_mag = [ch_names.index(info_ch_names[c]) for c in sel_mag if info_ch_names[c] in ch_names] idx_grad = [ch_names.index(info_ch_names[c]) for c in sel_grad if info_ch_names[c] in ch_names] idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6), (idx_grad, 'Gradiometers', 'fT/cm', 1e13), (idx_mag, 'Magnetometers', 'fT', 1e15)] idx_names = [(idx, name, unit, scaling) for idx, name, unit, scaling in idx_names if len(idx) > 0] C = cov.data[ch_idx][:, ch_idx] if proj: projs = copy.deepcopy(info['projs']) # Activate the projection items for p in projs: p['active'] = True P, ncomp, _ = make_projector(projs, ch_names) if ncomp > 0: logger.info(' Created an SSP operator (subspace dimension' ' = %d)' % ncomp) C = np.dot(P, np.dot(C, P.T)) else: logger.info(' The projection vectors do not apply to these ' 'channels.') import matplotlib.pyplot as plt fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7)) for k, (idx, name, _, _) in enumerate(idx_names): plt.subplot(1, len(idx_names), k + 1) plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r') plt.title(name) plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26) tight_layout(fig=fig_cov) fig_svd = None if show_svd: fig_svd = plt.figure() for k, (idx, name, unit, scaling) in enumerate(idx_names): s = linalg.svd(C[idx][:, idx], compute_uv=False) plt.subplot(1, len(idx_names), k + 1) plt.ylabel('Noise std (%s)' % 
unit) plt.xlabel('Eigenvalue index') plt.semilogy(np.sqrt(s) * scaling) plt.title(name) tight_layout(fig=fig_svd) if show: plt.show() return fig_cov, fig_svd def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None, source_index=None, colorbar=False, show=True): """Plot source power in time-freqency grid. Parameters ---------- stcs : list of SourceEstimate Source power for consecutive time windows, one SourceEstimate object should be provided for each frequency bin. freq_bins : list of tuples of float Start and end points of frequency bins of interest. tmin : float Minimum time instant to show. tmax : float Maximum time instant to show. source_index : int | None Index of source for which the spectrogram will be plotted. If None, the source with the largest activation will be selected. colorbar : bool If true, a colorbar will be added to the plot. show : bool Show figure if True. """ import matplotlib.pyplot as plt # Input checks if len(stcs) == 0: raise ValueError('cannot plot spectrogram if len(stcs) == 0') stc = stcs[0] if tmin is not None and tmin < stc.times[0]: raise ValueError('tmin cannot be smaller than the first time point ' 'provided in stcs') if tmax is not None and tmax > stc.times[-1] + stc.tstep: raise ValueError('tmax cannot be larger than the sum of the last time ' 'point and the time step, which are provided in stcs') # Preparing time-frequency cell boundaries for plotting if tmin is None: tmin = stc.times[0] if tmax is None: tmax = stc.times[-1] + stc.tstep time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep) freq_bounds = sorted(set(np.ravel(freq_bins))) freq_ticks = copy.deepcopy(freq_bounds) # Reject time points that will not be plotted and gather results source_power = [] for stc in stcs: stc = stc.copy() # copy since crop modifies inplace stc.crop(tmin, tmax - stc.tstep) source_power.append(stc.data) source_power = np.array(source_power) # Finding the source with maximum source power if source_index is None: source_index = np.unravel_index(source_power.argmax(), source_power.shape)[1] # If there is a gap in the frequency bins record its locations so that it # can be covered with a gray horizontal bar gap_bounds = [] for i in range(len(freq_bins) - 1): lower_bound = freq_bins[i][1] upper_bound = freq_bins[i + 1][0] if lower_bound != upper_bound: freq_bounds.remove(lower_bound) gap_bounds.append((lower_bound, upper_bound)) # Preparing time-frequency grid for plotting time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds) # Plotting the results fig = plt.figure(figsize=(9, 6)) plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :], cmap='Reds') ax = plt.gca() plt.title('Time-frequency source power') plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') time_tick_labels = [str(np.round(t, 2)) for t in time_bounds] n_skip = 1 + len(time_bounds) // 10 for i in range(len(time_bounds)): if i % n_skip != 0: time_tick_labels[i] = '' ax.set_xticks(time_bounds) ax.set_xticklabels(time_tick_labels) plt.xlim(time_bounds[0], time_bounds[-1]) plt.yscale('log') ax.set_yticks(freq_ticks) ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks]) plt.ylim(freq_bounds[0], freq_bounds[-1]) plt.grid(True, ls='-') if colorbar: plt.colorbar() tight_layout(fig=fig) # Covering frequency gaps with horizontal bars for lower_bound, upper_bound in gap_bounds: plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound - lower_bound, time_bounds[0], color='#666666') if show: plt.show() return fig def _plot_mri_contours(mri_fname, surf_fnames, 
orientation='coronal', slices=None, show=True): """Plot BEM contours on anatomical slices. Parameters ---------- mri_fname : str The name of the file containing anatomical data. surf_fnames : list of str The filenames for the BEM surfaces in the format ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf']. orientation : str 'coronal' or 'axial' or 'sagittal' slices : list of int Slice indices. show : bool Show figure if True. Returns ------- fig : Instance of matplotlib.figure.Figure The figure. """ import matplotlib.pyplot as plt import nibabel as nib if orientation not in ['coronal', 'axial', 'sagittal']: raise ValueError("Orientation must be 'coronal', 'axial' or " "'sagittal'. Got %s." % orientation) # Load the T1 data nim = nib.load(mri_fname) data = nim.get_data() affine = nim.get_affine() n_sag, n_axi, n_cor = data.shape orientation_name2axis = dict(sagittal=0, axial=1, coronal=2) orientation_axis = orientation_name2axis[orientation] if slices is None: n_slices = data.shape[orientation_axis] slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int) # create of list of surfaces surfs = list() trans = linalg.inv(affine) # XXX : next line is a hack don't ask why trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2] for surf_fname in surf_fnames: surf = dict() surf['rr'], surf['tris'] = read_surface(surf_fname) # move back surface to MRI coordinate system surf['rr'] = nib.affines.apply_affine(trans, surf['rr']) surfs.append(surf) fig, axs = _prepare_trellis(len(slices), 4) for ax, sl in zip(axs, slices): # adjust the orientations for good view if orientation == 'coronal': dat = data[:, :, sl].transpose() elif orientation == 'axial': dat = data[:, sl, :] elif orientation == 'sagittal': dat = data[sl, :, :] # First plot the anatomical data ax.imshow(dat, cmap=plt.cm.gray) ax.axis('off') # and then plot the contours on top for surf in surfs: if orientation == 'coronal': ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1], surf['tris'], surf['rr'][:, 2], levels=[sl], colors='yellow', linewidths=2.0) elif orientation == 'axial': ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0], surf['tris'], surf['rr'][:, 1], levels=[sl], colors='yellow', linewidths=2.0) elif orientation == 'sagittal': ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1], surf['tris'], surf['rr'][:, 0], levels=[sl], colors='yellow', linewidths=2.0) plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0., hspace=0.) if show: plt.show() return fig def plot_bem(subject=None, subjects_dir=None, orientation='coronal', slices=None, show=True): """Plot BEM contours on anatomical slices. Parameters ---------- subject : str Subject name. subjects_dir : str | None Path to the SUBJECTS_DIR. If None, the path is obtained by using the environment variable SUBJECTS_DIR. orientation : str 'coronal' or 'axial' or 'sagittal'. slices : list of int Slice indices. show : bool Show figure if True. Returns ------- fig : Instance of matplotlib.figure.Figure The figure. 
""" subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) # Get the MRI filename mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz') if not op.isfile(mri_fname): raise IOError('MRI file "%s" does not exist' % mri_fname) # Get the BEM surface filenames bem_path = op.join(subjects_dir, subject, 'bem') if not op.isdir(bem_path): raise IOError('Subject bem directory "%s" does not exist' % bem_path) surf_fnames = [] for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']: surf_fname = glob(op.join(bem_path, surf_name + '.surf')) if len(surf_fname) > 0: surf_fname = surf_fname[0] logger.info("Using surface: %s" % surf_fname) surf_fnames.append(surf_fname) if len(surf_fnames) == 0: raise IOError('No surface files found. Surface files must end with ' 'inner_skull.surf, outer_skull.surf or outer_skin.surf') # Plot the contours return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation, slices=slices, show=show) def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None, axes=None, equal_spacing=True, show=True): """Plot events to get a visual display of the paradigm Parameters ---------- events : array, shape (n_events, 3) The events. sfreq : float | None The sample frequency. If None, data will be displayed in samples (not seconds). first_samp : int The index of the first sample. Typically the raw.first_samp attribute. It is needed for recordings on a Neuromag system as the events are defined relative to the system start and not to the beginning of the recording. color : dict | None Dictionary of event_id value and its associated color. If None, colors are automatically drawn from a default list (cycled through if number of events longer than list of default colors). event_id : dict | None Dictionary of event label (e.g. 'aud_l') and its associated event_id value. Label used to plot a legend. If None, no legend is drawn. axes : instance of matplotlib.axes.AxesSubplot The subplot handle. equal_spacing : bool Use equal spacing between events in y-axis. show : bool Show figure if True. Returns ------- fig : matplotlib.figure.Figure The figure object containing the plot. Notes ----- .. versionadded:: 0.9.0 """ if sfreq is None: sfreq = 1.0 xlabel = 'samples' else: xlabel = 'Time (s)' events = np.asarray(events) unique_events = np.unique(events[:, 2]) if event_id is not None: # get labels and unique event ids from event_id dict, # sorted by value event_id_rev = dict((v, k) for k, v in event_id.items()) conditions, unique_events_id = zip(*sorted(event_id.items(), key=lambda x: x[1])) for this_event in unique_events_id: if this_event not in unique_events: raise ValueError('%s from event_id is not present in events.' % this_event) for this_event in unique_events: if this_event not in unique_events_id: warnings.warn('event %s missing from event_id will be ignored.' % this_event) else: unique_events_id = unique_events if color is None: if len(unique_events) > len(COLORS): warnings.warn('More events than colors available. ' 'You should pass a list of unique colors.') colors = cycle(COLORS) color = dict() for this_event, this_color in zip(unique_events_id, colors): color[this_event] = this_color else: for this_event in color: if this_event not in unique_events_id: raise ValueError('%s from color is not present in events ' 'or event_id.' % this_event) for this_event in unique_events_id: if this_event not in color: warnings.warn('Color is not available for event %d. Default ' 'colors will be used.' 
% this_event) import matplotlib.pyplot as plt fig = None if axes is None: fig = plt.figure() ax = axes if axes else plt.gca() unique_events_id = np.array(unique_events_id) min_event = np.min(unique_events_id) max_event = np.max(unique_events_id) for idx, ev in enumerate(unique_events_id): ev_mask = events[:, 2] == ev kwargs = {} if event_id is not None: event_label = '{0} ({1})'.format(event_id_rev[ev], np.sum(ev_mask)) kwargs['label'] = event_label if ev in color: kwargs['color'] = color[ev] if equal_spacing: ax.plot((events[ev_mask, 0] - first_samp) / sfreq, (idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs) else: ax.plot((events[ev_mask, 0] - first_samp) / sfreq, events[ev_mask, 2], '.', **kwargs) if equal_spacing: ax.set_ylim(0, unique_events_id.size + 1) ax.set_yticks(1 + np.arange(unique_events_id.size)) ax.set_yticklabels(unique_events_id) else: ax.set_ylim([min_event - 1, max_event + 1]) ax.set_xlabel(xlabel) ax.set_ylabel('Events id') ax.grid('on') fig = fig if fig is not None else plt.gcf() if event_id is not None: box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) fig.canvas.draw() if show: plt.show() return fig def _get_presser(fig): """Helper to get our press callback""" callbacks = fig.canvas.callbacks.callbacks['button_press_event'] func = None for key, val in callbacks.items(): if val.func.__class__.__name__ == 'partial': func = val.func break assert func is not None return func def plot_dipole_amplitudes(dipoles, colors=None, show=True): """Plot the amplitude traces of a set of dipoles Parameters ---------- dipoles : list of instance of Dipoles The dipoles whose amplitudes should be shown. colors: list of colors | None Color to plot with each dipole. If None default colors are used. show : bool Show figure if True. Returns ------- fig : matplotlib.figure.Figure The figure object containing the plot. Notes ----- .. versionadded:: 0.9.0 """ import matplotlib.pyplot as plt if colors is None: colors = cycle(COLORS) fig, ax = plt.subplots(1, 1) xlim = [np.inf, -np.inf] for dip, color in zip(dipoles, colors): ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5) xlim[0] = min(xlim[0], dip.times[0]) xlim[1] = max(xlim[1], dip.times[-1]) ax.set_xlim(xlim) ax.set_xlabel('Time (sec)') ax.set_ylabel('Amplitude (nAm)') if show: fig.show() return fig
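A short usage sketch of the `plot_events` function defined above, called through the public `mne.viz` namespace on a synthetic events array (the sampling rate, event times and labels are made up):

import numpy as np
import mne

# Synthetic events array: columns are (sample, previous value, event id).
sfreq = 1000.0
samples = np.arange(0, 10000, 500)
ids = np.tile([1, 2], len(samples) // 2)
events = np.c_[samples, np.zeros_like(samples), ids]

event_id = dict(aud_l=1, vis_r=2)
color = {1: 'green', 2: 'red'}

mne.viz.plot_events(events, sfreq=sfreq, first_samp=0,
                    color=color, event_id=event_id)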
bsd-3-clause
run2/citytour
4symantec/Lib/site-packages/numpy-1.9.2-py2.7-win-amd64.egg/numpy/lib/npyio.py
21
66671
from __future__ import division, absolute_import, print_function import sys import os import re import itertools import warnings import weakref from operator import itemgetter import numpy as np from . import format from ._datasource import DataSource from ._compiled_base import packbits, unpackbits from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name ) from numpy.compat import ( asbytes, asstr, asbytes_nested, bytes, basestring, unicode ) if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle from future_builtins import map loads = pickle.loads __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' ] def seek_gzip_factory(f): """Use this factory to produce the class so that we can do a lazy import on gzip. """ import gzip class GzipFile(gzip.GzipFile): def seek(self, offset, whence=0): # figure out new position (we can only seek forwards) if whence == 1: offset = self.offset + offset if whence not in [0, 1]: raise IOError("Illegal argument") if offset < self.offset: # for negative seek, rewind and do positive seek self.rewind() count = offset - self.offset for i in range(count // 1024): self.read(1024) self.read(count % 1024) def tell(self): return self.offset if isinstance(f, str): f = GzipFile(f) elif isinstance(f, gzip.GzipFile): # cast to our GzipFile if its already a gzip.GzipFile try: name = f.name except AttributeError: # Backward compatibility for <= 2.5 name = f.filename mode = f.mode f = GzipFile(fileobj=f.fileobj, filename=name) f.mode = mode return f class BagObj(object): """ BagObj(obj) Convert attribute look-ups to getitems on the object passed in. Parameters ---------- obj : class instance Object on which attribute look-up is performed. Examples -------- >>> from numpy.lib.npyio import BagObj as BO >>> class BagDemo(object): ... def __getitem__(self, key): # An instance of BagObj(BagDemo) ... # will call this method when any ... # attribute look-up is required ... result = "Doesn't matter what you want, " ... return result + "you're gonna get this" ... >>> demo_obj = BagDemo() >>> bagobj = BO(demo_obj) >>> bagobj.hello_there "Doesn't matter what you want, you're gonna get this" >>> bagobj.I_can_be_anything "Doesn't matter what you want, you're gonna get this" """ def __init__(self, obj): # Use weakref to make NpzFile objects collectable by refcount self._obj = weakref.proxy(obj) def __getattribute__(self, key): try: return object.__getattribute__(self, '_obj')[key] except KeyError: raise AttributeError(key) def zipfile_factory(*args, **kwargs): import zipfile kwargs['allowZip64'] = True return zipfile.ZipFile(*args, **kwargs) class NpzFile(object): """ NpzFile(fid) A dictionary-like object with lazy-loading of files in the zipped archive provided on construction. `NpzFile` is used to load files in the NumPy ``.npz`` data archive format. It assumes that files in the archive have a ``.npy`` extension, other files are ignored. The arrays and file strings are lazily loaded on either getitem access using ``obj['key']`` or attribute lookup using ``obj.f.key``. A list of all files (without ``.npy`` extensions) can be obtained with ``obj.files`` and the ZipFile object itself using ``obj.zip``. 
Attributes ---------- files : list of str List of all files in the archive with a ``.npy`` extension. zip : ZipFile instance The ZipFile object initialized with the zipped archive. f : BagObj instance An object on which attribute can be performed as an alternative to getitem access on the `NpzFile` instance itself. Parameters ---------- fid : file or str The zipped archive to open. This is either a file-like object or a string containing the path to the archive. own_fid : bool, optional Whether NpzFile should close the file handle. Requires that `fid` is a file-like object. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True >>> npz.files ['y', 'x'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ def __init__(self, fid, own_fid=False): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) self._files = _zip.namelist() self.files = [] for x in self._files: if x.endswith('.npy'): self.files.append(x[:-4]) else: self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: self.fid = fid else: self.fid = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): """ Close the file. """ if self.zip is not None: self.zip.close() self.zip = None if self.fid is not None: self.fid.close() self.fid = None self.f = None # break reference cycle def __del__(self): self.close() def __getitem__(self, key): # FIXME: This seems like it will copy strings around # more than is strictly necessary. The zipfile # will read the string and then # the format.read_array will copy the string # to another place in memory. # It would be better if the zipfile could read # (or at least uncompress) the data # directly into the array memory. member = 0 if key in self._files: member = 1 elif key in self.files: member = 1 key += '.npy' if member: bytes = self.zip.open(key) magic = bytes.read(len(format.MAGIC_PREFIX)) bytes.close() if magic == format.MAGIC_PREFIX: bytes = self.zip.open(key) return format.read_array(bytes) else: return self.zip.read(key) else: raise KeyError("%s is not a file in the archive" % key) def __iter__(self): return iter(self.files) def items(self): """ Return a list of tuples, with each tuple (filename, array in file). """ return [(f, self[f]) for f in self.files] def iteritems(self): """Generator that returns tuples (filename, array in file).""" for f in self.files: yield (f, self[f]) def keys(self): """Return files in the archive with a ``.npy`` extension.""" return self.files def iterkeys(self): """Return an iterator over the files in the archive.""" return self.__iter__() def __contains__(self, key): return self.files.__contains__(key) def load(file, mmap_mode=None): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. Parameters ---------- file : file-like object or string The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap` for a detailed description of the modes). 
A memory-mapped array is kept on disk. However, it can be accessed and sliced like any ndarray. Memory mapping is especially useful for accessing small fragments of large files without reading the entire file into memory. Returns ------- result : array, tuple, dict, etc. Data stored in the file. For ``.npz`` files, the returned instance of NpzFile class must be closed to avoid leaking file descriptors. Raises ------ IOError If the input file does not exist or cannot be read. See Also -------- save, savez, savez_compressed, loadtxt memmap : Create a memory-map to an array stored in a file on disk. Notes ----- - If the file contains pickle data, then whatever object is stored in the pickle is returned. - If the file is a ``.npy`` file, then a single array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is returned, containing ``{filename: array}`` key-value pairs, one for each file in the archive. - If the file is a ``.npz`` file, the returned value supports the context manager protocol in a similar fashion to the open function:: with load('foo.npz') as data: a = data['a'] The underlying file descriptor is closed when exiting the 'with' block. Examples -------- Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') array([[1, 2, 3], [4, 5, 6]]) Store compressed data to disk, and load it again: >>> a=np.array([[1, 2, 3], [4, 5, 6]]) >>> b=np.array([1, 2]) >>> np.savez('/tmp/123.npz', a=a, b=b) >>> data = np.load('/tmp/123.npz') >>> data['a'] array([[1, 2, 3], [4, 5, 6]]) >>> data['b'] array([1, 2]) >>> data.close() Mem-map the stored array, and then access the second row directly from disk: >>> X = np.load('/tmp/123.npy', mmap_mode='r') >>> X[1, :] memmap([4, 5, 6]) """ import gzip own_fid = False if isinstance(file, basestring): fid = open(file, "rb") own_fid = True elif isinstance(file, gzip.GzipFile): fid = seek_gzip_factory(file) else: fid = file try: # Code to distinguish from NumPy binary files and pickles. _ZIP_PREFIX = asbytes('PK\x03\x04') N = len(format.MAGIC_PREFIX) magic = fid.read(N) fid.seek(-N, 1) # back-up if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) # Transfer file ownership to NpzFile tmp = own_fid own_fid = False return NpzFile(fid, own_fid=tmp) elif magic == format.MAGIC_PREFIX: # .npy file if mmap_mode: return format.open_memmap(file, mode=mmap_mode) else: return format.read_array(fid) else: # Try a pickle try: return pickle.load(fid) except: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: if own_fid: fid.close() def save(file, arr): """ Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- file : file or str File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string, a ``.npy`` extension will be appended to the file name if it does not already have one. arr : array_like Array data to be saved. See Also -------- savez : Save several arrays into a ``.npz`` archive savetxt, load Notes ----- For a description of the ``.npy`` format, see `format`. 
Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ own_fid = False if isinstance(file, basestring): if not file.endswith('.npy'): file = file + '.npy' fid = open(file, "wb") own_fid = True else: fid = file try: arr = np.asanyarray(arr) format.write_array(fid, arr) finally: if own_fid: fid.close() def savez(file, *args, **kwds): """ Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names, in the ``.npz`` file will match the keyword names. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string, the ``.npz`` extension will be appended to the file name if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- save : Save a single array to a binary file in NumPy format. savetxt : Save an array to a file as plain text. savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see `format`. When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files ['arr_1', 'arr_0'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Using `savez` with \\**kwds, the arrays are saved with the keyword names. >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npzfile = np.load(outfile) >>> npzfile.files ['y', 'x'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ _savez(file, args, kwds, False) def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. Parameters ---------- file : str File name of ``.npz`` file. args : Arguments Function arguments. kwds : Keyword arguments Keywords. See Also -------- numpy.savez : Save several arrays into an uncompressed ``.npz`` file format numpy.load : Load the files created by savez_compressed. 
""" _savez(file, args, kwds, True) def _savez(file, args, kwds, compress): # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. import zipfile # Import deferred for startup time improvement import tempfile if isinstance(file, basestring): if not file.endswith('.npz'): file = file + '.npz' namedict = kwds for i, val in enumerate(args): key = 'arr_%d' % i if key in namedict.keys(): raise ValueError( "Cannot use un-named variables and keyword %s" % key) namedict[key] = val if compress: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) # Stage arrays in a temporary file on disk, before writing to zip. fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: format.write_array(fid, np.asanyarray(val)) fid.close() fid = None zipf.write(tmpfile, arcname=fname) finally: if fid: fid.close() finally: os.remove(tmpfile) zipf.close() def _getconv(dtype): """ Find the correct dtype converter. Adapted from matplotlib """ typ = dtype.type if issubclass(typ, np.bool_): return lambda x: bool(int(x)) if issubclass(typ, np.uint64): return np.uint64 if issubclass(typ, np.int64): return np.int64 if issubclass(typ, np.integer): return lambda x: int(float(x)) elif issubclass(typ, np.floating): return float elif issubclass(typ, np.complex): return complex elif issubclass(typ, np.bytes_): return bytes else: return str def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0): """ Load data from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note that generators should return byte strings for Python 3k. dtype : data-type, optional Data-type of the resulting array; default: float. If this is a record data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. comments : str, optional The character used to indicate the start of a comment; default: '#'. delimiter : str, optional The string used to separate values. By default, this is any whitespace. converters : dict, optional A dictionary mapping column number to a function that will convert that column to a float. E.g., if column 0 is a date string: ``converters = {0: datestr2num}``. Converters can also be used to provide a default value for missing data (but see also `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a record data-type, arrays are returned for each field. Default is False. ndmin : int, optional The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. .. 
versionadded:: 1.6.0 Returns ------- out : ndarray Data read from the text file. See Also -------- load, fromstring, fromregex genfromtxt : Load data with missing values handled as specified. scipy.io.loadmat : reads MATLAB data files Notes ----- This function aims to be a fast reader for simply formatted files. The `genfromtxt` function provides more sophisticated handling of, e.g., lines with missing values. Examples -------- >>> from StringIO import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\\n2 3") >>> np.loadtxt(c) array([[ 0., 1.], [ 2., 3.]]) >>> d = StringIO("M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) array([('M', 21, 72.0), ('F', 35, 58.0)], dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO("1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x array([ 1., 3.]) >>> y array([ 2., 4.]) """ # Type conversions for Py3 convenience if comments is not None: comments = asbytes(comments) user_converters = converters if delimiter is not None: delimiter = asbytes(delimiter) if usecols is not None: usecols = list(usecols) fown = False try: if _is_string_like(fname): fown = True if fname.endswith('.gz'): fh = iter(seek_gzip_factory(fname)) elif fname.endswith('.bz2'): import bz2 fh = iter(bz2.BZ2File(fname)) elif sys.version_info[0] == 2: fh = iter(open(fname, 'U')) else: fh = iter(open(fname)) else: fh = iter(fname) except TypeError: raise ValueError('fname must be a string, file handle, or generator') X = [] def flatten_dtype(dt): """Unpack a structured data-type, and produce re-packing info.""" if dt.names is None: # If the dtype is flattened, return. # If the dtype has a shape, the dtype occurs # in the list more than once. shape = dt.shape if len(shape) == 0: return ([dt.base], None) else: packing = [(shape[-1], list)] if len(shape) > 1: for dim in dt.shape[-2::-1]: packing = [(dim*packing[0][0], packing*dim)] return ([dt.base] * int(np.prod(dt.shape)), packing) else: types = [] packing = [] for field in dt.names: tp, bytes = dt.fields[field] flat_dt, flat_packing = flatten_dtype(tp) types.extend(flat_dt) # Avoid extra nesting for subarrays if len(tp.shape) > 0: packing.extend(flat_packing) else: packing.append((len(flat_dt), flat_packing)) return (types, packing) def pack_items(items, packing): """Pack items into nested lists based on re-packing info.""" if packing is None: return items[0] elif packing is tuple: return tuple(items) elif packing is list: return list(items) else: start = 0 ret = [] for length, subpacking in packing: ret.append(pack_items(items[start:start+length], subpacking)) start += length return tuple(ret) def split_line(line): """Chop off comments, strip, and split at delimiter.""" if comments is None: line = asbytes(line).strip(asbytes('\r\n')) else: line = asbytes(line).split(comments)[0].strip(asbytes('\r\n')) if line: return line.split(delimiter) else: return [] try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) defconv = _getconv(dtype) # Skip the first `skiprows` lines for i in range(skiprows): next(fh) # Read until we find a line with some values, and use # it to estimate the number of columns, N. 
first_vals = None try: while not first_vals: first_line = next(fh) first_vals = split_line(first_line) except StopIteration: # End of lines reached first_line = '' first_vals = [] warnings.warn('loadtxt: Empty input file: "%s"' % fname) N = len(usecols or first_vals) dtype_types, packing = flatten_dtype(dtype) if len(dtype_types) > 1: # We're dealing with a structured array, each field of # the dtype matches a column converters = [_getconv(dt) for dt in dtype_types] else: # All fields have the same dtype converters = [defconv for i in range(N)] if N > 1: packing = [(N, tuple)] # By preference, use the converters specified by the user for i, conv in (user_converters or {}).items(): if usecols: try: i = usecols.index(i) except ValueError: # Unused converter specified continue converters[i] = conv # Parse each line, including the first for i, line in enumerate(itertools.chain([first_line], fh)): vals = split_line(line) if len(vals) == 0: continue if usecols: vals = [vals[i] for i in usecols] if len(vals) != N: line_num = i + skiprows + 1 raise ValueError("Wrong number of columns at line %d" % line_num) # Convert each value according to its column and store items = [conv(val) for (conv, val) in zip(converters, vals)] # Then pack it according to the dtype's nesting items = pack_items(items, packing) X.append(items) finally: if fown: fh.close() X = np.array(X, dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) # Verify that the array has at least dimensions `ndmin`. # Check correctness of the values of `ndmin` if ndmin not in [0, 1, 2]: raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) # Tweak the size and shape of the arrays - remove extraneous dimensions if X.ndim > ndmin: X = np.squeeze(X) # and ensure we have the minimum number of dimensions asked for # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 if X.ndim < ndmin: if ndmin == 1: X = np.atleast_1d(X) elif ndmin == 2: X = np.atleast_2d(X).T if unpack: if len(dtype_types) > 1: # For structured arrays, return an array for each field. return [X[field] for field in dtype.names] else: return X.T else: return X def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '): """ Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns c) a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. 
versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) own_fh = False if _is_string_like(fname): own_fh = True if fname.endswith('.gz'): import gzip fh = gzip.open(fname, 'wb') else: if sys.version_info[0] >= 3: fh = open(fname, 'wb') else: fh = open(fname, 'w') elif hasattr(fname, 'write'): fh = fname else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt,)) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(asbytes(comments + header + newline)) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) fh.write(asbytes(format % tuple(row2) + newline)) else: for row in X: fh.write(asbytes(format % tuple(row) + newline)) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(asbytes(comments + footer + newline)) finally: if own_fh: fh.close() def fromregex(file, regexp, dtype): """ Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : str or file File name or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. Returns ------- output : ndarray The output array, containing the part of the content of `file` that was matched by `regexp`. `output` is always a structured array. Raises ------ TypeError When `dtype` is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see `doc.structured_arrays`. Examples -------- >>> f = open('test.dat', 'w') >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', '<i8'), ('key', '|S3')]) >>> output['num'] array([1312, 1534, 444], dtype=int64) """ own_fh = False if not hasattr(file, "read"): file = open(file, 'rb') own_fh = True try: if not hasattr(regexp, 'match'): regexp = re.compile(asbytes(regexp)) if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. # Create the new array as a single data-type and then # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) output.dtype = dtype else: output = np.array(seq, dtype=dtype) return output finally: if own_fh: file.close() #####-------------------------------------------------------------------------- #---- --- ASCII functions --- #####-------------------------------------------------------------------------- def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, skip_header=0, skip_footer=0, converters=None, missing='', missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True): """ Load data from a text file, with missing values handled as specified. Each line past the first `skip_header` lines is split at the `delimiter` character, and characters following the `comments` character are discarded. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers can also be provided as width(s) of each field. skip_rows : int, optional `skip_rows` was deprecated in numpy 1.5, and will be removed in numpy 2.0. Please use `skip_header` instead. skip_header : int, optional The number of lines to skip at the beginning of the file. skip_footer : int, optional The number of lines to skip at the end of the file. converters : variable, optional The set of functions that convert the data of a column to a value. The converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. missing : variable, optional `missing` was deprecated in numpy 1.5, and will be removed in numpy 2.0. Please use `missing_values` instead. missing_values : variable, optional The set of strings corresponding to missing data. filling_values : variable, optional The set of values to be used as default when the data are missing. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first valid line after the first `skip_header` lines. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. defaultfmt : str, optional A format used to define default field names, such as "f%i" or "f_%02i". 
autostrip : bool, optional Whether to automatically strip white spaces from the variables. replace_space : char, optional Character(s) used in replacement of white spaces in the variables names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` usemask : bool, optional If True, return a masked array. If False, return a regular array. loose : bool, optional If True, do not raise errors for invalid values. invalid_raise : bool, optional If True, an exception is raised if an inconsistency is detected in the number of columns. If False, a warning is emitted and the offending lines are skipped. Returns ------- out : ndarray Data read from the text file. If `usemask` is True, this is a masked array. See Also -------- numpy.loadtxt : equivalent function when no data is missing. Notes ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variables are named (either by a flexible dtype or with `names`, there must not be any header in the file (else a ValueError exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. References ---------- .. [1] Numpy User Guide, section `I/O with Numpy <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. Examples --------- >>> from StringIO import StringIO >>> import numpy as np Comma delimited file with mixed dtype >>> s = StringIO("1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Using dtype = None >>> s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Specifying dtype and names >>> s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) An example with fixed-width columns >>> s = StringIO("11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data array((1, 1.3, 'abcde'), dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) """ # Py3 data conversions to bytes, for convenience if comments is not None: comments = asbytes(comments) if isinstance(delimiter, unicode): delimiter = asbytes(delimiter) if isinstance(missing, unicode): missing = asbytes(missing) if isinstance(missing_values, (unicode, list, tuple)): missing_values = asbytes_nested(missing_values) # if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters user_converters = converters or {} if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if isinstance(fname, basestring): if sys.version_info[0] == 2: fhd = iter(np.lib._datasource.open(fname, 'rbU')) else: fhd = iter(np.lib._datasource.open(fname, 'rb')) own_fhd = True else: fhd = iter(fname) except TypeError: raise TypeError( "fname must be a string, filehandle, or generator. " "(got %s instead)" % type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip)._handyman validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Get the first valid lines after the first skiprows ones .. if skiprows: warnings.warn( "The use of `skiprows` is deprecated, it will be removed in " "numpy 2.0.\nPlease use `skip_header` instead.", DeprecationWarning) skip_header = skiprows # Skip the first `skip_header` rows for i in range(skip_header): next(fhd) # Keep on until we find the first valid values first_values = None try: while not first_values: first_line = next(fhd) if names is True: if comments in first_line: first_line = ( asbytes('').join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty first_line = asbytes('') first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname) # Should we take the first values as names ? 
if names is True: fval = first_values[0].strip() if fval in comments: del first_values[0] # Check the columns to use: make sure `usecols` is a list if usecols is not None: try: usecols = [_.strip() for _ in usecols.split(",")] except AttributeError: try: usecols = list(usecols) except TypeError: usecols = [usecols, ] nbcols = len(usecols or first_values) # Check the names and overwrite the dtype.names if needed if names is True: names = validate_names([_bytes_to_name(_.strip()) for _ in first_values]) first_line = asbytes('') elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: names = validate_names(names) # Get the dtype if dtype is not None: dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names) # Make sure the names is a list (for 2.5) if names is not None: names = list(names) if usecols: for (i, current) in enumerate(usecols): # if usecols is a list of names, convert to a list of indices if _is_string_like(current): usecols[i] = names.index(current) elif current < 0: usecols[i] = current + len(first_values) # If the dtype is not None, make sure we update it if (dtype is not None) and (len(dtype) > nbcols): descr = dtype.descr dtype = np.dtype([descr[_] for _ in usecols]) names = list(dtype.names) # If `names` is not None, update the names elif (names is not None) and (len(names) > nbcols): names = [names[_] for _ in usecols] elif (names is not None) and (dtype is not None): names = list(dtype.names) # Process the missing values ............................... # Rename missing_values for convenience user_missing_values = missing_values or () # Define the list of missing_values (one column: one list) missing_values = [list([asbytes('')]) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): # Loop on the items for (key, val) in user_missing_values.items(): # Is the key a string ? if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped continue # Redefine the key as needed if it's a column number if usecols: try: key = usecols.index(key) except ValueError: pass # Transform the value as a list of string if isinstance(val, (list, tuple)): val = [str(_) for _ in val] else: val = [str(val), ] # Add the value(s) to the current list of missing if key is None: # None acts as default for miss in missing_values: miss.extend(val) else: missing_values[key].extend(val) # We have a sequence : each item matches a column elif isinstance(user_missing_values, (list, tuple)): for (value, entry) in zip(user_missing_values, missing_values): value = str(value) if value not in entry: entry.append(value) # We have a string : apply it to all entries elif isinstance(user_missing_values, bytes): user_value = user_missing_values.split(asbytes(",")) for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries else: for entry in missing_values: entry.extend([str(user_missing_values)]) # Process the deprecated `missing` if missing != asbytes(''): warnings.warn( "The use of `missing` is deprecated, it will be removed in " "Numpy 2.0.\nPlease use `missing_values` instead.", DeprecationWarning) values = [str(_) for _ in missing.split(asbytes(","))] for entry in missing_values: entry.extend(values) # Process the filling_values ............................... 
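# (Note: `filling_values` may be a dict keyed by column name or index, a
# sequence giving one default per column, or a single scalar applied to every
# column; the block below normalizes each of these forms into one default
# value per column.)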
# Rename the input for convenience user_filling_values = filling_values if user_filling_values is None: user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually if isinstance(user_filling_values, dict): for (key, val) in user_filling_values.items(): if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped, continue # Redefine the key if it's a column number and usecols is defined if usecols: try: key = usecols.index(key) except ValueError: pass # Add the value to the list filling_values[key] = val # We have a sequence : update on a one-to-one basis elif isinstance(user_filling_values, (list, tuple)): n = len(user_filling_values) if (n <= nbcols): filling_values[:n] = user_filling_values else: filling_values = user_filling_values[:nbcols] # We have something else : use it for all entries else: filling_values = [user_filling_values] * nbcols # Initialize the converters ................................ if dtype is None: # Note: we can't use a [...]*nbcols, as we would have 3 times the same # ... converter, instead of 3 different converters. converters = [StringConverter(None, missing_values=miss, default=fill) for (miss, fill) in zip(missing_values, filling_values)] else: dtype_flat = flatten_dtype(dtype, flatten_base=True) # Initialize the converters if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) converters = [StringConverter(dt, locked=True, missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) converters = [StringConverter(dtype, locked=True, missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead if _is_string_like(j): try: j = names.index(j) i = j except ValueError: continue elif usecols: try: i = usecols.index(j) except ValueError: # Unused converter specified continue else: i = j # Find the value to test - first_line is not filtered by usecols: if len(first_line): testing_value = first_values[j] else: testing_value = None converters[i].update(conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) uc_update.append((i, conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. #miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... rows rows = [] append_to_rows = rows.append # ... masks if usemask: masks = [] append_to_masks = masks.append # ... 
invalid invalid = [] append_to_invalid = invalid.append # Parse each line for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): values = split_line(line) nbvalues = len(values) # Skip an empty line if nbvalues == 0: continue # Select only the columns we need if usecols: try: values = [values[_] for _ in usecols] except IndexError: append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) if usemask: append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) if own_fhd: fhd.close() # Upgrade the converters (if needed) if dtype is None: for (i, converter) in enumerate(converters): current_column = [itemgetter(i)(_m) for _m in rows] try: converter.iterupgrade(current_column) except ConverterLockError: errmsg = "Converter #%i is locked and cannot be upgraded: " % i current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): errmsg += "(occurred line #%i for value '%s')" errmsg %= (j + 1 + skip_header, value) raise ConverterError(errmsg) # Check that we don't have invalid values nbinvalid = len(invalid) if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) invalid = invalid[:nbinvalid - nbinvalid_skipped] skip_footer -= nbinvalid_skipped # # nbrows -= skip_footer # errmsg = [template % (i, nb) # for (i, nb) in invalid if i < nbrows] # else: errmsg = [template % (i, nb) for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) # Raise an exception ? if invalid_raise: raise ValueError(errmsg) # Issue a warning ? else: warnings.warn(errmsg, ConversionWarning) # Strip the last skip_footer data if skip_footer > 0: rows = rows[:-skip_footer] if usemask: masks = masks[:-skip_footer] # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... if loose: rows = list( zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) else: rows = list( zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) # Reset the dtype data = rows if dtype is None: # Get the dtypes from the types of the converters column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) if v in (type('S'), np.string_)] # ... and take the largest number of chars. for i in strcolidx: column_types[i] = "|S%i" % max(len(row[i]) for row in data) # if names is None: # If the dtype is uniform, don't define names, else use '' base = set([c.type for c in converters if c._checked]) if len(base) == 1: (ddtype, mdtype) = (list(base)[0], np.bool) else: ddtype = [(defaultfmt % i, dt) for (i, dt) in enumerate(column_types)] if usemask: mdtype = [(defaultfmt % i, np.bool) for (i, dt) in enumerate(column_types)] else: ddtype = list(zip(names, column_types)) mdtype = list(zip(names, [np.bool] * len(column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed if names and dtype.names: dtype.names = names # Case 1. 
We have a structured type if len(dtype_flat) > 1: # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. if 'O' in (_.char for _ in dtype_flat): if has_nested_fields(dtype): raise NotImplementedError( "Nested fields involving objects are not supported...") else: output = np.array(data, dtype=dtype) else: rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) output = rows.view(dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array( masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) # Case #2. We have a basic dtype else: # We used some user-defined converters if user_converters: ishomogeneous = True descr = [] for i, ttype in enumerate([conv.type for conv in converters]): # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) if ttype == np.string_: ttype = "|S%i" % max(len(row[i]) for row in data) descr.append(('', ttype)) else: descr.append(('', dtype)) # So we changed the dtype ? if not ishomogeneous: # We have more than one field if len(descr) > 1: dtype = np.dtype(descr) # We have only one field: drop the name if not needed. else: dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: if dtype.names: mdtype = [(_, np.bool) for _ in dtype.names] else: mdtype = np.bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: for (name, conv) in zip(names or (), converters): missing_values = [conv(_) for _ in conv.missing_values if _ != asbytes('')] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array if usemask: output = output.view(MaskedArray) output._mask = outputmask if unpack: return output.squeeze().T return output.squeeze() def ndfromtxt(fname, **kwargs): """ Load ASCII data stored in a file and return it as a single array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function. """ kwargs['usemask'] = False return genfromtxt(fname, **kwargs) def mafromtxt(fname, **kwargs): """ Load ASCII data stored in a text file and return a masked array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. """ kwargs['usemask'] = True return genfromtxt(fname, **kwargs) def recfromtxt(fname, **kwargs): """ Load ASCII data from a file and return it in a record array. If ``usemask=False`` a standard `recarray` is returned, if ``usemask=True`` a MaskedRecords array is returned. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ kwargs.setdefault("dtype", None) usemask = kwargs.get('usemask', False) output = genfromtxt(fname, **kwargs) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output def recfromcsv(fname, **kwargs): """ Load ASCII data stored in a comma-separated file. 
The returned array is a record array (if ``usemask=False``, see `recarray`) or a masked record array (if ``usemask=True``, see `ma.mrecords.MaskedRecords`). Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ # Set default kwargs for genfromtxt as relevant to csv import. kwargs.setdefault("case_sensitive", "lower") kwargs.setdefault("names", True) kwargs.setdefault("delimiter", ",") kwargs.setdefault("dtype", None) output = genfromtxt(fname, **kwargs) usemask = kwargs.get("usemask", False) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output
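# A minimal round-trip sketch of the plain-text helpers defined above
# (illustrative only; 'demo.txt' is a hypothetical file name):
#
#     >>> a = np.arange(6.).reshape(2, 3)
#     >>> np.savetxt('demo.txt', a, fmt='%.2f', delimiter=',')
#     >>> b = np.loadtxt('demo.txt', delimiter=',')
#     >>> np.allclose(a, b)
#     True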
mit
scottpurdy/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py
69
37420
""" Tick locating and formatting ============================ This module contains classes to support completely configurable tick locating and formatting. Although the locators know nothing about major or minor ticks, they are used by the Axis class to support major and minor tick locating and formatting. Generic tick locators and formatters are provided, as well as domain specific custom ones.. Tick locating ------------- The Locator class is the base class for all tick locators. The locators handle autoscaling of the view limits based on the data limits, and the choosing of tick locations. A useful semi-automatic tick locator is MultipleLocator. You initialize this with a base, eg 10, and it picks axis limits and ticks that are multiples of your base. The Locator subclasses defined here are :class:`NullLocator` No ticks :class:`FixedLocator` Tick locations are fixed :class:`IndexLocator` locator for index plots (eg. where x = range(len(y))) :class:`LinearLocator` evenly spaced ticks from min to max :class:`LogLocator` logarithmically ticks from min to max :class:`MultipleLocator` ticks and range are a multiple of base; either integer or float :class:`OldAutoLocator` choose a MultipleLocator and dyamically reassign it for intelligent ticking during navigation :class:`MaxNLocator` finds up to a max number of ticks at nice locations :class:`AutoLocator` :class:`MaxNLocator` with simple defaults. This is the default tick locator for most plotting. There are a number of locators specialized for date locations - see the dates module You can define your own locator by deriving from Locator. You must override the __call__ method, which returns a sequence of locations, and you will probably want to override the autoscale method to set the view limits from the data limits. If you want to override the default locator, use one of the above or a custom locator and pass it to the x or y axis instance. The relevant methods are:: ax.xaxis.set_major_locator( xmajorLocator ) ax.xaxis.set_minor_locator( xminorLocator ) ax.yaxis.set_major_locator( ymajorLocator ) ax.yaxis.set_minor_locator( yminorLocator ) The default minor locator is the NullLocator, eg no minor ticks on by default. Tick formatting --------------- Tick formatting is controlled by classes derived from Formatter. The formatter operates on a single tick value and returns a string to the axis. :class:`NullFormatter` no labels on the ticks :class:`FixedFormatter` set the strings manually for the labels :class:`FuncFormatter` user defined function sets the labels :class:`FormatStrFormatter` use a sprintf format string :class:`ScalarFormatter` default formatter for scalars; autopick the fmt string :class:`LogFormatter` formatter for log axes You can derive your own formatter from the Formatter base class by simply overriding the ``__call__`` method. The formatter class has access to the axis view and data limits. To control the major and minor tick label formats, use one of the following methods:: ax.xaxis.set_major_formatter( xmajorFormatter ) ax.xaxis.set_minor_formatter( xminorFormatter ) ax.yaxis.set_major_formatter( ymajorFormatter ) ax.yaxis.set_minor_formatter( yminorFormatter ) See :ref:`pylab_examples-major_minor_demo1` for an example of setting major an minor ticks. See the :mod:`matplotlib.dates` module for more information and examples of using date locators and formatters. 
""" from __future__ import division import math import numpy as np from matplotlib import rcParams from matplotlib import cbook from matplotlib import transforms as mtransforms class TickHelper: axis = None class DummyAxis: def __init__(self): self.dataLim = mtransforms.Bbox.unit() self.viewLim = mtransforms.Bbox.unit() def get_view_interval(self): return self.viewLim.intervalx def set_view_interval(self, vmin, vmax): self.viewLim.intervalx = vmin, vmax def get_data_interval(self): return self.dataLim.intervalx def set_data_interval(self, vmin, vmax): self.dataLim.intervalx = vmin, vmax def set_axis(self, axis): self.axis = axis def create_dummy_axis(self): if self.axis is None: self.axis = self.DummyAxis() def set_view_interval(self, vmin, vmax): self.axis.set_view_interval(vmin, vmax) def set_data_interval(self, vmin, vmax): self.axis.set_data_interval(vmin, vmax) def set_bounds(self, vmin, vmax): self.set_view_interval(vmin, vmax) self.set_data_interval(vmin, vmax) class Formatter(TickHelper): """ Convert the tick location to a string """ # some classes want to see all the locs to help format # individual ones locs = [] def __call__(self, x, pos=None): 'Return the format for tick val x at position pos; pos=None indicated unspecified' raise NotImplementedError('Derived must overide') def format_data(self,value): return self.__call__(value) def format_data_short(self,value): 'return a short string version' return self.format_data(value) def get_offset(self): return '' def set_locs(self, locs): self.locs = locs def fix_minus(self, s): """ some classes may want to replace a hyphen for minus with the proper unicode symbol as described `here <http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_. The default is to do nothing Note, if you use this method, eg in :meth`format_data` or call, you probably don't want to use it for :meth:`format_data_short` since the toolbar uses this for interative coord reporting and I doubt we can expect GUIs across platforms will handle the unicode correctly. So for now the classes that override :meth:`fix_minus` should have an explicit :meth:`format_data_short` method """ return s class NullFormatter(Formatter): 'Always return the empty string' def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' return '' class FixedFormatter(Formatter): 'Return fixed strings for tick labels' def __init__(self, seq): """ seq is a sequence of strings. For positions `i<len(seq)` return *seq[i]* regardless of *x*. Otherwise return '' """ self.seq = seq self.offset_string = '' def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' if pos is None or pos>=len(self.seq): return '' else: return self.seq[pos] def get_offset(self): return self.offset_string def set_offset_string(self, ofs): self.offset_string = ofs class FuncFormatter(Formatter): """ User defined function for formatting """ def __init__(self, func): self.func = func def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' return self.func(x, pos) class FormatStrFormatter(Formatter): """ Use a format string to format the tick """ def __init__(self, fmt): self.fmt = fmt def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' return self.fmt % x class OldScalarFormatter(Formatter): """ Tick location is a plain old number. 
""" def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' xmin, xmax = self.axis.get_view_interval() d = abs(xmax - xmin) return self.pprint_val(x,d) def pprint_val(self, x, d): #if the number is not too big and it's an int, format it as an #int if abs(x)<1e4 and x==int(x): return '%d' % x if d < 1e-2: fmt = '%1.3e' elif d < 1e-1: fmt = '%1.3f' elif d > 1e5: fmt = '%1.1e' elif d > 10 : fmt = '%1.1f' elif d > 1 : fmt = '%1.2f' else: fmt = '%1.3f' s = fmt % x #print d, x, fmt, s tup = s.split('e') if len(tup)==2: mantissa = tup[0].rstrip('0').rstrip('.') sign = tup[1][0].replace('+', '') exponent = tup[1][1:].lstrip('0') s = '%se%s%s' %(mantissa, sign, exponent) else: s = s.rstrip('0').rstrip('.') return s class ScalarFormatter(Formatter): """ Tick location is a plain old number. If useOffset==True and the data range is much smaller than the data average, then an offset will be determined such that the tick labels are meaningful. Scientific notation is used for data < 1e-3 or data >= 1e4. """ def __init__(self, useOffset=True, useMathText=False): # useOffset allows plotting small data ranges with large offsets: # for example: [1+1e-9,1+2e-9,1+3e-9] # useMathText will render the offset and scientific notation in mathtext self._useOffset = useOffset self._usetex = rcParams['text.usetex'] self._useMathText = useMathText self.offset = 0 self.orderOfMagnitude = 0 self.format = '' self._scientific = True self._powerlimits = rcParams['axes.formatter.limits'] def fix_minus(self, s): 'use a unicode minus rather than hyphen' if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s else: return s.replace('-', u'\u2212') def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' if len(self.locs)==0: return '' else: s = self.pprint_val(x) return self.fix_minus(s) def set_scientific(self, b): '''True or False to turn scientific notation on or off see also :meth:`set_powerlimits` ''' self._scientific = bool(b) def set_powerlimits(self, lims): ''' Sets size thresholds for scientific notation. e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in which scientific notation is used for numbers less than 1e-3 or greater than 1e4. See also :meth:`set_scientific`. 
''' assert len(lims) == 2, "argument must be a sequence of length 2" self._powerlimits = lims def format_data_short(self,value): 'return a short formatted string representation of a number' return '%1.3g'%value def format_data(self,value): 'return a formatted string representation of a number' s = self._formatSciNotation('%1.10e'% value) return self.fix_minus(s) def get_offset(self): """Return scientific notation, plus offset""" if len(self.locs)==0: return '' s = '' if self.orderOfMagnitude or self.offset: offsetStr = '' sciNotStr = '' if self.offset: offsetStr = self.format_data(self.offset) if self.offset > 0: offsetStr = '+' + offsetStr if self.orderOfMagnitude: if self._usetex or self._useMathText: sciNotStr = self.format_data(10**self.orderOfMagnitude) else: sciNotStr = '1e%d'% self.orderOfMagnitude if self._useMathText: if sciNotStr != '': sciNotStr = r'\times\mathdefault{%s}' % sciNotStr s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$')) elif self._usetex: if sciNotStr != '': sciNotStr = r'\times%s' % sciNotStr s = ''.join(('$',sciNotStr,offsetStr,'$')) else: s = ''.join((sciNotStr,offsetStr)) return self.fix_minus(s) def set_locs(self, locs): 'set the locations of the ticks' self.locs = locs if len(self.locs) > 0: vmin, vmax = self.axis.get_view_interval() d = abs(vmax-vmin) if self._useOffset: self._set_offset(d) self._set_orderOfMagnitude(d) self._set_format() def _set_offset(self, range): # offset of 20,001 is 20,000, for example locs = self.locs if locs is None or not len(locs) or range == 0: self.offset = 0 return ave_loc = np.mean(locs) if ave_loc: # dont want to take log10(0) ave_oom = math.floor(math.log10(np.mean(np.absolute(locs)))) range_oom = math.floor(math.log10(range)) if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs if ave_loc < 0: self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom else: self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom) else: self.offset = 0 def _set_orderOfMagnitude(self,range): # if scientific notation is to be used, find the appropriate exponent # if using an numerical offset, find the exponent after applying the offset if not self._scientific: self.orderOfMagnitude = 0 return locs = np.absolute(self.locs) if self.offset: oom = math.floor(math.log10(range)) else: if locs[0] > locs[-1]: val = locs[0] else: val = locs[-1] if val == 0: oom = 0 else: oom = math.floor(math.log10(val)) if oom <= self._powerlimits[0]: self.orderOfMagnitude = oom elif oom >= self._powerlimits[1]: self.orderOfMagnitude = oom else: self.orderOfMagnitude = 0 def _set_format(self): # set the format string to format all the ticklabels # The floating point black magic (adding 1e-15 and formatting # to 8 digits) may warrant review and cleanup. locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15 sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \ for loc in locs] sigfigs.sort() self.format = '%1.' 
+ str(sigfigs[-1]) + 'f' if self._usetex: self.format = '$%s$' % self.format elif self._useMathText: self.format = '$\mathdefault{%s}$' % self.format def pprint_val(self, x): xp = (x-self.offset)/10**self.orderOfMagnitude if np.absolute(xp) < 1e-8: xp = 0 return self.format % xp def _formatSciNotation(self, s): # transform 1e+004 into 1e4, for example tup = s.split('e') try: significand = tup[0].rstrip('0').rstrip('.') sign = tup[1][0].replace('+', '') exponent = tup[1][1:].lstrip('0') if self._useMathText or self._usetex: if significand == '1': # reformat 1x10^y as 10^y significand = '' if exponent: exponent = '10^{%s%s}'%(sign, exponent) if significand and exponent: return r'%s{\times}%s'%(significand, exponent) else: return r'%s%s'%(significand, exponent) else: s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e') return s except IndexError, msg: return s class LogFormatter(Formatter): """ Format values for log axis; if attribute *decadeOnly* is True, only the decades will be labelled. """ def __init__(self, base=10.0, labelOnlyBase = True): """ *base* is used to locate the decade tick, which will be the only one to be labeled if *labelOnlyBase* is ``False`` """ self._base = base+0.0 self.labelOnlyBase=labelOnlyBase self.decadeOnly = True def base(self,base): 'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`' self._base=base def label_minor(self,labelOnlyBase): 'switch on/off minor ticks labeling' self.labelOnlyBase=labelOnlyBase def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' vmin, vmax = self.axis.get_view_interval() d = abs(vmax - vmin) b=self._base if x == 0.0: return '0' sign = np.sign(x) # only label the decades fx = math.log(abs(x))/math.log(b) isDecade = self.is_decade(fx) if not isDecade and self.labelOnlyBase: s = '' elif x>10000: s= '%1.0e'%x elif x<1: s = '%1.0e'%x else : s = self.pprint_val(x,d) if sign == -1: s = '-%s' % s return self.fix_minus(s) def format_data(self,value): self.labelOnlyBase = False value = cbook.strip_math(self.__call__(value)) self.labelOnlyBase = True return value def format_data_short(self,value): 'return a short formatted string representation of a number' return '%1.3g'%value def is_decade(self, x): n = self.nearest_long(x) return abs(x-n)<1e-10 def nearest_long(self, x): if x==0: return 0L elif x>0: return long(x+0.5) else: return long(x-0.5) def pprint_val(self, x, d): #if the number is not too big and it's an int, format it as an #int if abs(x)<1e4 and x==int(x): return '%d' % x if d < 1e-2: fmt = '%1.3e' elif d < 1e-1: fmt = '%1.3f' elif d > 1e5: fmt = '%1.1e' elif d > 10 : fmt = '%1.1f' elif d > 1 : fmt = '%1.2f' else: fmt = '%1.3f' s = fmt % x #print d, x, fmt, s tup = s.split('e') if len(tup)==2: mantissa = tup[0].rstrip('0').rstrip('.') sign = tup[1][0].replace('+', '') exponent = tup[1][1:].lstrip('0') s = '%se%s%s' %(mantissa, sign, exponent) else: s = s.rstrip('0').rstrip('.') return s class LogFormatterExponent(LogFormatter): """ Format values for log axis; using ``exponent = log_base(value)`` """ def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) d = abs(vmax-vmin) b=self._base if x == 0: return '0' sign = np.sign(x) # only label the decades fx = math.log(abs(x))/math.log(b) isDecade = self.is_decade(fx) if not isDecade and self.labelOnlyBase: s = '' #if 0: pass elif fx>10000: s= '%1.0e'%fx #elif 
x<1: s = '$10^{%d}$'%fx #elif x<1: s = '10^%d'%fx elif fx<1: s = '%1.0e'%fx else : s = self.pprint_val(fx,d) if sign == -1: s = '-%s' % s return self.fix_minus(s) class LogFormatterMathtext(LogFormatter): """ Format values for log axis; using ``exponent = log_base(value)`` """ def __call__(self, x, pos=None): 'Return the format for tick val *x* at position *pos*' b = self._base # only label the decades if x == 0: return '$0$' sign = np.sign(x) fx = math.log(abs(x))/math.log(b) isDecade = self.is_decade(fx) usetex = rcParams['text.usetex'] if sign == -1: sign_string = '-' else: sign_string = '' if not isDecade and self.labelOnlyBase: s = '' elif not isDecade: if usetex: s = r'$%s%d^{%.2f}$'% (sign_string, b, fx) else: s = '$\mathdefault{%s%d^{%.2f}}$'% (sign_string, b, fx) else: if usetex: s = r'$%s%d^{%d}$'% (sign_string, b, self.nearest_long(fx)) else: s = r'$\mathdefault{%s%d^{%d}}$'% (sign_string, b, self.nearest_long(fx)) return s class Locator(TickHelper): """ Determine the tick locations; Note, you should not use the same locator between different :class:`~matplotlib.axis.Axis` because the locator stores references to the Axis data and view limits """ def __call__(self): 'Return the locations of the ticks' raise NotImplementedError('Derived must override') def view_limits(self, vmin, vmax): """ select a scale for the range from vmin to vmax Normally This will be overridden. """ return mtransforms.nonsingular(vmin, vmax) def autoscale(self): 'autoscale the view limits' return self.view_limits(*self.axis.get_view_interval()) def pan(self, numsteps): 'Pan numticks (can be positive or negative)' ticks = self() numticks = len(ticks) vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) if numticks>2: step = numsteps*abs(ticks[0]-ticks[1]) else: d = abs(vmax-vmin) step = numsteps*d/6. vmin += step vmax += step self.axis.set_view_interval(vmin, vmax, ignore=True) def zoom(self, direction): "Zoom in/out on axis; if direction is >0 zoom in, else zoom out" vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) interval = abs(vmax-vmin) step = 0.1*interval*direction self.axis.set_view_interval(vmin + step, vmax - step, ignore=True) def refresh(self): 'refresh internal information based on current lim' pass class IndexLocator(Locator): """ Place a tick on every multiple of some base number of points plotted, eg on every 5th point. It is assumed that you are doing index plotting; ie the axis is 0, len(data). This is mainly useful for x ticks. """ def __init__(self, base, offset): 'place ticks on the i-th data points where (i-offset)%base==0' self._base = base self.offset = offset def __call__(self): 'Return the locations of the ticks' dmin, dmax = self.axis.get_data_interval() return np.arange(dmin + self.offset, dmax+1, self._base) class FixedLocator(Locator): """ Tick locations are fixed. If nbins is not None, the array of possible positions will be subsampled to keep the number of ticks <= nbins +1. 
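A minimal usage sketch (``ax`` is assumed to be an existing Axes; the tick
positions and labels are arbitrary)::

    ax.xaxis.set_major_locator( FixedLocator([0.0, 0.5, 1.0]) )
    ax.xaxis.set_major_formatter( FixedFormatter(['low', 'mid', 'high']) )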
""" def __init__(self, locs, nbins=None): self.locs = locs self.nbins = nbins if self.nbins is not None: self.nbins = max(self.nbins, 2) def __call__(self): 'Return the locations of the ticks' if self.nbins is None: return self.locs step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1) return self.locs[::step] class NullLocator(Locator): """ No ticks """ def __call__(self): 'Return the locations of the ticks' return [] class LinearLocator(Locator): """ Determine the tick locations The first time this function is called it will try to set the number of ticks to make a nice tick partitioning. Thereafter the number of ticks will be fixed so that interactive navigation will be nice """ def __init__(self, numticks = None, presets=None): """ Use presets to set locs based on lom. A dict mapping vmin, vmax->locs """ self.numticks = numticks if presets is None: self.presets = {} else: self.presets = presets def __call__(self): 'Return the locations of the ticks' vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) if vmax<vmin: vmin, vmax = vmax, vmin if (vmin, vmax) in self.presets: return self.presets[(vmin, vmax)] if self.numticks is None: self._set_numticks() if self.numticks==0: return [] ticklocs = np.linspace(vmin, vmax, self.numticks) return ticklocs def _set_numticks(self): self.numticks = 11 # todo; be smart here; this is just for dev def view_limits(self, vmin, vmax): 'Try to choose the view limits intelligently' if vmax<vmin: vmin, vmax = vmax, vmin if vmin==vmax: vmin-=1 vmax+=1 exponent, remainder = divmod(math.log10(vmax - vmin), 1) if remainder < 0.5: exponent -= 1 scale = 10**(-exponent) vmin = math.floor(scale*vmin)/scale vmax = math.ceil(scale*vmax)/scale return mtransforms.nonsingular(vmin, vmax) def closeto(x,y): if abs(x-y)<1e-10: return True else: return False class Base: 'this solution has some hacks to deal with floating point inaccuracies' def __init__(self, base): assert(base>0) self._base = base def lt(self, x): 'return the largest multiple of base < x' d,m = divmod(x, self._base) if closeto(m,0) and not closeto(m/self._base,1): return (d-1)*self._base return d*self._base def le(self, x): 'return the largest multiple of base <= x' d,m = divmod(x, self._base) if closeto(m/self._base,1): # was closeto(m, self._base) #looks like floating point error return (d+1)*self._base return d*self._base def gt(self, x): 'return the smallest multiple of base > x' d,m = divmod(x, self._base) if closeto(m/self._base,1): #looks like floating point error return (d+2)*self._base return (d+1)*self._base def ge(self, x): 'return the smallest multiple of base >= x' d,m = divmod(x, self._base) if closeto(m,0) and not closeto(m/self._base,1): return d*self._base return (d+1)*self._base def get_base(self): return self._base class MultipleLocator(Locator): """ Set a tick on every integer that is multiple of base in the view interval """ def __init__(self, base=1.0): self._base = Base(base) def __call__(self): 'Return the locations of the ticks' vmin, vmax = self.axis.get_view_interval() if vmax<vmin: vmin, vmax = vmax, vmin vmin = self._base.ge(vmin) base = self._base.get_base() n = (vmax - vmin + 0.001*base)//base locs = vmin + np.arange(n+1) * base return locs def view_limits(self, dmin, dmax): """ Set the view limits to the nearest multiples of base that contain the data """ vmin = self._base.le(dmin) vmax = self._base.ge(dmax) if vmin==vmax: vmin -=1 vmax +=1 return mtransforms.nonsingular(vmin, vmax) def scale_range(vmin, vmax, n 
= 1, threshold=100): dv = abs(vmax - vmin) maxabsv = max(abs(vmin), abs(vmax)) if maxabsv == 0 or dv/maxabsv < 1e-12: return 1.0, 0.0 meanv = 0.5*(vmax+vmin) if abs(meanv)/dv < threshold: offset = 0 elif meanv > 0: ex = divmod(math.log10(meanv), 1)[0] offset = 10**ex else: ex = divmod(math.log10(-meanv), 1)[0] offset = -10**ex ex = divmod(math.log10(dv/n), 1)[0] scale = 10**ex return scale, offset class MaxNLocator(Locator): """ Select no more than N intervals at nice locations. """ def __init__(self, nbins = 10, steps = None, trim = True, integer=False, symmetric=False): self._nbins = int(nbins) self._trim = trim self._integer = integer self._symmetric = symmetric if steps is None: self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10] else: if int(steps[-1]) != 10: steps = list(steps) steps.append(10) self._steps = steps if integer: self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001] def bin_boundaries(self, vmin, vmax): nbins = self._nbins scale, offset = scale_range(vmin, vmax, nbins) if self._integer: scale = max(1, scale) vmin -= offset vmax -= offset raw_step = (vmax-vmin)/nbins scaled_raw_step = raw_step/scale for step in self._steps: if step < scaled_raw_step: continue step *= scale best_vmin = step*divmod(vmin, step)[0] best_vmax = best_vmin + step*nbins if (best_vmax >= vmax): break if self._trim: extra_bins = int(divmod((best_vmax - vmax), step)[0]) nbins -= extra_bins return (np.arange(nbins+1) * step + best_vmin + offset) def __call__(self): vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) return self.bin_boundaries(vmin, vmax) def view_limits(self, dmin, dmax): if self._symmetric: maxabs = max(abs(dmin), abs(dmax)) dmin = -maxabs dmax = maxabs dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05) return np.take(self.bin_boundaries(dmin, dmax), [0,-1]) def decade_down(x, base=10): 'floor x to the nearest lower decade' lx = math.floor(math.log(x)/math.log(base)) return base**lx def decade_up(x, base=10): 'ceil x to the nearest higher decade' lx = math.ceil(math.log(x)/math.log(base)) return base**lx def is_decade(x,base=10): lx = math.log(x)/math.log(base) return lx==int(lx) class LogLocator(Locator): """ Determine the tick locations for log axes """ def __init__(self, base=10.0, subs=[1.0]): """ place ticks on the location= base**i*subs[j] """ self.base(base) self.subs(subs) self.numticks = 15 def base(self,base): """ set the base of the log scaling (major tick every base**i, i interger) """ self._base=base+0.0 def subs(self,subs): """ set the minor ticks the log scaling every base**i*subs[j] """ if subs is None: self._subs = None # autosub else: self._subs = np.asarray(subs)+0.0 def _set_numticks(self): self.numticks = 15 # todo; be smart here; this is just for dev def __call__(self): 'Return the locations of the ticks' b=self._base vmin, vmax = self.axis.get_view_interval() if vmin <= 0.0: vmin = self.axis.get_minpos() if vmin <= 0.0: raise ValueError( "Data has no positive values, and therefore can not be log-scaled.") vmin = math.log(vmin)/math.log(b) vmax = math.log(vmax)/math.log(b) if vmax<vmin: vmin, vmax = vmax, vmin numdec = math.floor(vmax)-math.ceil(vmin) if self._subs is None: # autosub if numdec>10: subs = np.array([1.0]) elif numdec>6: subs = np.arange(2.0, b, 2.0) else: subs = np.arange(2.0, b) else: subs = self._subs stride = 1 while numdec/stride+1 > self.numticks: stride += 1 decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride) if len(subs) > 1 or (len(subs == 
1) and subs[0] != 1.0): ticklocs = [] for decadeStart in b**decades: ticklocs.extend( subs*decadeStart ) else: ticklocs = b**decades return np.array(ticklocs) def view_limits(self, vmin, vmax): 'Try to choose the view limits intelligently' if vmax<vmin: vmin, vmax = vmax, vmin minpos = self.axis.get_minpos() if minpos<=0: raise ValueError( "Data has no positive values, and therefore can not be log-scaled.") if vmin <= minpos: vmin = minpos if not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base) if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base) if vmin==vmax: vmin = decade_down(vmin,self._base) vmax = decade_up(vmax,self._base) result = mtransforms.nonsingular(vmin, vmax) return result class SymmetricalLogLocator(Locator): """ Determine the tick locations for log axes """ def __init__(self, transform, subs=[1.0]): """ place ticks on the location= base**i*subs[j] """ self._transform = transform self._subs = subs self.numticks = 15 def _set_numticks(self): self.numticks = 15 # todo; be smart here; this is just for dev def __call__(self): 'Return the locations of the ticks' b = self._transform.base vmin, vmax = self.axis.get_view_interval() vmin, vmax = self._transform.transform((vmin, vmax)) if vmax<vmin: vmin, vmax = vmax, vmin numdec = math.floor(vmax)-math.ceil(vmin) if self._subs is None: if numdec>10: subs = np.array([1.0]) elif numdec>6: subs = np.arange(2.0, b, 2.0) else: subs = np.arange(2.0, b) else: subs = np.asarray(self._subs) stride = 1 while numdec/stride+1 > self.numticks: stride += 1 decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride) if len(subs) > 1 or subs[0] != 1.0: ticklocs = [] for decade in decades: ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade))) else: ticklocs = np.sign(decades) * b ** np.abs(decades) return np.array(ticklocs) def view_limits(self, vmin, vmax): 'Try to choose the view limits intelligently' b = self._transform.base if vmax<vmin: vmin, vmax = vmax, vmin if not is_decade(abs(vmin), b): if vmin < 0: vmin = -decade_up(-vmin, b) else: vmin = decade_down(vmin, b) if not is_decade(abs(vmax), b): if vmax < 0: vmax = -decade_down(-vmax, b) else: vmax = decade_up(vmax, b) if vmin == vmax: if vmin < 0: vmin = -decade_up(-vmin, b) vmax = -decade_down(-vmax, b) else: vmin = decade_down(vmin, b) vmax = decade_up(vmax, b) result = mtransforms.nonsingular(vmin, vmax) return result class AutoLocator(MaxNLocator): def __init__(self): MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10]) class OldAutoLocator(Locator): """ On autoscale this class picks the best MultipleLocator to set the view limits and the tick locs. 
""" def __init__(self): self._locator = LinearLocator() def __call__(self): 'Return the locations of the ticks' self.refresh() return self._locator() def refresh(self): 'refresh internal information based on current lim' vmin, vmax = self.axis.get_view_interval() vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05) d = abs(vmax-vmin) self._locator = self.get_locator(d) def view_limits(self, vmin, vmax): 'Try to choose the view limits intelligently' d = abs(vmax-vmin) self._locator = self.get_locator(d) return self._locator.view_limits(vmin, vmax) def get_locator(self, d): 'pick the best locator based on a distance' d = abs(d) if d<=0: locator = MultipleLocator(0.2) else: try: ld = math.log10(d) except OverflowError: raise RuntimeError('AutoLocator illegal data interval range') fld = math.floor(ld) base = 10**fld #if ld==fld: base = 10**(fld-1) #else: base = 10**fld if d >= 5*base : ticksize = base elif d >= 2*base : ticksize = base/2.0 else : ticksize = base/5.0 locator = MultipleLocator(ticksize) return locator __all__ = ('TickHelper', 'Formatter', 'FixedFormatter', 'NullFormatter', 'FuncFormatter', 'FormatStrFormatter', 'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent', 'LogFormatterMathtext', 'Locator', 'IndexLocator', 'FixedLocator', 'NullLocator', 'LinearLocator', 'LogLocator', 'AutoLocator', 'MultipleLocator', 'MaxNLocator', )
agpl-3.0
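The Locator subclasses above (MaxNLocator, MultipleLocator, LogLocator, ...) are normally driven through an Axes rather than instantiated standalone. A minimal usage sketch against the standard matplotlib.ticker module (an illustration of the API, not part of the vendored file above):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, MultipleLocator, LogLocator

x = np.linspace(0.1, 10, 200)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 3))

# At most ~5 "nice" major ticks on the y axis, chosen from 1-2-5 style steps.
ax1.plot(x, x ** 2)
ax1.yaxis.set_major_locator(MaxNLocator(nbins=5))
# Major x ticks at every multiple of 2.
ax1.xaxis.set_major_locator(MultipleLocator(2.0))

# Ticks at powers of 10 on a log-scaled axis.
ax2.plot(x, x ** 2)
ax2.set_yscale('log')
ax2.yaxis.set_major_locator(LogLocator(base=10.0))

plt.tight_layout()
plt.show()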
THEdavehogue/glassdoor-analysis
topic_modeling.py
1
9413
import os import sys import numpy as np import pandas as pd import spacy import matplotlib.pyplot as plt from PIL import Image from clean_text import STOPLIST from wordcloud import WordCloud from itertools import combinations from progressbar import ProgressBar from sklearn.decomposition import NMF from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import TfidfVectorizer plt.style.use('ggplot') class NMFCluster(object): ''' Class to run NMF clustering on a corpus of text INPUT: df: pandas Dataframe containing 'lemmatized_text' column for TF-IDF ''' def __init__(self, pro_or_con, max_topics, optimum_topics=None, tfidf_max_features=None, tfidf_max_df=0.9, tfidf_min_df=1000, nmf_alpha=0.1, nmf_l1_ratio=0.5, random_state=None): self.pro_or_con = pro_or_con self.max_topics = int(max_topics) self.num_topics = np.arange(1, max_topics + 1) self.optimum_topics = optimum_topics self.reconstruction_err_array = [] self.tfidf_max_features = tfidf_max_features self.tfidf_max_df = tfidf_max_df self.tfidf_min_df = tfidf_min_df self.nmf_alpha = nmf_alpha self.nmf_l1_ratio = nmf_l1_ratio self.random_state = random_state self.stop_words = STOPLIST def optimize_nmf(self, df): ''' Function to optimize the number of topics used in NMF clustering. INPUT: df: pandas Dataframe containing 'lemmatized_text' column for TF-IDF ''' self.fit_tfidf(df) if not self.optimum_topics: avg_cosine_sim = [] pbar = ProgressBar() for i in pbar(self.num_topics): cosine_sim = [] self.nmf = NMF(n_components=i, alpha=self.nmf_alpha, l1_ratio=self.nmf_l1_ratio, random_state=self.random_state).fit(self.tfidf_matrix) err = self.nmf.reconstruction_err_ self.H_matrix = self.nmf.components_ if i == 1: avg_cosine_sim.append(1) else: idx_arr = np.arange(i) for combo in combinations(idx_arr, 2): vect_1 = self.H_matrix[:, int(combo[0])].reshape(-1, 1) vect_2 = self.H_matrix[:, int(combo[1])].reshape(-1, 1) sim = cosine_similarity(vect_1, vect_2) cosine_sim.append(sim) avg_cosine_sim.append(np.mean(cosine_sim)) self.reconstruction_err_array.append(err) fig = plt.figure(figsize=(16, 8)) ax_1 = fig.add_subplot(211) ax_1.plot(self.num_topics, self.reconstruction_err_array) ax_1.set_title("Reconstruction Error vs Number of Topics") ax_1.set_xlabel("Number of Topics") ax_1.set_ylabel("Reconstruction Error") ax_2 = fig.add_subplot(212) ax_2.plot(self.num_topics, avg_cosine_sim) ax_2.set_title("Avg Cosine Similarity Between Topics") ax_2.set_xlabel("Number of Topics") ax_2.set_ylabel("Avg Cosine Similarity") plt.tight_layout() if self.pro_or_con == 'pro': img_path = os.path.join('images', 'positive') else: img_path = os.path.join('images', 'negative') plt.savefig(os.path.join(img_path, "nmf_metrics.png")) plt.show() self.optimum_topics = int(raw_input("Desired topics from graph: ")) def fit_nmf(self, df): ''' Function to run NMF clustering on dataframe INPUT: df: pandas Dataframe containing 'lemmatized_text' column for TF-IDF ''' self.optimize_nmf(df) self.nmf = NMF(n_components=self.optimum_topics, alpha=self.nmf_alpha, l1_ratio=self.nmf_l1_ratio, random_state=self.random_state).fit(self.tfidf_matrix) self.W_matrix = self.nmf.transform(self.tfidf_matrix) sums = self.W_matrix.sum(axis=1) self.W_pct = self.W_matrix / sums[:, None] self.labels = self.W_pct >= 0.20 print "Reconstruction Error: {}".format(self.nmf.reconstruction_err_) def fit_tfidf(self, df): ''' Function to fit a TF-IDF matrix to a corpus of text INPUT: df: df with 'lemmatized_text' to analyze ''' self.tfidf = TfidfVectorizer(input='content', 
use_idf=True, lowercase=True, max_features=self.tfidf_max_features, max_df=self.tfidf_max_df, min_df=self.tfidf_min_df) self.tfidf_matrix = self.tfidf.fit_transform( df['lemmatized_text']).toarray() self.tfidf_features = np.array(self.tfidf.get_feature_names()) self.tfidf_reverse_lookup = { word: idx for idx, word in enumerate(self.tfidf_features)} def top_words_by_topic(self, n_top_words, topic=None): ''' Function to find the top n words in a topic INPUT: n_top_words: number of words to print in the topic summary topic: index of topic ''' if topic != None: idx = np.argsort(self.nmf.components_[topic])[-n_top_words:][::-1] return self.tfidf_features[idx] else: idxs = [np.argsort(topic)[-n_top_words:][::-1] for topic in self.nmf.components_] return np.array([self.tfidf_features[idx] for idx in idxs]) def topic_attribution_by_document(self, document_idx): ''' Function to calculate percent attributability for each topic and doc INPUT: document_idx: index of document in corpus ''' idxs = np.where(self.labels[document_idx] == 1)[0] idxs = idxs[np.argsort(self.W_pct[document_idx, idxs])[::-1]] return np.array([(idx, pct) for idx, pct in zip(idxs, self.W_pct[document_idx, idxs])]) def print_topic_summary(self, df, topic_num, num_words=20): ''' Function to print summary of a topic from NMF clustering INPUT: df: pandas DataFrame that NMF clustering was run on topic_num: index of topic from clustering num_words: top n words to print in summary ''' num_reviews = self.labels[:, topic_num].sum() print 'Summary of Topic {}:'.format(topic_num) print 'Number of reviews in topic: {}'.format(num_reviews) print 'Top {} words in topic:'.format(num_words) print self.top_words_by_topic(num_words, topic_num) if not num_reviews: return None def topic_word_frequency(self, topic_idx): ''' Return (word, frequency) tuples for creating word cloud INPUT: topic_idx: int ''' freq_sum = np.sum(self.nmf.components_[topic_idx]) frequencies = [ val / freq_sum for val in self.nmf.components_[topic_idx]] return zip(self.tfidf_features, frequencies) def plot_topic(self, topic_idx): ''' Function to plot a wordcloud based on a topic INPUT: topic_idx: index of topic from NMF clustering ''' title = raw_input('Enter a title for this plot: ') num_reviews = self.labels[:, topic_idx].sum() word_freq = self.topic_word_frequency(topic_idx) wc = WordCloud(width=2000, height=1000, max_words=150, background_color='white') wc.fit_words(word_freq) fig = plt.figure(figsize=(16, 8)) ax = fig.add_subplot(111) ax.set_title('Topic {}: {}\nNumber of Reviews in Topic: {}'.format( topic_idx, title, num_reviews), fontsize=24) ax.axis('off') ax.imshow(wc) name = 'topic_' + str(topic_idx) + '.png' if self.pro_or_con == 'pro': img_path = os.path.join('images', 'positive') else: img_path = os.path.join('images', 'negative') plt.savefig(os.path.join(img_path, name)) plt.show() def visualize_topics(self, df): ''' Function to cycle through all topics and print summary and plot cloud INPUT: df: pandas DataFrame (source for NMF text) ''' for i in range(self.optimum_topics): self.print_topic_summary(df, i) self.plot_topic(i) print '' if __name__ == '__main__': pros_df = pd.read_pickle(os.path.join('data', 'pros_df.pkl')) cons_df = pd.read_pickle(os.path.join('data', 'cons_df.pkl')) nmf_pros = NMFCluster('pro', max_topics=30, optimum_topics=21, # See plots for optimum random_state=42) # Random state for reproduceability nmf_cons = NMFCluster('con', max_topics=30, optimum_topics=15, # See plots for optimum random_state=42) # Random state for reproduceability 
nmf_pros.fit_nmf(pros_df) nmf_cons.fit_nmf(cons_df) nmf_pros.visualize_topics(pros_df) nmf_cons.visualize_topics(cons_df)
gpl-3.0
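The NMFCluster class in topic_modeling.py above wraps a TF-IDF + NMF pipeline; a self-contained sketch of that pattern on a toy corpus with plain scikit-learn (the sparseness/alpha/l1_ratio keywords used above belong to older scikit-learn releases and are deliberately omitted here):

import numpy as np
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
    "great pay and benefits",
    "good benefits but long hours",
    "management is supportive and pay is fair",
    "long hours and poor management",
]

# TF-IDF matrix: rows are documents, columns are terms.
tfidf = TfidfVectorizer(stop_words='english')
X = tfidf.fit_transform(docs)
terms = np.array(tfidf.get_feature_names_out())   # get_feature_names() on scikit-learn < 1.0

# Factor X ~ W H with two non-negative topics.
nmf = NMF(n_components=2, random_state=42)
W = nmf.fit_transform(X)    # document-topic weights
H = nmf.components_         # topic-term weights

# Top terms per topic, analogous to top_words_by_topic() above.
for k, topic in enumerate(H):
    top = terms[np.argsort(topic)[::-1][:3]]
    print("topic %d: %s" % (k, ", ".join(top)))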
BorisJeremic/Real-ESSI-Examples
analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardSoftShear/Area/A_1e2/Normalized_Shear_Stress_Plot.py
48
3533
#!/usr/bin/python import h5py import matplotlib.pylab as plt import matplotlib as mpl import sys import numpy as np; plt.rcParams.update({'font.size': 28}) # set tick width mpl.rcParams['xtick.major.size'] = 10 mpl.rcParams['xtick.major.width'] = 5 mpl.rcParams['xtick.minor.size'] = 10 mpl.rcParams['xtick.minor.width'] = 5 plt.rcParams['xtick.labelsize']=24 mpl.rcParams['ytick.major.size'] = 10 mpl.rcParams['ytick.major.width'] = 5 mpl.rcParams['ytick.minor.size'] = 10 mpl.rcParams['ytick.minor.width'] = 5 plt.rcParams['ytick.labelsize']=24 ############################################################### ## Analytical Solution ############################################################### # Go over each feioutput and plot each one. thefile = "Analytical_Solution_Shear.feioutput"; finput = h5py.File(thefile) # Read the time and displacement times = finput["time"][:] shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:] shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:] shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:] shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:] normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:]; shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ; shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y ); shear_stress = shear_stress_x; shear_strain = shear_strain_x; # Configure the figure filename, according to the input filename. outfig=thefile.replace("_","-") outfigname=outfig.replace("h5.feioutput","pdf") # Plot the figure. Add labels and titles. plt.figure(figsize=(12,10)) plt.plot(shear_strain*5,shear_stress/normal_stress,'-r',label='Analytical Solution', linewidth=4) plt.xlabel(r"Shear Displacement $\Delta_t [mm]$") plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$") ############################################################### ## Numerical Solution ############################################################### # Go over each feioutput and plot each one. thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput"; finput = h5py.File(thefile) # Read the time and displacement times = finput["time"][:] shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:] shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:] shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:] shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:] normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:]; shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ; shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y ); shear_stress = shear_stress_x; shear_strain = shear_strain_x; # Configure the figure filename, according to the input filename. outfig=thefile.replace("_","-") outfigname=outfig.replace("h5.feioutput","pdf") # Plot the figure. Add labels and titles. plt.plot(shear_strain*5,shear_stress/normal_stress,'-k',label='Numerical Solution', linewidth=4) plt.xlabel(r"Shear Displacement $\Delta_t [mm]$") plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$") ######################################################## # # axes = plt.gca() # # axes.set_xlim([-7,7]) # # axes.set_ylim([-1,1]) outfigname = "Normalized_Shear_Stress.pdf"; legend = plt.legend() legend.get_frame().set_linewidth(0.0) legend.get_frame().set_facecolor('none') plt.savefig(outfigname, bbox_inches='tight') # plt.show()
cc0-1.0
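The plotting script above reads element outputs out of a Real-ESSI *.feioutput file, which is an HDF5 container; the generic h5py access pattern it relies on looks like this (the file name and row indices are simply copied from the script and assume the same output layout):

import h5py
import numpy as np

# Placeholder file name taken from the script; any *.feioutput file is an HDF5 container.
with h5py.File("Analytical_Solution_Shear.feioutput", "r") as finput:
    times = finput["time"][:]
    elem_out = finput["/Model/Elements/Element_Outputs"]
    shear_strain_x = elem_out[4, :]    # row indices follow the script above
    shear_stress_x = elem_out[7, :]
    normal_stress = -elem_out[9, :]

# Normalized shear stress, as plotted above.
tau_over_sigma = shear_stress_x / normal_stress
print(times.shape, tau_over_sigma.shape)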
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/examples/svm/plot_svm_kernels.py
1
1969
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= SVM-Kernels ========================================================= Three different types of SVM-Kernels are displayed below. The polynomial and RBF are especially useful when the data-points are not linearly separable. """ print(__doc__) # Code source: Gaël Varoquaux # License: BSD 3 clause import matplotlib.pyplot as plt import numpy as np from sklearn import svm # Our dataset and targets X = np.c_[(.4, -.7), (-1.5, -1), (-1.4, -.9), (-1.3, -1.2), (-1.1, -.2), (-1.2, -.4), (-.5, 1.2), (-1.5, 2.1), (1, 1), # -- (1.3, .8), (1.2, .5), (.2, -2), (.5, -2.4), (.2, -2.3), (0, -2.7), (1.3, 2.1)].T Y = [0] * 8 + [1] * 8 # figure number fignum = 1 # fit the model for kernel in ('linear', 'poly', 'rbf'): clf = svm.SVC(kernel=kernel, gamma=2) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10) plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired) plt.axis('tight') x_min = -3 x_max = 3 y_min = -3 y_max = 3 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
mit
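A stripped-down variant of the kernel comparison above that skips the plotting and only reports how many support vectors each kernel needs on a small synthetic two-class problem:

import numpy as np
from sklearn import svm

# Small synthetic two-class problem in 2-D.
rng = np.random.RandomState(0)
X = np.r_[rng.randn(10, 2) - 1, rng.randn(10, 2) + 1]
Y = [0] * 10 + [1] * 10

for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)
    print(kernel, "support vectors per class:", clf.n_support_)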
mhdella/scikit-learn
sklearn/datasets/species_distributions.py
198
7923
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References: * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes: * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset """ # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause from io import BytesIO from os import makedirs from os.path import join from os.path import exists try: # Python 2 from urllib2 import urlopen PY2 = True except ImportError: # Python 3 from urllib.request import urlopen PY2 = False import numpy as np from sklearn.datasets.base import get_data_home, Bunch from sklearn.externals import joblib DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/" SAMPLES_URL = join(DIRECTORY_URL, "samples.zip") COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip") DATA_ARCHIVE_NAME = "species_coverage.pkz" def _load_coverage(F, header_length=6, dtype=np.int16): """Load a coverage file from an open file object. This will return a numpy array of the given dtype """ header = [F.readline() for i in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = header[b'NODATA_value'] if nodata != -9999: print(nodata) M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data """ if PY2: # Numpy recarray wants Python 2 str but not unicode names = F.readline().strip().split(',') else: # Numpy recarray wants Python 3 str but not bytes... names = F.readline().decode('ascii').strip().split(',') rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4') rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def fetch_species_distributions(data_home=None, download_if_missing=True): """Loader for species distribution dataset from Phillips et. al. (2006) Read more in the :ref:`User Guide <datasets>`. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. 
By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing: optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns -------- The data is returned as a Bunch object with the following attributes: coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1623,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (619,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ------ This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes ----- * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset with scikit-learn """ data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05) dtype = np.int16 if not exists(join(data_home, DATA_ARCHIVE_NAME)): print('Downloading species data from %s to %s' % (SAMPLES_URL, data_home)) X = np.load(BytesIO(urlopen(SAMPLES_URL).read())) for f in X.files: fhandle = BytesIO(X[f]) if 'train' in f: train = _load_csv(fhandle) if 'test' in f: test = _load_csv(fhandle) print('Downloading coverage data from %s to %s' % (COVERAGES_URL, data_home)) X = np.load(BytesIO(urlopen(COVERAGES_URL).read())) coverages = [] for f in X.files: fhandle = BytesIO(X[f]) print(' - converting', f) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9) else: bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME)) return bunch
bsd-3-clause
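Typical use of the loader above through the released scikit-learn API (the first call downloads and caches the data; attribute names follow the Bunch documented in the docstring, and the grid construction mirrors construct_grids() above):

import numpy as np
from sklearn.datasets import fetch_species_distributions

data = fetch_species_distributions()      # downloads and caches on first call

print(data.coverages.shape)               # (14, 1592, 1212) environmental grids
print(data.train['species'][:3])          # species of the first training points

# Same lat/lon grid construction as construct_grids() above.
xmin = data.x_left_lower_corner + data.grid_size
ymin = data.y_left_lower_corner + data.grid_size
xgrid = np.arange(xmin, xmin + data.Nx * data.grid_size, data.grid_size)
ygrid = np.arange(ymin, ymin + data.Ny * data.grid_size, data.grid_size)
print(xgrid.size, ygrid.size)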
lthurlow/Boolean-Constrained-Routing
networkx-1.8.1/networkx/readwrite/tests/test_gml.py
35
3099
#!/usr/bin/env python import io from nose.tools import * from nose import SkipTest import networkx class TestGraph(object): @classmethod def setupClass(cls): global pyparsing try: import pyparsing except ImportError: try: import matplotlib.pyparsing as pyparsing except: raise SkipTest('gml test: pyparsing not available.') def setUp(self): self.simple_data="""Creator me graph [ comment "This is a sample graph" directed 1 IsPlanar 1 pos [ x 0 y 1 ] node [ id 1 label "Node 1" pos [ x 1 y 1 ] ] node [ id 2 pos [ x 1 y 2 ] label "Node 2" ] node [ id 3 label "Node 3" pos [ x 1 y 3 ] ] edge [ source 1 target 2 label "Edge from node 1 to node 2" color [line "blue" thickness 3] ] edge [ source 2 target 3 label "Edge from node 2 to node 3" ] edge [ source 3 target 1 label "Edge from node 3 to node 1" ] ] """ def test_parse_gml(self): G=networkx.parse_gml(self.simple_data,relabel=True) assert_equals(sorted(G.nodes()),\ ['Node 1', 'Node 2', 'Node 3']) assert_equals( [e for e in sorted(G.edges())],\ [('Node 1', 'Node 2'), ('Node 2', 'Node 3'), ('Node 3', 'Node 1')]) assert_equals( [e for e in sorted(G.edges(data=True))],\ [('Node 1', 'Node 2', {'color': {'line': 'blue', 'thickness': 3}, 'label': 'Edge from node 1 to node 2'}), ('Node 2', 'Node 3', {'label': 'Edge from node 2 to node 3'}), ('Node 3', 'Node 1', {'label': 'Edge from node 3 to node 1'})]) def test_read_gml(self): import os,tempfile (fd,fname)=tempfile.mkstemp() fh=open(fname,'w') fh.write(self.simple_data) fh.close() Gin=networkx.read_gml(fname,relabel=True) G=networkx.parse_gml(self.simple_data,relabel=True) assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True))) assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True))) os.close(fd) os.unlink(fname) def test_relabel_duplicate(self): data=""" graph [ label "" directed 1 node [ id 0 label "same" ] node [ id 1 label "same" ] ] """ fh = io.BytesIO(data.encode('UTF-8')) fh.seek(0) assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True) def test_bool(self): G=networkx.Graph() G.add_node(1,on=True) G.add_edge(1,2,on=False) data = '\n'.join(list(networkx.generate_gml(G))) answer ="""graph [ node [ id 0 label 1 on 1 ] node [ id 1 label 2 ] edge [ source 0 target 1 on 0 ] ]""" assert_equal(data,answer)
mit
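The tests above exercise GML round-tripping with the old relabel=True keyword of networkx 1.x; with a current networkx the same round trip looks roughly like this (newer versions relabel by the node "label" attribute by default via the label argument):

import networkx as nx

gml = """graph [
  directed 1
  node [ id 0 label "a" ]
  node [ id 1 label "b" ]
  edge [ source 0 target 1 weight 2 ]
]"""

G = nx.parse_gml(gml)              # nodes keyed by their "label" attribute
print(sorted(G.nodes()))           # ['a', 'b']
print(G['a']['b']['weight'])       # 2

# Serialize back to GML text.
print("\n".join(nx.generate_gml(G)))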
LindaLS/Sausage_Biscuits
architecture/examples/2_nn_autoencoer/load.py
6
1484
# Example implementing 5 layer encoder # Original code taken from # https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py # First train a model using train.py from __future__ import division, print_function, absolute_import # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # Import libraries import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import sys sys.path.insert(0, '../..') # Add path to where TF_Model.py is, if not in the same dir from TF_Model import * # Create TF_Model, a wrapper for models created using tensorflow # Note that the configuration file 'config.txt' must be present in the directory model = TF_Model('model') # Parameters examples_to_show = 10 # Create variables for inputs, outputs and predictions X = tf.placeholder("float", [None, 784]) y_true = X y_pred = model.predict(X) # Restore sess = tf.Session() model.restore(sess, 'example_2') # Applying encode and decode over test set encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]}) # Compare original images with their reconstructions f, a = plt.subplots(2, examples_to_show, figsize=(10, 2)) for i in range(examples_to_show): a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) f.show() plt.draw() plt.waitforbuttonpress()
gpl-3.0
appapantula/scikit-learn
sklearn/preprocessing/__init__.py
268
1319
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', ]
bsd-3-clause
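A short sketch of the most commonly used exports from this package (note that Imputer has since been replaced by sklearn.impute.SimpleImputer in newer releases):

import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder

X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])

# Zero mean / unit variance per column.
print(StandardScaler().fit_transform(X))

# Rescale each column to [0, 1].
print(MinMaxScaler().fit_transform(X))

# Map string labels to integers 0..n_classes-1.
le = LabelEncoder()
print(le.fit_transform(["pro", "con", "pro"]))   # [1 0 1]
print(le.classes_)                               # ['con' 'pro']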
uglyboxer/linear_neuron
net-p3/lib/python3.5/site-packages/sklearn/neighbors/nearest_centroid.py
25
7219
# -*- coding: utf-8 -*- """ Nearest Centroid Classification """ # Author: Robert Layton <robertlayton@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # # License: BSD 3 clause import warnings import numpy as np from scipy import sparse as sp from ..base import BaseEstimator, ClassifierMixin from ..externals.six.moves import xrange from ..metrics.pairwise import pairwise_distances from ..preprocessing import LabelEncoder from ..utils.validation import check_array, check_X_y, check_is_fitted from ..utils.sparsefuncs import csc_median_axis_0 class NearestCentroid(BaseEstimator, ClassifierMixin): """Nearest centroid classifier. Each class is represented by its centroid, with test samples classified to the class with the nearest centroid. Parameters ---------- metric: string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroids for the samples corresponding to each class is the point from which the sum of the distances (according to the metric) of all samples that belong to that particular class are minimized. If the "manhattan" metric is provided, this centroid is the median and for all other metrics, the centroid is now set to be the mean. shrink_threshold : float, optional (default = None) Threshold for shrinking centroids to remove features. Attributes ---------- centroids_ : array-like, shape = [n_classes, n_features] Centroid of each class Examples -------- >>> from sklearn.neighbors.nearest_centroid import NearestCentroid >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = NearestCentroid() >>> clf.fit(X, y) NearestCentroid(metric='euclidean', shrink_threshold=None) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier Notes ----- When used for text classification with tf-idf vectors, this classifier is also known as the Rocchio classifier. References ---------- Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of multiple cancer types by shrunken centroids of gene expression. Proceedings of the National Academy of Sciences of the United States of America, 99(10), 6567-6572. The National Academy of Sciences. """ def __init__(self, metric='euclidean', shrink_threshold=None): self.metric = metric self.shrink_threshold = shrink_threshold def fit(self, X, y): """ Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. Note that centroid shrinking cannot be used with sparse matrices. y : array, shape = [n_samples] Target values (integers) """ # If X is sparse and the metric is "manhattan", store it in a csc # format is easier to calculate the median. 
if self.metric == 'manhattan': X, y = check_X_y(X, y, ['csc']) else: X, y = check_X_y(X, y, ['csr', 'csc']) is_X_sparse = sp.issparse(X) if is_X_sparse and self.shrink_threshold: raise ValueError("threshold shrinking not supported" " for sparse input") n_samples, n_features = X.shape le = LabelEncoder() y_ind = le.fit_transform(y) self.classes_ = classes = le.classes_ n_classes = classes.size if n_classes < 2: raise ValueError('y has less than 2 classes') # Mask mapping each class to it's members. self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) # Number of clusters in each class. nk = np.zeros(n_classes) for cur_class in range(n_classes): center_mask = y_ind == cur_class nk[cur_class] = np.sum(center_mask) if is_X_sparse: center_mask = np.where(center_mask)[0] # XXX: Update other averaging methods according to the metrics. if self.metric == "manhattan": # NumPy does not calculate median of sparse matrices. if not is_X_sparse: self.centroids_[cur_class] = np.median(X[center_mask], axis=0) else: self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) else: if self.metric != 'euclidean': warnings.warn("Averaging for metrics other than " "euclidean and manhattan not supported. " "The average is set to be the mean." ) self.centroids_[cur_class] = X[center_mask].mean(axis=0) if self.shrink_threshold: dataset_centroid_ = np.mean(X, axis=0) # m parameter for determining deviation m = np.sqrt((1. / nk) + (1. / n_samples)) # Calculate deviation using the standard deviation of centroids. variance = (X - self.centroids_[y_ind]) ** 2 variance = variance.sum(axis=0) s = np.sqrt(variance / (n_samples - n_classes)) s += np.median(s) # To deter outliers from affecting the results. mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. ms = mm * s deviation = ((self.centroids_ - dataset_centroid_) / ms) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. signs = np.sign(deviation) deviation = (np.abs(deviation) - self.shrink_threshold) deviation[deviation < 0] = 0 deviation *= signs # Now adjust the centroids using the deviation msd = ms * deviation self.centroids_ = dataset_centroid_[np.newaxis, :] + msd return self def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Notes ----- If the metric constructor parameter is "precomputed", X is assumed to be the distance matrix between the data to be predicted and ``self.centroids_``. """ check_is_fitted(self, 'centroids_') X = check_array(X, accept_sparse='csr') return self.classes_[pairwise_distances( X, self.centroids_, metric=self.metric).argmin(axis=1)]
mit
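The predict rule above is simply "nearest class centroid"; a quick NumPy cross-check of that equivalence for the Euclidean metric without shrinkage:

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
y = np.array([1, 1, 1, 2, 2, 2])

clf = NearestCentroid().fit(X, y)

# Same rule by hand: per-class means, then argmin of Euclidean distance.
classes = np.unique(y)
centroids = np.vstack([X[y == c].mean(axis=0) for c in classes])
x_new = np.array([[-0.8, -1.0]])
d = np.linalg.norm(x_new[:, None, :] - centroids[None, :, :], axis=2)
print(clf.predict(x_new), classes[d.argmin(axis=1)])   # both [1]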
bavardage/statsmodels
statsmodels/sandbox/km_class.py
5
11704
#a class for the Kaplan-Meier estimator import numpy as np from math import sqrt import matplotlib.pyplot as plt class KAPLAN_MEIER(object): def __init__(self, data, timesIn, groupIn, censoringIn): raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py') #store the inputs self.data = data self.timesIn = timesIn self.groupIn = groupIn self.censoringIn = censoringIn def fit(self): #split the data into groups based on the predicting variable #get a set of all the groups groups = list(set(self.data[:,self.groupIn])) #create an empty list to store the data for different groups groupList = [] #create an empty list for each group and add it to groups for i in range(len(groups)): groupList.append([]) #iterate through all the groups in groups for i in range(len(groups)): #iterate though the rows of dataArray for j in range(len(self.data)): #test if this row has the correct group if self.data[j,self.groupIn] == groups[i]: #add the row to groupList groupList[i].append(self.data[j]) #create an empty list to store the times for each group timeList = [] #iterate through all the groups for i in range(len(groupList)): #create an empty list times = [] #iterate through all the rows of the group for j in range(len(groupList[i])): #get a list of all the times in the group times.append(groupList[i][j][self.timesIn]) #get a sorted set of the times and store it in timeList times = list(sorted(set(times))) timeList.append(times) #get a list of the number at risk and events at each time #create an empty list to store the results in timeCounts = [] #create an empty list to hold points for plotting points = [] #create a list for points where censoring occurs censoredPoints = [] #iterate trough each group for i in range(len(groupList)): #initialize a variable to estimate the survival function survival = 1 #initialize a variable to estimate the variance of #the survival function varSum = 0 #initialize a counter for the number at risk riskCounter = len(groupList[i]) #create a list for the counts for this group counts = [] ##create a list for points to plot x = [] y = [] #iterate through the list of times for j in range(len(timeList[i])): if j != 0: if j == 1: #add an indicator to tell if the time #starts a new group groupInd = 1 #add (0,1) to the list of points x.append(0) y.append(1) #add the point time to the right of that x.append(timeList[i][j-1]) y.append(1) #add the point below that at survival x.append(timeList[i][j-1]) y.append(survival) #add the survival to y y.append(survival) else: groupInd = 0 #add survival twice to y y.append(survival) y.append(survival) #add the time twice to x x.append(timeList[i][j-1]) x.append(timeList[i][j-1]) #add each censored time, number of censorings and #its survival to censoredPoints censoredPoints.append([timeList[i][j-1], censoringNum,survival,groupInd]) #add the count to the list counts.append([timeList[i][j-1],riskCounter, eventCounter,survival, sqrt(((survival)**2)*varSum)]) #increment the number at risk riskCounter += -1*(riskChange) #initialize a counter for the change in the number at risk riskChange = 0 #initialize a counter to zero eventCounter = 0 #intialize a counter to tell when censoring occurs censoringCounter = 0 censoringNum = 0 #iterate through the observations in each group for k in range(len(groupList[i])): #check of the observation has the given time if (groupList[i][k][self.timesIn]) == (timeList[i][j]): #increment the number at risk counter riskChange += 1 #check if this is an event or censoring if 
groupList[i][k][self.censoringIn] == 1: #add 1 to the counter eventCounter += 1 else: censoringNum += 1 #check if there are any events at this time if eventCounter != censoringCounter: censoringCounter = eventCounter #calculate the estimate of the survival function survival *= ((float(riskCounter) - eventCounter)/(riskCounter)) try: #calculate the estimate of the variance varSum += (eventCounter)/((riskCounter) *(float(riskCounter)- eventCounter)) except ZeroDivisionError: varSum = 0 #append the last row to counts counts.append([timeList[i][len(timeList[i])-1], riskCounter,eventCounter,survival, sqrt(((survival)**2)*varSum)]) #add the last time once to x x.append(timeList[i][len(timeList[i])-1]) x.append(timeList[i][len(timeList[i])-1]) #add the last survival twice to y y.append(survival) #y.append(survival) censoredPoints.append([timeList[i][len(timeList[i])-1], censoringNum,survival,1]) #add the list for the group to al ist for all the groups timeCounts.append(np.array(counts)) points.append([x,y]) #returns a list of arrays, where each array has as it columns: the time, #the number at risk, the number of events, the estimated value of the #survival function at that time, and the estimated standard error at #that time, in that order self.results = timeCounts self.points = points self.censoredPoints = censoredPoints def plot(self): x = [] #iterate through the groups for i in range(len(self.points)): #plot x and y plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1])) #create lists of all the x and y values x += self.points[i][0] for j in range(len(self.censoredPoints)): #check if censoring is occuring if (self.censoredPoints[j][1] != 0): #if this is the first censored point if (self.censoredPoints[j][3] == 1) and (j == 0): #calculate a distance beyond 1 to place it #so all the points will fit dx = ((1./((self.censoredPoints[j][1])+1.)) *(float(self.censoredPoints[j][0]))) #iterate through all the censored points at this time for k in range(self.censoredPoints[j][1]): #plot a vertical line for censoring plt.vlines((1+((k+1)*dx)), self.censoredPoints[j][2]-0.03, self.censoredPoints[j][2]+0.03) #if this censored point starts a new group elif ((self.censoredPoints[j][3] == 1) and (self.censoredPoints[j-1][3] == 1)): #calculate a distance beyond 1 to place it #so all the points will fit dx = ((1./((self.censoredPoints[j][1])+1.)) *(float(self.censoredPoints[j][0]))) #iterate through all the censored points at this time for k in range(self.censoredPoints[j][1]): #plot a vertical line for censoring plt.vlines((1+((k+1)*dx)), self.censoredPoints[j][2]-0.03, self.censoredPoints[j][2]+0.03) #if this is the last censored point elif j == (len(self.censoredPoints) - 1): #calculate a distance beyond the previous time #so that all the points will fit dx = ((1./((self.censoredPoints[j][1])+1.)) *(float(self.censoredPoints[j][0]))) #iterate through all the points at this time for k in range(self.censoredPoints[j][1]): #plot a vertical line for censoring plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)), self.censoredPoints[j][2]-0.03, self.censoredPoints[j][2]+0.03) #if this is a point in the middle of the group else: #calcuate a distance beyond the current time #to place the point, so they all fit dx = ((1./((self.censoredPoints[j][1])+1.)) *(float(self.censoredPoints[j+1][0]) - self.censoredPoints[j][0])) #iterate through all the points at this time for k in range(self.censoredPoints[j][1]): #plot a vetical line for censoring plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)), 
self.censoredPoints[j][2]-0.03, self.censoredPoints[j][2]+0.03) #set the size of the plot so it extends to the max x and above 1 for y plt.xlim((0,np.max(x))) plt.ylim((0,1.05)) #label the axes plt.xlabel('time') plt.ylabel('survival') plt.show() def show_results(self): #start a string that will be a table of the results resultsString = '' #iterate through all the groups for i in range(len(self.results)): #label the group and header resultsString += ('Group {0}\n\n'.format(i) + 'Time At Risk Events Survival Std. Err\n') for j in self.results[i]: #add the results to the string resultsString += ( '{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format( int(j[0]),int(j[1]),int(j[2]),j[3],j[4])) print(resultsString)
bsd-3-clause
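The class above is flagged as superseded by survival2.py, but the estimator it implements reduces to a cumulative product over event times. A NumPy sketch of the Kaplan-Meier product-limit formula S(t) = prod_{t_i <= t} (1 - d_i / n_i) on toy right-censored data (independent of the class API above):

import numpy as np

# Toy right-censored data: observation time and event flag (1 = event, 0 = censored).
time = np.array([2, 3, 3, 5, 6, 8, 9, 9])
event = np.array([1, 1, 0, 1, 0, 1, 1, 0])

# Distinct event times, events d_i and number at risk n_i at each of them.
t_uniq = np.unique(time[event == 1])
d = np.array([np.sum((time == t) & (event == 1)) for t in t_uniq])
n = np.array([np.sum(time >= t) for t in t_uniq])

# Product-limit estimate S(t) = prod_{t_i <= t} (1 - d_i / n_i).
surv = np.cumprod(1.0 - d / n)
for t, s in zip(t_uniq, surv):
    print("t=%d  S(t)=%.3f" % (t, s))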
scikit-beam/scikit-beam-examples
demos/xrf/demo_xrf_spectrum.py
5
5373
# ###################################################################### # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # # # # @author: Li Li (lili@bnl.gov) # # created on 09/03/2014 # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the Brookhaven Science Associates, Brookhaven # # National Laboratory nor the names of its contributors may be used # # to endorse or promote products derived from this software without # # specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # ######################################################################## from __future__ import absolute_import, division, print_function import numpy as np from skbeam.core.constants import XrfElement from skbeam.core.fitting import gaussian def get_line(ax, name, incident_energy): """ Plot emission lines for a given element. Parameters ---------- name : str or int element name, or atomic number incident_energy : float xray incident energy for fluorescence emission """ e = XrfElement(name) lines = e.emission_line.all ratio = [val for val in e.cs(incident_energy).all if val[1] > 0] i_min = 1e-6 for item in ratio: for data in lines: if item[0] == data[0]: ax.plot([data[1], data[1]], [i_min, item[1]], 'g-', linewidth=2.0) ax.set_title('Emission lines for %s at %s eV' % (name, incident_energy)) ax.set_xlabel('Energy [KeV]') ax.set_ylabel('Intensity') def get_spectrum(ax, name, incident_energy, emax=15): """ Plot fluorescence spectrum for a given element. 
Parameters ---------- name : str or int element name, or atomic number incident_energy : float xray incident energy for fluorescence emission emax : float max value on spectrum """ e = XrfElement(name) lines = e.emission_line.all ratio = [val for val in e.cs(incident_energy).all if val[1] > 0] x = np.arange(0, emax, 0.01) spec = np.zeros(len(x)) i_min = 1e-6 for item in ratio: for data in lines: if item[0] == data[0]: ax.plot([data[1], data[1]], [i_min, item[1]], 'g-', linewidth=2.0) std = 0.1 area = std * np.sqrt(2 * np.pi) for item in ratio: for data in lines: if item[0] == data[0]: spec += gaussian(x, area, data[1], std) * item[1] #plt.semilogy(x, spec) ax.set_title('Simulated spectrum for %s at %s eV' % (name, incident_energy)) ax.set_xlabel('Energy [KeV]') ax.set_ylabel('Intensity') ax.plot(x, spec) def run_demo(): import matplotlib.pyplot as plt e = XrfElement('Cu') print('Cu ka1 = %s' % e.emission_line['ka1']) print('all Cu emission lines\n{}'.format(e.emission_line.all)) print('fluorescence cross section of Cu at 12 eV = %s' % e.cs(12).all) print('showing spectrum for Cu at 12 eV') fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True) ax = ax.ravel() get_line(ax[0], 'Cu', 12) get_spectrum(ax[1], 'Cu', 12) get_line(ax[2], 'Gd', 12) get_spectrum(ax[3], 'Gd', 12) plt.show() if __name__ == "__main__": run_demo()
bsd-3-clause
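get_spectrum() above builds the simulated spectrum as a sum of area-normalized Gaussians weighted by line intensity; the same construction with plain NumPy, using made-up line energies and ratios in place of the skbeam lookups (the approximate Cu K-alpha/K-beta values here are illustrative only):

import numpy as np

def gaussian(x, area, center, sigma):
    # Area-normalized Gaussian; with area = std * sqrt(2*pi), as in the demo above,
    # each peak's height equals its relative line intensity.
    return area / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - center) ** 2 / (2 * sigma ** 2))

# Hypothetical emission lines: (energy in keV, relative intensity), roughly Cu K-alpha / K-beta.
lines = [(8.05, 1.0), (8.90, 0.17)]

x = np.arange(0, 15, 0.01)
std = 0.1
area = std * np.sqrt(2 * np.pi)
spec = np.zeros_like(x)
for energy, ratio in lines:
    spec += gaussian(x, area, energy, std) * ratio

print(x[np.argmax(spec)])   # the peak sits on the strongest line (~8.05)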
atmtools/typhon
typhon/tests/plots/test_colors.py
1
4724
# -*- coding: utf-8 -*- """Testing the functions in typhon.plots.colors. """ import filecmp import os from tempfile import mkstemp import matplotlib.pyplot as plt import matplotlib.colors as mcolors import numpy as np import pytest from typhon.plots import colors class TestColors: """Testing the cm functions.""" ref_dir = os.path.join(os.path.dirname(__file__), "reference", "") def setup_method(self): """Create a temporary file.""" fd, self.f = mkstemp() os.close(fd) def teardown_method(self): """Delete temporary file.""" os.remove(self.f) def test_cmap2rgba(self): """Check colormap to RGB conversion.""" ref = np.loadtxt(os.path.join(self.ref_dir, 'viridis.txt'), comments='%') rgb = colors.cmap2rgba('viridis', 256)[:, :3] # ignore alpha assert np.allclose(ref, rgb, atol=0.001) def test_cmap2rgba_interpolation(self): """Check colormap to RGBA interpolation.""" max_planck_duplicates = np.array([ [0., 0.4627451, 0.40784314, 1.], [0.48235294, 0.70980392, 0.67843137, 1.], [0.74901961, 0.85098039, 0.83137255, 1.], [0.96078431, 0.97254902, 0.97647059, 1.], [0.96078431, 0.97254902, 0.97647059, 1.], ]) max_planck_interpolated = np.array([ [0., 0.4627451, 0.40784314, 1.], [0.36318339, 0.64876586, 0.61158016, 1.], [0.6172549, 0.7812226, 0.75580161, 1.], [0.8038293, 0.88244521, 0.86892734, 1.], [0.96078431, 0.97254902, 0.97647059, 1.], ]) assert np.allclose( max_planck_interpolated, colors.cmap2rgba('max_planck', 5, interpolate=True) ) assert np.allclose( max_planck_duplicates, colors.cmap2rgba('max_planck', 5, interpolate=False) ) def test_cmap2cpt(self): """Export colormap to cpt file.""" colors.cmap2cpt('viridis', filename=self.f) ref = os.path.join(self.ref_dir, 'viridis.cpt') with open(self.f) as testfile, open(ref) as reffile: assert testfile.readlines() == reffile.readlines() def test_cmap2txt(self): """Export colormap to txt file.""" colors.cmap2txt('viridis', filename=self.f) ref = os.path.join(self.ref_dir, 'viridis.txt') with open(self.f) as testfile, open(ref) as reffile: assert testfile.readlines() == reffile.readlines() def test_cmap2act(self): """Export colormap to act file.""" colors.cmap2act('viridis', filename=self.f) ref = os.path.join(self.ref_dir, 'viridis.act') assert filecmp.cmp(self.f, ref) def test_cmap_from_txt(self): """Import colormap from txt file.""" idx = np.linspace(0, 1, 256) viridis = plt.get_cmap('viridis') cmap = colors.cmap_from_txt(os.path.join( self.ref_dir, 'viridis.txt'), name="viridis_read") assert np.allclose(viridis(idx), cmap(idx), atol=0.001) def test_cmap_from_act(self): """Import colormap from act file.""" idx = np.linspace(0, 1, 256) viridis = plt.get_cmap('viridis') cmap = colors.cmap_from_act( os.path.join(self.ref_dir, 'viridis.act'), name="viridis_read") assert np.allclose(viridis(idx), cmap(idx), atol=0.004) def test_get_material_design(self): """Test the retrieval of material design colors.""" hex_color = colors.get_material_design('red', shade='500') assert hex_color == '#F44336' hex_colors = colors.get_material_design('red', shade=None) assert hex_colors == ['#FFEBEE', '#FFCDD2', '#EF9A9A', '#E57373', '#EF5350', '#F44336', '#E53935', '#D32F2F', '#C62828', '#B71C1C', '#FF8A80', '#FF5252', '#FF1744', '#D50000'] def test_get_material_design_valuerror(self): """Test the behavior for undefined material design colors or shades.""" with pytest.raises(ValueError): colors.get_material_design('undefined_color') with pytest.raises(ValueError): colors.get_material_design('red', 'undefined_shade') def test_named_color_mapping(self): """Test if the typhon 
colors are available in the name mapping.""" assert all([c in mcolors.get_named_colors_mapping() for c in colors.TYPHON_COLORS.keys()]) def test_named_color_hex(self): """Test if the 'ty:uhh-red' hex-value is correct.""" assert mcolors.get_named_colors_mapping()['ty:uhh-red'] == '#ee1d23'
mit
ndingwall/scikit-learn
sklearn/decomposition/_base.py
5
5517
"""Principal Component Analysis Base Classes""" # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis A. Engemann <denis-alexander.engemann@inria.fr> # Kyle Kastner <kastnerkyle@gmail.com> # # License: BSD 3 clause import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin from ..utils.validation import check_is_fitted from abc import ABCMeta, abstractmethod class _BasePCA(TransformerMixin, BaseEstimator, metaclass=ABCMeta): """Base class for PCA methods. Warning: This class should not be used directly. Use derived classes instead. """ def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array, shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. """ n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[::len(precision) + 1] += 1. / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision @abstractmethod def fit(self, X, y=None): """Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ def transform(self, X): """Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. 
Returns ------- X_new : array-like, shape (n_samples, n_components) Examples -------- >>> import numpy as np >>> from sklearn.decomposition import IncrementalPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> ipca = IncrementalPCA(n_components=2, batch_size=3) >>> ipca.fit(X) IncrementalPCA(batch_size=3, n_components=2) >>> ipca.transform(X) # doctest: +SKIP """ check_is_fitted(self) X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False) if self.mean_ is not None: X = X - self.mean_ X_transformed = np.dot(X, self.components_.T) if self.whiten: X_transformed /= np.sqrt(self.explained_variance_) return X_transformed def inverse_transform(self, X): """Transform data back to its original space. In other words, return an input X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening. """ if self.whiten: return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_) + self.mean_ else: return np.dot(X, self.components_) + self.mean_
bsd-3-clause
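get_covariance and get_precision in the _BasePCA class above implement the probabilistic-PCA generative model, with the precision obtained through the matrix inversion lemma instead of a direct inverse. A quick numerical check (a sketch on synthetic data, not part of scikit-learn's test suite) that the two routes agree on a fitted PCA:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(300, 10) @ rng.randn(10, 10)   # correlated Gaussian features

pca = PCA(n_components=4).fit(X)
cov = pca.get_covariance()    # components_.T * diag(exp_var - sigma2) * components_ + sigma2 * I
prec = pca.get_precision()    # the same quantity inverted via the matrix inversion lemma

# The lemma-based precision should match the brute-force inverse of the covariance.
print(np.allclose(prec, np.linalg.inv(cov), atol=1e-8))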
johnwu93/find_best_mall
recomendation system/nmf_analysis.py
3
2993
__author__ = 'John' #from mall_count_dataset import dict as data import re import numpy as np from sklearn import decomposition from numpy import linalg as LA def get_category_matrix(data): #get the category count matrix from the joe jean dataset. #This dataset is clean #constants category_size = 0 category_id = dict mall_id = dict mall_size = 0 category_id ={} mall_id ={} #category_id = {'stupid': 100000000000} #fillers to define the type in the dictionary #mall_id = {'stupid': 100000000000} #setting up category representation first for mall in data: for category in data[mall]: if not category in category_id: category_id[category] = category_size category_size = category_size+1 mall_id[mall] = mall_size mall_size = mall_size +1 for category in (' accessories', ' beauty products', ' clothing',): correct_name = re.sub('^ ', '', category) category_id[correct_name] = category_id[category] category_id.pop(category) #category_id.pop('stupid') #mall_id.pop('stupid') #convert the count dataset into a matrix X = np.zeros((category_size, mall_size)) for mall in data: for category in data[mall]: correct_name = re.sub('^ ', '', category) X[category_id[correct_name], mall_id[mall]] = data[mall][category] #create category vector feature_names = {v: k for k, v in category_id.items()} return (X, feature_names) def nmf_feature_extraction(X, n_topics=10, sparse_degree = 1, rand_id=40): #This is used to get features for malls based off their topics #doing nmf on the topics mall = decomposition.NMF(n_components=n_topics, sparseness='components', beta=sparse_degree, random_state = rand_id ).fit(X) #can access by fit_transform return(mall) #see if the reconstruction error matches for the same error. #This will be used to obtain a new latent matrix for the mall def mall_latent_features(X, feat, n_topics=10, sparse_degree = 1): #This is used to get features for malls based off their topics #doing nmf on the topics mall = decomposition.NMF(n_components=n_topics, sparseness='components', beta=sparse_degree ).fit(feat.T) return(mall.components_.T) #This is the lambda function where you plug in stuff def mall_latent_helper(n_topics=10, sparse_degree = 1): return(lambda X, feat: mall_latent_features(X, feat, n_topics, sparse_degree)) #using an offset def get_topics(X, feature_names, n_topics=10, n_top_words=10, sparse_degree=1 ): #obtains the topics of nmf nmf = decomposition.NMF(n_components=n_topics, sparseness='components', beta=sparse_degree ).fit(X.T) #l1 sparseness for topic_idx, topic in enumerate(nmf.components_): print( "Topic #%d:" % topic_idx) print( " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print()
mit
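nmf_analysis.py above builds a category x mall count matrix from nested dicts and reads NMF components off as topics. The following toy re-creation of that pipeline uses made-up data and the current scikit-learn API; note that the sparseness= and beta= keyword arguments used above were removed from sklearn.decomposition.NMF in later releases, so they are not reproduced here.

import numpy as np
from sklearn.decomposition import NMF

# Hypothetical {mall: {category: count}} data in the same shape as the cleaned dataset.
data = {
    "mall_a": {"clothing": 12, "beauty products": 3, "food": 5},
    "mall_b": {"clothing": 2, "electronics": 9, "food": 7},
    "mall_c": {"electronics": 8, "beauty products": 6, "accessories": 4},
}

categories = sorted({c for counts in data.values() for c in counts})
malls = sorted(data)

# category x mall count matrix, as in get_category_matrix above.
X = np.zeros((len(categories), len(malls)))
for j, mall in enumerate(malls):
    for cat, count in data[mall].items():
        X[categories.index(cat), j] = count

# Topics are rows of components_ when fitting on malls x categories.
nmf = NMF(n_components=2, init="nndsvd", random_state=0).fit(X.T)
for k, topic in enumerate(nmf.components_):
    top = [categories[i] for i in topic.argsort()[::-1][:3]]
    print("Topic %d: %s" % (k, ", ".join(top)))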
brenoec/cefetmg.msc.influence.networks
simulation/pycxsimulator.py
1
12602
## "pycxsimulator.py" ## Realtime Simulation GUI for PyCX ## ## Developed by: ## Chun Wong ## email@chunwong.net ## ## Revised by: ## Hiroki Sayama ## sayama@binghamton.edu ## ## Copyright 2012 Chun Wong & Hiroki Sayama ## ## Simulation control & GUI extensions ## Copyright 2013 Przemyslaw Szufel & Bogumil Kaminski ## {pszufe, bkamins}@sgh.waw.pl ## ## Fixing errors due to "the grid and pack problem" by: ## Toshihiro Tanizawa ## tanizawa@ee.kochi-ct.ac.jp ## began at 2016-06-15(Wed) 17:10:17 ## fixed grid() and pack() problem on 2016-06-21(Tue) 18:29:40 ## ## The following two lines should be placed at the beginning of your simulator code: ## ## import matplotlib ## matplotlib.use('TkAgg') import pylab as PL import ttk from Tkinter import * from ttk import Notebook class GUI: ## GUI variables titleText = 'PyCX Simulator' # window title timeInterval = 0 # refresh time in milliseconds running = False modelFigure = None stepSize = 1 currentStep = 0 # Constructor def __init__(self, title='PyCX Simulator', interval=0, stepSize=1, parameterSetters=[]): self.titleText = title self.timeInterval = interval self.stepSize = stepSize self.parameterSetters = parameterSetters self.varEntries = {} self.statusStr = "" self.initGUI() # Initialization def initGUI(self): #create root window self.rootWindow = Tk() self.statusText = StringVar(value=self.statusStr) # at this point, statusStr = "" self.setStatusStr("Simulation not yet started") self.rootWindow.wm_title(self.titleText) # titleText = 'PyCX Simulator' self.rootWindow.protocol('WM_DELETE_WINDOW', self.quitGUI) self.rootWindow.geometry('450x300') self.rootWindow.columnconfigure(0, weight=1) self.rootWindow.rowconfigure(0, weight=1) self.notebook = Notebook(self.rootWindow) # self.notebook.grid(row=0,column=0,padx=2,pady=2,sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:30:25 self.notebook.pack(side=TOP, padx=2, pady=2) self.frameRun = Frame() self.frameSettings = Frame() self.frameParameters = Frame() self.frameInformation = Frame() self.notebook.add(self.frameRun,text="Run") self.notebook.add(self.frameSettings,text="Settings") self.notebook.add(self.frameParameters,text="Parameters") self.notebook.add(self.frameInformation,text="Info") self.notebook.pack(expand=NO, fill=BOTH, padx=5, pady=5 ,side=TOP) # self.notebook.grid(row=0, column=0, padx=5, pady=5, sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:31:02 self.status = Label(self.rootWindow, width=40,height=3, relief=SUNKEN, bd=1, textvariable=self.statusText) # self.status.grid(row=1,column=0,padx=5,pady=5,sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:31:17 self.status.pack(side=TOP, fill=X, padx=5, pady=5, expand=NO) # ----------------------------------- # frameRun # ----------------------------------- # buttonRun self.runPauseString = StringVar() self.runPauseString.set("Run") self.buttonRun = Button(self.frameRun,width=30,height=2,textvariable=self.runPauseString,command=self.runEvent) self.buttonRun.pack(side=TOP, padx=5, pady=5) self.showHelp(self.buttonRun,"Runs the simulation (or pauses the running simulation)") # buttonStep self.buttonStep = Button(self.frameRun,width=30,height=2,text='Step Once',command=self.stepOnce) self.buttonStep.pack(side=TOP, padx=5, pady=5) self.showHelp(self.buttonStep,"Steps the simulation only once") # buttonReset self.buttonReset = Button(self.frameRun,width=30,height=2,text='Reset',command=self.resetModel) self.buttonReset.pack(side=TOP, padx=5, pady=5) self.showHelp(self.buttonReset,"Resets the simulation") # 
----------------------------------- # frameSettings # ----------------------------------- can = Canvas(self.frameSettings) lab = Label(can, width=25,height=1,text="Step size ", justify=LEFT, anchor=W,takefocus=0) lab.pack(side='left') self.stepScale = Scale(can,from_=1, to=50, resolution=1,command=self.changeStepSize,orient=HORIZONTAL, width=25,length=150) self.stepScale.set(self.stepSize) self.showHelp(self.stepScale,"Skips model redraw during every [n] simulation steps\nResults in a faster model run.") self.stepScale.pack(side='left') can.pack(side='top') can = Canvas(self.frameSettings) lab = Label(can, width=25,height=1,text="Step visualization delay in ms ", justify=LEFT, anchor=W,takefocus=0) lab.pack(side='left') self.stepDelay = Scale(can,from_=0, to=max(2000,self.timeInterval), resolution=10,command=self.changeStepDelay,orient=HORIZONTAL, width=25,length=150) self.stepDelay.set(self.timeInterval) self.showHelp(self.stepDelay,"The visualization of each step is delays by the given number of milliseconds.") self.stepDelay.pack(side='left') can.pack(side='top') # -------------------------------------------- # frameInformation # -------------------------------------------- scrollInfo = Scrollbar(self.frameInformation) self.textInformation = Text(self.frameInformation, width=45,height=13,bg='lightgray',wrap=WORD,font=("Courier",10)) scrollInfo.pack(side=RIGHT, fill=Y) self.textInformation.pack(side=LEFT,fill=BOTH,expand=YES) scrollInfo.config(command=self.textInformation.yview) self.textInformation.config(yscrollcommand=scrollInfo.set) # -------------------------------------------- # ParameterSetters # -------------------------------------------- for variableSetter in self.parameterSetters: can = Canvas(self.frameParameters) lab = Label(can, width=25,height=1,text=variableSetter.__name__+" ",anchor=W,takefocus=0) lab.pack(side='left') ent = Entry(can, width=11) ent.insert(0, str(variableSetter())) if variableSetter.__doc__ != None and len(variableSetter.__doc__) > 0: self.showHelp(ent,variableSetter.__doc__.strip()) ent.pack(side='left') can.pack(side='top') self.varEntries[variableSetter]=ent if len(self.parameterSetters) > 0: self.buttonSaveParameters = Button(self.frameParameters,width=50,height=1, command=self.saveParametersCmd,text="Save parameters to the running model",state=DISABLED) self.showHelp(self.buttonSaveParameters, "Saves the parameter values.\nNot all values may take effect on a running model\nA model reset might be required.") self.buttonSaveParameters.pack(side='top',padx=5,pady=5) self.buttonSaveParametersAndReset = Button(self.frameParameters,width=50,height=1, command=self.saveParametersAndResetCmd,text="Save parameters to the model and reset the model") self.showHelp(self.buttonSaveParametersAndReset,"Saves the given parameter values and resets the model") self.buttonSaveParametersAndReset.pack(side='top',padx=5,pady=5) # <<<<< Init >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def setStatusStr(self,newStatus): self.statusStr = newStatus self.statusText.set(self.statusStr) # model control functions for changing parameters def changeStepSize(self,val): self.stepSize = int(val) def changeStepDelay(self,val): self.timeInterval= int(val) def saveParametersCmd(self): for variableSetter in self.parameterSetters: variableSetter(float(self.varEntries[variableSetter].get())) self.setStatusStr("New parameter values have been set") def saveParametersAndResetCmd(self): self.saveParametersCmd() self.resetModel() # <<<< 
runEvent >>>>> # This event is envoked when "Run" button is clicked. def runEvent(self): self.running = not self.running if self.running: self.rootWindow.after(self.timeInterval,self.stepModel) self.runPauseString.set("Pause") self.buttonStep.configure(state=DISABLED) self.buttonReset.configure(state=DISABLED) if len(self.parameterSetters) > 0: self.buttonSaveParameters.configure(state=NORMAL) self.buttonSaveParametersAndReset.configure(state=DISABLED) else: self.runPauseString.set("Continue Run") self.buttonStep.configure(state=NORMAL) self.buttonReset.configure(state=NORMAL) if len(self.parameterSetters) > 0: self.buttonSaveParameters.configure(state=NORMAL) self.buttonSaveParametersAndReset.configure(state=NORMAL) def stepModel(self): if self.running: self.modelStepFunc() self.currentStep += 1 self.setStatusStr("Step "+str(self.currentStep)) self.status.configure(foreground='black') if (self.currentStep) % self.stepSize == 0: self.drawModel() self.rootWindow.after(int(self.timeInterval*1.0/self.stepSize),self.stepModel) def stepOnce(self): self.running = False self.runPauseString.set("Continue Run") self.modelStepFunc() self.currentStep += 1 self.setStatusStr("Step "+str(self.currentStep)) self.drawModel() if len(self.parameterSetters) > 0: self.buttonSaveParameters.configure(state=NORMAL) def resetModel(self): self.running = False self.runPauseString.set("Run") self.modelInitFunc() self.currentStep = 0; self.setStatusStr("Model has been reset") self.drawModel() def drawModel(self): PL.ion() # bug fix by Alex Hill in 2013 if self.modelFigure == None or self.modelFigure.canvas.manager.window == None: self.modelFigure = PL.figure() self.modelDrawFunc() self.modelFigure.canvas.manager.window.update() PL.show() # bug fix by Hiroki Sayama in 2016 def start(self,func=[]): if len(func)==3: self.modelInitFunc = func[0] self.modelDrawFunc = func[1] self.modelStepFunc = func[2] if (self.modelStepFunc.__doc__ != None and len(self.modelStepFunc.__doc__)>0): self.showHelp(self.buttonStep,self.modelStepFunc.__doc__.strip()) if (self.modelInitFunc.__doc__ != None and len(self.modelInitFunc.__doc__)>0): self.textInformation.config(state=NORMAL) self.textInformation.delete(1.0, END) self.textInformation.insert(END, self.modelInitFunc.__doc__.strip()) self.textInformation.config(state=DISABLED) self.modelInitFunc() self.drawModel() self.rootWindow.mainloop() def quitGUI(self): PL.close('all') self.rootWindow.quit() self.rootWindow.destroy() def showHelp(self, widget,text): def setText(self): self.statusText.set(text) self.status.configure(foreground='blue') def showHelpLeave(self): self.statusText.set(self.statusStr) self.status.configure(foreground='black') widget.bind("<Enter>", lambda e : setText(self)) widget.bind("<Leave>", lambda e : showHelpLeave(self))
mit
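The GUI class above is driven by three user callbacks handed to start(). A minimal random-walk model wired to it would look roughly like the sketch below; it assumes the file above is importable as pycxsimulator and keeps the TkAgg backend selection the header comments require. Its Tkinter/ttk imports are Python 2 era, so running this unchanged needs Python 2 or a ported import block.

import matplotlib
matplotlib.use('TkAgg')
import pylab as PL
import numpy as np

import pycxsimulator  # the module defined above

state = {}

def init():
    """Random walk demo: a single walker starting at the origin."""
    state['xy'] = np.zeros(2)
    state['trace'] = [state['xy'].copy()]

def draw():
    PL.cla()
    trace = np.array(state['trace'])
    PL.plot(trace[:, 0], trace[:, 1], '-o', ms=3)
    PL.axis('equal')
    PL.title('step %d' % (len(state['trace']) - 1))

def step():
    state['xy'] = state['xy'] + np.random.normal(scale=0.1, size=2)
    state['trace'].append(state['xy'].copy())

# start() expects exactly [init, draw, step], as checked in GUI.start above.
pycxsimulator.GUI().start(func=[init, draw, step])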
rdipietro/tensorflow
tensorflow/python/client/notebook.py
33
4608
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Notebook front-end to TensorFlow. When you run this binary, you'll see something like below, which indicates the serving URL of the notebook: The IPython Notebook is running at: http://127.0.0.1:8888/ Press "Shift+Enter" to execute a cell Press "Enter" on a cell to go into edit mode. Press "Escape" to go back into command mode and use arrow keys to navigate. Press "a" in command mode to insert cell above or "b" to insert cell below. Your root notebooks directory is FLAGS.notebook_dir """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import socket import sys # pylint: disable=g-import-not-at-top # Official recommended way of turning on fast protocol buffers as of 10/21/14 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp" os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2" from tensorflow.python.platform import app from tensorflow.python.platform import flags FLAGS = flags.FLAGS flags.DEFINE_string( "password", None, "Password to require. If set, the server will allow public access." " Only used if notebook config file does not exist.") flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks", "root location where to store notebooks") ORIG_ARGV = sys.argv # Main notebook process calls itself with argv[1]="kernel" to start kernel # subprocesses. IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel" def main(unused_argv): sys.argv = ORIG_ARGV if not IS_KERNEL: # Drop all flags. sys.argv = [sys.argv[0]] # NOTE(sadovsky): For some reason, putting this import at the top level # breaks inline plotting. It's probably a bug in the stone-age version of # matplotlib. from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top notebookapp = NotebookApp.instance() notebookapp.open_browser = True # password functionality adopted from quality/ranklab/main/tools/notebook.py # add options to run with "password" if FLAGS.password: from IPython.lib import passwd # pylint: disable=g-import-not-at-top notebookapp.ip = "0.0.0.0" notebookapp.password = passwd(FLAGS.password) else: print ("\nNo password specified; Notebook server will only be available" " on the local machine.\n") notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir]) if notebookapp.ip == "0.0.0.0": proto = "https" if notebookapp.certfile else "http" url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port, notebookapp.base_project_url) print("\nNotebook server will be publicly available at: %s\n" % url) notebookapp.start() return # Drop the --flagfile flag so that notebook doesn't complain about an # "unrecognized alias" when parsing sys.argv. 
sys.argv = ([sys.argv[0]] + [z for z in sys.argv[1:] if not z.startswith("--flagfile")]) from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top kernelapp = IPKernelApp.instance() kernelapp.initialize() # Enable inline plotting. Equivalent to running "%matplotlib inline". ipshell = kernelapp.shell ipshell.enable_matplotlib("inline") kernelapp.start() if __name__ == "__main__": # When the user starts the main notebook process, we don't touch sys.argv. # When the main process launches kernel subprocesses, it writes all flags # to a tmpfile and sets --flagfile to that tmpfile, so for kernel # subprocesses here we drop all flags *except* --flagfile, then call # app.run(), and then (in main) restore all flags before starting the # kernel app. if IS_KERNEL: # Drop everything except --flagfile. sys.argv = ([sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")]) app.run()
apache-2.0
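The flag juggling above (the main notebook process drops every flag, while kernel subprocesses keep only --flagfile) is easy to get wrong. Here is a standalone sketch of just that argv-filtering logic, separated from the IPython machinery, with a made-up argv for demonstration:

import sys

def filter_argv(argv, is_kernel):
    """Return the argv the notebook or kernel process should actually see."""
    if is_kernel:
        # Kernel subprocess: keep only --flagfile=... so the kernel app does
        # not complain about unrecognized aliases; flags are restored later.
        return [argv[0]] + [a for a in argv[1:] if a.startswith("--flagfile")]
    # Main notebook process: drop all flags.
    return [argv[0]]

if __name__ == "__main__":
    fake_argv = ["notebook.py", "--password=x", "--flagfile=/tmp/flags", "kernel"]
    print(filter_argv(fake_argv, is_kernel=False))  # ['notebook.py']
    print(filter_argv(fake_argv, is_kernel=True))   # ['notebook.py', '--flagfile=/tmp/flags']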
tzulitai/flink
flink-python/pyflink/table/tests/test_pandas_udf.py
1
18807
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import datetime import decimal import unittest import pytz from pyflink.table import DataTypes, Row from pyflink.table.tests.test_udf import SubtractOne from pyflink.table.udf import udf from pyflink.testing import source_sink_utils from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \ PyFlinkBlinkBatchTableTestCase, PyFlinkBlinkStreamTableTestCase, PyFlinkBatchTableTestCase,\ exec_insert_table class PandasUDFTests(unittest.TestCase): def test_non_exist_udf_type(self): with self.assertRaisesRegex(ValueError, 'The udf_type must be one of \'general, pandas\''): udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), udf_type="non-exist") class PandasUDFITTests(object): def test_basic_functionality(self): # pandas UDF self.t_env.create_temporary_system_function( "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), udf_type="pandas")) self.t_env.create_temporary_system_function("add", add) # general Python UDF self.t_env.create_temporary_system_function( "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd'], [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c']) exec_insert_table( t.where("add_one(b) <= 3").select("a, b + 1, add(a + 1, subtract_one(c)) + 2, " "add(add_one(a), 1L)"), "Results") actual = source_sink_utils.results() self.assert_equals(actual, ["1,3,6,3", "3,2,14,5"]) def test_all_data_types(self): import pandas as pd import numpy as np def tinyint_func(tinyint_param): assert isinstance(tinyint_param, pd.Series) assert isinstance(tinyint_param[0], np.int8), \ 'tinyint_param of wrong type %s !' % type(tinyint_param[0]) return tinyint_param def smallint_func(smallint_param): assert isinstance(smallint_param, pd.Series) assert isinstance(smallint_param[0], np.int16), \ 'smallint_param of wrong type %s !' % type(smallint_param[0]) assert smallint_param[0] == 32767, 'smallint_param of wrong value %s' % smallint_param return smallint_param def int_func(int_param): assert isinstance(int_param, pd.Series) assert isinstance(int_param[0], np.int32), \ 'int_param of wrong type %s !' % type(int_param[0]) assert int_param[0] == -2147483648, 'int_param of wrong value %s' % int_param return int_param def bigint_func(bigint_param): assert isinstance(bigint_param, pd.Series) assert isinstance(bigint_param[0], np.int64), \ 'bigint_param of wrong type %s !' 
% type(bigint_param[0]) return bigint_param def boolean_func(boolean_param): assert isinstance(boolean_param, pd.Series) assert isinstance(boolean_param[0], np.bool_), \ 'boolean_param of wrong type %s !' % type(boolean_param[0]) return boolean_param def float_func(float_param): assert isinstance(float_param, pd.Series) assert isinstance(float_param[0], np.float32), \ 'float_param of wrong type %s !' % type(float_param[0]) return float_param def double_func(double_param): assert isinstance(double_param, pd.Series) assert isinstance(double_param[0], np.float64), \ 'double_param of wrong type %s !' % type(double_param[0]) return double_param def varchar_func(varchar_param): assert isinstance(varchar_param, pd.Series) assert isinstance(varchar_param[0], str), \ 'varchar_param of wrong type %s !' % type(varchar_param[0]) return varchar_param def varbinary_func(varbinary_param): assert isinstance(varbinary_param, pd.Series) assert isinstance(varbinary_param[0], bytes), \ 'varbinary_param of wrong type %s !' % type(varbinary_param[0]) return varbinary_param def decimal_func(decimal_param): assert isinstance(decimal_param, pd.Series) assert isinstance(decimal_param[0], decimal.Decimal), \ 'decimal_param of wrong type %s !' % type(decimal_param[0]) return decimal_param def date_func(date_param): assert isinstance(date_param, pd.Series) assert isinstance(date_param[0], datetime.date), \ 'date_param of wrong type %s !' % type(date_param[0]) return date_param def time_func(time_param): assert isinstance(time_param, pd.Series) assert isinstance(time_param[0], datetime.time), \ 'time_param of wrong type %s !' % type(time_param[0]) return time_param timestamp_value = datetime.datetime(1970, 1, 2, 0, 0, 0, 123000) def timestamp_func(timestamp_param): assert isinstance(timestamp_param, pd.Series) assert isinstance(timestamp_param[0], datetime.datetime), \ 'timestamp_param of wrong type %s !' % type(timestamp_param[0]) assert timestamp_param[0] == timestamp_value, \ 'timestamp_param is wrong value %s, should be %s!' % (timestamp_param[0], timestamp_value) return timestamp_param def array_func(array_param): assert isinstance(array_param, pd.Series) assert isinstance(array_param[0], np.ndarray), \ 'array_param of wrong type %s !' % type(array_param[0]) return array_param def nested_array_func(nested_array_param): assert isinstance(nested_array_param, pd.Series) assert isinstance(nested_array_param[0], np.ndarray), \ 'nested_array_param of wrong type %s !' % type(nested_array_param[0]) return pd.Series(nested_array_param[0]) def row_func(row_param): assert isinstance(row_param, pd.Series) assert isinstance(row_param[0], dict), \ 'row_param of wrong type %s !' 
% type(row_param[0]) return row_param self.t_env.create_temporary_system_function( "tinyint_func", udf(tinyint_func, result_type=DataTypes.TINYINT(), udf_type="pandas")) self.t_env.create_temporary_system_function( "smallint_func", udf(smallint_func, result_type=DataTypes.SMALLINT(), udf_type="pandas")) self.t_env.create_temporary_system_function( "int_func", udf(int_func, result_type=DataTypes.INT(), udf_type="pandas")) self.t_env.create_temporary_system_function( "bigint_func", udf(bigint_func, result_type=DataTypes.BIGINT(), udf_type="pandas")) self.t_env.create_temporary_system_function( "boolean_func", udf(boolean_func, result_type=DataTypes.BOOLEAN(), udf_type="pandas")) self.t_env.create_temporary_system_function( "float_func", udf(float_func, result_type=DataTypes.FLOAT(), udf_type="pandas")) self.t_env.create_temporary_system_function( "double_func", udf(double_func, result_type=DataTypes.DOUBLE(), udf_type="pandas")) self.t_env.create_temporary_system_function( "varchar_func", udf(varchar_func, result_type=DataTypes.STRING(), udf_type="pandas")) self.t_env.create_temporary_system_function( "varbinary_func", udf(varbinary_func, result_type=DataTypes.BYTES(), udf_type="pandas")) self.t_env.register_function( "decimal_func", udf(decimal_func, result_type=DataTypes.DECIMAL(38, 18), udf_type="pandas")) self.t_env.create_temporary_system_function( "date_func", udf(date_func, result_type=DataTypes.DATE(), udf_type="pandas")) self.t_env.create_temporary_system_function( "time_func", udf(time_func, result_type=DataTypes.TIME(), udf_type="pandas")) self.t_env.create_temporary_system_function( "timestamp_func", udf(timestamp_func, result_type=DataTypes.TIMESTAMP(3), udf_type="pandas")) self.t_env.create_temporary_system_function( "array_str_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.STRING()), udf_type="pandas")) self.t_env.create_temporary_system_function( "array_timestamp_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.TIMESTAMP(3)), udf_type="pandas")) self.t_env.create_temporary_system_function( "array_int_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.INT()), udf_type="pandas")) self.t_env.create_temporary_system_function( "nested_array_func", udf(nested_array_func, result_type=DataTypes.ARRAY(DataTypes.STRING()), udf_type="pandas")) row_type = DataTypes.ROW( [DataTypes.FIELD("f1", DataTypes.INT()), DataTypes.FIELD("f2", DataTypes.STRING()), DataTypes.FIELD("f3", DataTypes.TIMESTAMP(3)), DataTypes.FIELD("f4", DataTypes.ARRAY(DataTypes.INT()))]) self.t_env.create_temporary_system_function( "row_func", udf(row_func, result_type=row_type, udf_type="pandas")) table_sink = source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u'], [DataTypes.TINYINT(), DataTypes.SMALLINT(), DataTypes.INT(), DataTypes.BIGINT(), DataTypes.BOOLEAN(), DataTypes.BOOLEAN(), DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.STRING(), DataTypes.STRING(), DataTypes.BYTES(), DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18), DataTypes.DATE(), DataTypes.TIME(), DataTypes.TIMESTAMP(3), DataTypes.ARRAY(DataTypes.STRING()), DataTypes.ARRAY(DataTypes.TIMESTAMP(3)), DataTypes.ARRAY(DataTypes.INT()), DataTypes.ARRAY(DataTypes.STRING()), row_type]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements( [(1, 32767, -2147483648, 1, True, False, 1.0, 1.0, 'hello', '中文', bytearray(b'flink'), decimal.Decimal('1000000000000000000.05'), 
decimal.Decimal('1000000000000000000.05999999999999999899999999999'), datetime.date(2014, 9, 13), datetime.time(hour=1, minute=0, second=1), timestamp_value, ['hello', '中文', None], [timestamp_value], [1, 2], [['hello', '中文', None]], Row(1, 'hello', timestamp_value, [1, 2]))], DataTypes.ROW( [DataTypes.FIELD("a", DataTypes.TINYINT()), DataTypes.FIELD("b", DataTypes.SMALLINT()), DataTypes.FIELD("c", DataTypes.INT()), DataTypes.FIELD("d", DataTypes.BIGINT()), DataTypes.FIELD("e", DataTypes.BOOLEAN()), DataTypes.FIELD("f", DataTypes.BOOLEAN()), DataTypes.FIELD("g", DataTypes.FLOAT()), DataTypes.FIELD("h", DataTypes.DOUBLE()), DataTypes.FIELD("i", DataTypes.STRING()), DataTypes.FIELD("j", DataTypes.STRING()), DataTypes.FIELD("k", DataTypes.BYTES()), DataTypes.FIELD("l", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("m", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("n", DataTypes.DATE()), DataTypes.FIELD("o", DataTypes.TIME()), DataTypes.FIELD("p", DataTypes.TIMESTAMP(3)), DataTypes.FIELD("q", DataTypes.ARRAY(DataTypes.STRING())), DataTypes.FIELD("r", DataTypes.ARRAY(DataTypes.TIMESTAMP(3))), DataTypes.FIELD("s", DataTypes.ARRAY(DataTypes.INT())), DataTypes.FIELD("t", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))), DataTypes.FIELD("u", row_type)])) exec_insert_table(t.select("tinyint_func(a)," "smallint_func(b)," "int_func(c)," "bigint_func(d)," "boolean_func(e)," "boolean_func(f)," "float_func(g)," "double_func(h)," "varchar_func(i)," "varchar_func(j)," "varbinary_func(k)," "decimal_func(l)," "decimal_func(m)," "date_func(n)," "time_func(o)," "timestamp_func(p)," "array_str_func(q)," "array_timestamp_func(r)," "array_int_func(s)," "nested_array_func(t)," "row_func(u)"), "Results") actual = source_sink_utils.results() self.assert_equals(actual, ["1,32767,-2147483648,1,true,false,1.0,1.0,hello,中文," "[102, 108, 105, 110, 107],1000000000000000000.050000000000000000," "1000000000000000000.059999999999999999,2014-09-13,01:00:01," "1970-01-02 00:00:00.123,[hello, 中文, null],[1970-01-02 00:00:00.123]," "[1, 2],[hello, 中文, null],1,hello,1970-01-02 00:00:00.123,[1, 2]"]) class BlinkPandasUDFITTests(object): def test_data_types_only_supported_in_blink_planner(self): import pandas as pd timezone = self.t_env.get_config().get_local_timezone() local_datetime = pytz.timezone(timezone).localize( datetime.datetime(1970, 1, 2, 0, 0, 0, 123000)) def local_zoned_timestamp_func(local_zoned_timestamp_param): assert isinstance(local_zoned_timestamp_param, pd.Series) assert isinstance(local_zoned_timestamp_param[0], datetime.datetime), \ 'local_zoned_timestamp_param of wrong type %s !' % type( local_zoned_timestamp_param[0]) assert local_zoned_timestamp_param[0] == local_datetime, \ 'local_zoned_timestamp_param is wrong value %s, %s!' 
% \ (local_zoned_timestamp_param[0], local_datetime) return local_zoned_timestamp_param self.t_env.create_temporary_system_function( "local_zoned_timestamp_func", udf(local_zoned_timestamp_func, result_type=DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3), udf_type="pandas")) table_sink = source_sink_utils.TestAppendSink( ['a'], [DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements( [(local_datetime,)], DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))])) exec_insert_table(t.select("local_zoned_timestamp_func(local_zoned_timestamp_func(a))"), "Results") actual = source_sink_utils.results() self.assert_equals(actual, ["1970-01-02T00:00:00.123Z"]) class StreamPandasUDFITTests(PandasUDFITTests, PyFlinkStreamTableTestCase): pass class BatchPandasUDFITTests(PyFlinkBatchTableTestCase): def test_basic_functionality(self): self.t_env.create_temporary_system_function( "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), udf_type="pandas")) self.t_env.create_temporary_system_function("add", add) # general Python UDF self.t_env.create_temporary_system_function( "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT())) t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c']) t = t.where("add_one(b) <= 3") \ .select("a, b + 1, add(a + 1, subtract_one(c)) + 2, add(add_one(a), 1L)") result = self.collect(t) self.assert_equals(result, ["1,3,6,3", "3,2,14,5"]) class BlinkBatchPandasUDFITTests(PandasUDFITTests, BlinkPandasUDFITTests, PyFlinkBlinkBatchTableTestCase): pass class BlinkStreamPandasUDFITTests(PandasUDFITTests, BlinkPandasUDFITTests, PyFlinkBlinkStreamTableTestCase): pass @udf(result_type=DataTypes.BIGINT(), udf_type='pandas') def add(i, j): return i + j if __name__ == '__main__': import unittest try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports') except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
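Every pandas UDF registered above follows a Series-in/Series-out contract: the runtime hands the function whole pandas.Series batches rather than scalar values, which is exactly what the isinstance(param, pd.Series) assertions verify. A plain-pandas sketch of that contract, with hypothetical toy batches and no Flink dependency:

import numpy as np
import pandas as pd

def add_one(col: pd.Series) -> pd.Series:
    """Vectorised counterpart of the lambda i: i + 1 UDF used above."""
    assert isinstance(col, pd.Series)
    return col + 1

def add(i: pd.Series, j: pd.Series) -> pd.Series:
    """Vectorised two-argument UDF, same shape as the @udf 'add' above."""
    return i + j

batch_a = pd.Series([1, 2, 3], dtype=np.int64)
batch_b = pd.Series([2, 5, 1], dtype=np.int64)
print(add_one(batch_a).tolist())       # [2, 3, 4]
print(add(batch_a, batch_b).tolist())  # [3, 7, 4]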
IssamLaradji/scikit-learn
sklearn/tests/test_common.py
5
16372
""" General tests for all estimators in sklearn. """ # Authors: Andreas Mueller <amueller@ais.uni-bonn.de> # Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause from __future__ import print_function import os import warnings import sys import pkgutil from sklearn.externals.six import PY3 from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_false, clean_warning_registry from sklearn.utils.testing import all_estimators from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_in from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings import sklearn from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_classification from sklearn.cross_validation import train_test_split from sklearn.linear_model.base import LinearClassifierMixin from sklearn.utils.estimator_checks import ( check_parameters_default_constructible, check_regressors_classifiers_sparse_data, check_transformer, check_clustering, check_regressors_int, check_regressors_train, check_regressors_pickle, check_transformer_sparse_data, check_transformer_pickle, check_estimators_nan_inf, check_classifiers_one_label, check_classifiers_train, check_classifiers_classes, check_classifiers_input_shapes, check_classifiers_pickle, check_class_weight_classifiers, check_class_weight_auto_classifiers, check_class_weight_auto_linear_classifier, check_estimators_overwrite_params, check_estimators_partial_fit_n_features, check_cluster_overwrite_params, check_sparsify_binary_classifier, check_sparsify_multiclass_classifier, check_classifier_data_not_an_array, check_regressor_data_not_an_array, check_transformer_data_not_an_array, check_transformer_n_iter, check_non_transformer_estimators_n_iter, CROSS_DECOMPOSITION) def test_all_estimator_no_base_class(): # test that all_estimators doesn't find abstract classes. for name, Estimator in all_estimators(): msg = ("Base estimators such as {0} should not be included" " in all_estimators").format(name) assert_false(name.lower().startswith('base'), msg=msg) def test_all_estimators(): # Test that estimators are default-constructible, clonable # and have working repr. 
estimators = all_estimators(include_meta_estimators=True) # Meta sanity-check to make sure that the estimator introspection runs # properly assert_greater(len(estimators), 0) for name, Estimator in estimators: # some can just not be sensibly default constructed yield check_parameters_default_constructible, name, Estimator def test_estimators_sparse_data(): # All estimators should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message estimators = all_estimators() estimators = [(name, Estimator) for name, Estimator in estimators if issubclass(Estimator, (ClassifierMixin, RegressorMixin))] for name, Estimator in estimators: yield check_regressors_classifiers_sparse_data, name, Estimator def test_transformers(): # test if transformers do something sensible on training set # also test all shapes / shape errors transformers = all_estimators(type_filter='transformer') for name, Transformer in transformers: # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message yield check_transformer_sparse_data, name, Transformer yield check_transformer_pickle, name, Transformer if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer', 'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']: yield check_transformer_data_not_an_array, name, Transformer # these don't actually fit the data, so don't raise errors if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']: # basic tests yield check_transformer, name, Transformer def test_estimators_nan_inf(): # Test that all estimators check their input for NaN's and infs estimators = all_estimators() estimators = [(name, E) for name, E in estimators if (issubclass(E, ClassifierMixin) or issubclass(E, RegressorMixin) or issubclass(E, TransformerMixin) or issubclass(E, ClusterMixin))] for name, Estimator in estimators: if name not in CROSS_DECOMPOSITION + ['Imputer']: yield check_estimators_nan_inf, name, Estimator def test_clustering(): # test if clustering algorithms do something sensible # also test all shapes / shape errors clustering = all_estimators(type_filter='cluster') for name, Alg in clustering: # test whether any classifier overwrites his init parameters during fit yield check_cluster_overwrite_params, name, Alg if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. yield check_clustering, name, Alg yield check_estimators_partial_fit_n_features, name, Alg def test_classifiers(): # test if classifiers can cope with non-consecutive classes classifiers = all_estimators(type_filter='classifier') for name, Classifier in classifiers: # test classfiers can handle non-array data yield check_classifier_data_not_an_array, name, Classifier # test classifiers trained on a single label always return this label yield check_classifiers_one_label, name, Classifier yield check_classifiers_classes, name, Classifier yield check_classifiers_pickle, name, Classifier yield check_estimators_partial_fit_n_features, name, Classifier # basic consistency testing yield check_classifiers_train, name, Classifier if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"] # TODO some complication with -1 label and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]): # We don't raise a warning in these classifiers, as # the column y interface is used by the forests. 
# test if classifiers can cope with y.shape = (n_samples, 1) yield check_classifiers_input_shapes, name, Classifier def test_regressors(): regressors = all_estimators(type_filter='regressor') # TODO: test with intercept # TODO: test with multiple responses for name, Regressor in regressors: # basic testing yield check_regressors_train, name, Regressor yield check_regressor_data_not_an_array, name, Regressor yield check_estimators_partial_fit_n_features, name, Regressor # Test that estimators can be pickled, and once pickled # give the same answer as before. yield check_regressors_pickle, name, Regressor if name != 'CCA': # check that the regressor handles int input yield check_regressors_int, name, Regressor def test_configure(): # Smoke test the 'configure' step of setup, this tests all the # 'configure' functions in the setup.pys in the scikit cwd = os.getcwd() setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..')) setup_filename = os.path.join(setup_path, 'setup.py') if not os.path.exists(setup_filename): return try: os.chdir(setup_path) old_argv = sys.argv sys.argv = ['setup.py', 'config'] clean_warning_registry() with warnings.catch_warnings(): # The configuration spits out warnings when not finding # Blas/Atlas development headers warnings.simplefilter('ignore', UserWarning) if PY3: with open('setup.py') as f: exec(f.read(), dict(__name__='__main__')) else: execfile('setup.py', dict(__name__='__main__')) finally: sys.argv = old_argv os.chdir(cwd) def test_class_weight_classifiers(): # test that class_weight works and that the semantics are consistent classifiers = all_estimators(type_filter='classifier') clean_warning_registry() with warnings.catch_warnings(record=True): classifiers = [c for c in classifiers if 'class_weight' in c[1]().get_params().keys()] for name, Classifier in classifiers: if name == "NuSVC": # the sparse version has a parameter that doesn't do anything continue if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! continue yield check_class_weight_classifiers, name, Classifier def test_class_weight_auto_classifiers(): """Test that class_weight="auto" improves f1-score""" # This test is broken; its success depends on: # * a rare fortuitous RNG seed for make_classification; and # * the use of binary F1 over a seemingly arbitrary positive class for two # datasets, and weighted average F1 for the third. # Its expectations need to be clarified and reimplemented. raise SkipTest('This test requires redefinition') classifiers = all_estimators(type_filter='classifier') clean_warning_registry() with warnings.catch_warnings(record=True): classifiers = [c for c in classifiers if 'class_weight' in c[1]().get_params().keys()] for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]): # create unbalanced dataset X, y = make_classification(n_classes=n_classes, n_samples=200, n_features=10, weights=weights, random_state=0, n_informative=n_classes) X = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) for name, Classifier in classifiers: if (name != "NuSVC" # the sparse version has a parameter that doesn't do anything and not name.startswith("RidgeClassifier") # RidgeClassifier behaves unexpected # FIXME! and not name.endswith("NB")): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! 
yield (check_class_weight_auto_classifiers, name, Classifier, X_train, y_train, X_test, y_test, weights) def test_class_weight_auto_linear_classifiers(): classifiers = all_estimators(type_filter='classifier') clean_warning_registry() with warnings.catch_warnings(record=True): linear_classifiers = [ (name, clazz) for name, clazz in classifiers if 'class_weight' in clazz().get_params().keys() and issubclass(clazz, LinearClassifierMixin)] for name, Classifier in linear_classifiers: if name == "LogisticRegressionCV": # Contrary to RidgeClassifierCV, LogisticRegressionCV use actual # CV folds and fit a model for each CV iteration before averaging # the coef. Therefore it is expected to not behave exactly as the # other linear model. continue yield check_class_weight_auto_linear_classifier, name, Classifier def test_estimators_overwrite_params(): # test whether any classifier overwrites his init parameters during fit for est_type in ["classifier", "regressor", "transformer"]: estimators = all_estimators(type_filter=est_type) for name, Estimator in estimators: if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression', 'PLSSVD', 'GaussianProcess']): # FIXME! # in particular GaussianProcess! yield check_estimators_overwrite_params, name, Estimator @ignore_warnings def test_import_all_consistency(): # Smoke test to check that any name in a __all__ list is actually defined # in the namespace of the module or package. pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.', onerror=lambda _: None) submods = [modname for _, modname, _ in pkgs] for modname in submods + ['sklearn']: if ".tests." in modname: continue package = __import__(modname, fromlist="dummy") for name in getattr(package, '__all__', ()): if getattr(package, name, None) is None: raise AttributeError( "Module '{0}' has no attribute '{1}'".format( modname, name)) def test_root_import_all_completeness(): EXCEPTIONS = ('utils', 'tests', 'base', 'setup') for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__, onerror=lambda _: None): if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS: continue assert_in(modname, sklearn.__all__) def test_sparsify_estimators(): #Test if predict with sparsified estimators works. #Tests regression, binary classification, and multi-class classification. estimators = all_estimators() # test regression and binary classification for name, Estimator in estimators: try: Estimator.sparsify yield check_sparsify_binary_classifier, name, Estimator except: pass # test multiclass classification classifiers = all_estimators(type_filter='classifier') for name, Classifier in classifiers: try: Classifier.sparsify yield check_sparsify_multiclass_classifier, name, Classifier except: pass def test_non_transformer_estimators_n_iter(): # Test that all estimators of type which are non-transformer # and which have an attribute of max_iter, return the attribute # of n_iter atleast 1. for est_type in ['regressor', 'classifier', 'cluster']: regressors = all_estimators(type_filter=est_type) for name, Estimator in regressors: # LassoLars stops early for the default alpha=1.0 for # the iris dataset. if name == 'LassoLars': estimator = Estimator(alpha=0.) else: estimator = Estimator() if hasattr(estimator, "max_iter"): # These models are dependent on external solvers like # libsvm and accessing the iter parameter is non-trivial. 
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC', 'RidgeClassifier', 'SVC', 'RandomizedLasso', 'LogisticRegressionCV']): continue # Tested in test_transformer_n_iter below elif name in CROSS_DECOMPOSITION or ( name in ['LinearSVC', 'LogisticRegression'] ): continue else: # Multitask models related to ENet cannot handle # if y is mono-output. yield (check_non_transformer_estimators_n_iter, name, estimator, 'Multi' in name) def test_transformer_n_iter(): transformers = all_estimators(type_filter='transformer') for name, Estimator in transformers: estimator = Estimator() # Dependent on external solvers and hence accessing the iter # param is non-trivial. external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding', 'RandomizedLasso', 'LogisticRegressionCV'] if hasattr(estimator, "max_iter") and name not in external_solver: yield check_transformer_n_iter, name, estimator
bsd-3-clause
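The common tests above are generator-style: all_estimators() yields (name, class) pairs, optionally filtered by estimator type, and each check is yielded once per estimator. A small standalone loop in the same spirit is sketched below; the import path shown is the modern sklearn.utils location, whereas the file above uses the older sklearn.utils.testing module.

from sklearn.utils import all_estimators

classifiers = all_estimators(type_filter="classifier")
print("found %d classifiers" % len(classifiers))

for name, Estimator in classifiers[:5]:
    # First common check: estimators should be default-constructible and
    # expose their constructor arguments via get_params().
    try:
        est = Estimator()
    except TypeError:
        print("%-30s requires constructor arguments" % name)
        continue
    print("%-30s %d params" % (name, len(est.get_params())))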
sknepneklab/SAMoS
analysis/plot_analysis_nematic/angle_plot_pretty_phi.py
1
7874
# * ************************************************************* # * # * Soft Active Mater on Surfaces (SAMoS) # * # * Author: Rastko Sknepnek # * # * Division of Physics # * School of Engineering, Physics and Mathematics # * University of Dundee # * # * (c) 2013, 2014 # * # * School of Science and Engineering # * School of Life Sciences # * University of Dundee # * # * (c) 2015 # * # * Author: Silke Henkes # * # * Department of Physics # * Institute for Complex Systems and Mathematical Biology # * University of Aberdeen # * # * (c) 2014, 2015 # * # * This program cannot be used, copied, or modified without # * explicit written permission of the authors. # * # * *************************************************************** import sys, os, glob import cPickle as pickle import numpy as np import scipy as sp #from scipy.io import savemat import matplotlib.pyplot as plt from matplotlib.colors import LinearSegmentedColormap #from matplotlib import rc import matplotlib #from mpl_toolkits.mplot3d import Axes3D import sys #import argparse from read_data import * #import numpy as np import numpy.linalg as lin #import matplotlib.pyplot as plt import math as m from datetime import * # setting global parameters #matplotlib.rcParams['text.usetex'] = 'true' matplotlib.rcParams['lines.linewidth'] = 2 matplotlib.rcParams['axes.linewidth'] = 2 matplotlib.rcParams['xtick.major.size'] = 8 matplotlib.rcParams['ytick.major.size'] = 8 matplotlib.rcParams['font.size']=20 matplotlib.rcParams['legend.fontsize']=14 cdict = {'red': [(0.0, 0.75, 0.75), (0.3, 1.0, 1.0), (0.5, 0.4, 0.0), (1.0, 0.0, 0.0)], 'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.5), (0.5, 1.0, 1.0), (0.75, 0.5, 0.0), (1.0, 0.0, 0.0)], 'blue': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (0.7, 1.0, 1.0), (1.0, 0.25, 0.25)]} # This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...) basefolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/' #vList=['0.2','0.3','0.5','0.7','1.0','1.5','2.0','3.0','5.0','7.0','10.0'] JList=['0.01','0.05','0.1','0.5','5.0','10.0'] #vList=['0.2','1.0','5.0'] #RList=['5.0','6.0','7.0','8.0','9.0','10.0','12.0','14.0','16.0','18.0','20.0','25.0','30.0','40.0'] phiList=['0.75','1.0'] R='16' vmap=LinearSegmentedColormap('test',cdict,N=len(JList)) Rmap=LinearSegmentedColormap('test',cdict,N=len(phiList)) nsnap=2020 nskip=1000 nskip=500 step=5000 dt=0.001 r=0 Jval=np.zeros((len(JList),)) avtot=np.zeros((len(phiList),len(JList))) davtot=np.zeros((len(phiList),len(JList))) defectmat=np.empty((nsnap-nskip,4,3)) angles=np.zeros((nsnap-nskip,6)) avang=np.zeros((nsnap-nskip)) ang_correl=np.zeros((nsnap-nskip)) stdang=np.zeros((nsnap-nskip)) bins=np.linspace(0,180,45) dbin=bins[1]-bins[0] for phi in phiList: v=0 plt.figure(figsize=(10,7),linewidth=2.0) for J in JList: print phi,J #/home/silke/Documents/CurrentProjects/Rastko/nematic/data/phi_0.75/defects_phi0.75/defects_J_0.01_R_16.dat infile= basefolder +'/phi_' + phi+'/defects_phi' + phi + '/defects_J_' + J + '_R_' + R +'.dat' # header='theta rho vel energy pressure alpha alpha_v' datamat=(sp.loadtxt(infile, unpack=True)[:,(nskip+1):]).T ndefect=datamat[:,0] # This includes potential zeros. Be very careful in the subsequent analysis # Currently divides by the absolute value of the first element. Which should be fine, but a bit imprecise ... 
defectmat[:,0,:] = datamat[:,1:4]/lin.norm(datamat[0,1:4]) defectmat[:,1,:] = datamat[:,4:7]/lin.norm(datamat[0,4:7]) defectmat[:,2,:] = datamat[:,7:10]/lin.norm(datamat[0,7:10]) defectmat[:,3,:] = datamat[:,10:14]/lin.norm(datamat[0,10:14]) # Defined for two defects angles[:,0] = np.degrees(np.arccos(np.sum(defectmat[:,0,:]*defectmat[:,1,:],axis=1))) # Defined for three defects angles[:,1] = np.degrees(np.arccos(np.sum(defectmat[:,0,:]*defectmat[:,2,:],axis=1))) angles[:,2] = np.degrees(np.arccos(np.sum(defectmat[:,1,:]*defectmat[:,2,:],axis=1))) # Defined for four defects angles[:,3] = np.degrees(np.arccos(np.sum(defectmat[:,0,:]*defectmat[:,3,:],axis=1))) angles[:,4] = np.degrees(np.arccos(np.sum(defectmat[:,1,:]*defectmat[:,3,:],axis=1))) angles[:,5] = np.degrees(np.arccos(np.sum(defectmat[:,2,:]*defectmat[:,3,:],axis=1))) # First stop: Correlations on the mean angle # The whole mean angle story only makes proper sense for four of them # Start with that, at least isfour = [index for index,value in enumerate(ndefect) if value >=4] isthree = [index for index,value in enumerate(ndefect.astype(int)) if value ==3] istwo = [index for index,value in enumerate(ndefect.astype(int)) if value ==2] # One defect is useless, physically impossible and I can't calculate angles anyway isnil = [index for index,value in enumerate(ndefect) if value <=1] print "System spends a fraction " + str(len(isfour)/(1.0*(nsnap-nskip))) +" with four defects, " + str(len(isthree)/(1.0*(nsnap-nskip))) + " with three defects, " print str(len(istwo)/(1.0*(nsnap-nskip))) + " with two defects and " + str(len(isnil)/(1.0*(nsnap-nskip))) + " with one defect or less." # only fill where defined avang[isfour]=np.mean(angles[isfour,:],axis=1) stdang[isfour]=np.std(angles[isfour,:],axis=1) avang[isthree]=np.mean(angles[isthree,0:2],axis=1) stdang[isthree]=np.std(angles[isthree,0:2],axis=1) avang[istwo]=angles[istwo,0] if len(isfour)>0: avtot[r,v]=np.mean(avang[isfour]) # Mean of the variance: how far out ar the other angles? #davtot[r,v]=np.mean(stdang[isfour]) # Variance of the mean: how much fluctuations are there? davtot[r,v]=np.std(avang[isfour]) # Plotting as desired time=np.linspace(nskip*dt*step,nsnap*dt*step,nsnap-nskip) #plt.plot(time,avang,'-',color=vmap(v),label=JList[v]) #plt.plot(time[isfour],avang[isfour],'-',color=vmap(v),label=JList[v]) # Calculate correlation function - but only if were are on four angles throughout. # Deal with the rest later ... complex situation # Same thing with the histogram - the whole flat vs. tetrahedron discussion is for those anyway if len(isfour)>(nsnap-nskip)/2.0: # Angular correlation # Nope, this misses the normalization ... 
avshift=avang-np.mean(avang) #ang_correl = np.correlate(avang-np.mean(avang),avang-np.mean(avang),mode='full') #ang_correl = ang_correl[:(nsnap-nskip)] #ang_correl /= ang_correl[0] for u in range(nsnap-nskip): ang_correl[u]=np.mean(avshift[0:(nsnap-nskip-u)]*avshift[u:]) ang_correl /= ang_correl[0] time=np.linspace(0,(nsnap-nskip)*dt*step,nsnap-nskip) # Plotting for both (comment out as desired) plt.plot(time,ang_correl,'-',color=vmap(v),label=JList[v]) # Histogram ang_hist,bin_edges =np.histogram(np.ravel(angles[isfour,:]),bins=bins,density=True) #plt.plot(bins[:44]+dbin/2,ang_hist,'.-',color=vmap(v),label=JList[v]) Jval[v]=float(JList[v]) v+=1 ## angles plt.xlabel('time') plt.ylabel('angle') ## correlations #plt.xlabel('time') #plt.ylabel('C(t)') #plt.legend(loc=3,ncol=2) #plt.xlim(0,500) #plt.ylim(-0.5,1.05) # Histograms #plt.xlabel('angle') #plt.ylabel('P(angle)') plt.legend(loc=1,ncol=2) #plt.xlim(0,500) #plt.ylim(-0.5,1.05) plt.title('Density ' + str(phi)) r+=1 #plt.figure(figsize=(10,7),linewidth=2.0) #for r in range(len(RList)): #plt.errorbar(vval,avtot[r,:],yerr=davtot[r,:],color=Rmap(r),marker='o',label='R=' + RList[r]) #plt.plot(vval,109.47*vval/vval,'k--') #plt.xlim(0,2) #plt.ylim(0,140) #plt.xlabel('v_0') #plt.ylabel('angle') #plt.legend(loc=3,ncol=1) plt.show()
gpl-3.0
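The defect-angle script above reduces to two core computations: angles between normalised defect position vectors (arccos of dot products) and a normalised autocorrelation of the mean-angle time series. The helpers below capture both, run here on synthetic data rather than the simulation output files:

import numpy as np

def angle_deg(u, v):
    # Angle in degrees between two vectors; the cosine is clipped for
    # numerical safety before arccos.
    c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))

def autocorrelation(x):
    # C(u) = <(x(t) - xbar) * (x(t+u) - xbar)>_t / C(0), matching the
    # explicit lag loop used in the script above.
    x = np.asarray(x, dtype=float) - np.mean(x)
    n = len(x)
    c = np.array([np.mean(x[:n - u] * x[u:]) for u in range(n)])
    return c / c[0]

rng = np.random.default_rng(1)
mean_angles = 109.47 + 5.0 * rng.standard_normal(500)   # noisy tetrahedral angle
print(angle_deg([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))       # 90.0
print(autocorrelation(mean_angles)[:5])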
alanmcruickshank/superset-dev
tests/viz_tests.py
1
23862
from datetime import datetime import unittest from mock import Mock, patch import pandas as pd import superset.utils as utils from superset.utils import DTTM_ALIAS import superset.viz as viz class BaseVizTestCase(unittest.TestCase): def test_constructor_exception_no_datasource(self): form_data = {} datasource = None with self.assertRaises(Exception): viz.BaseViz(datasource, form_data) def test_get_fillna_returns_default_on_null_columns(self): form_data = { 'viz_type': 'table', 'token': '12345', } datasource = {'type': 'table'} test_viz = viz.BaseViz(datasource, form_data) self.assertEqual( test_viz.default_fillna, test_viz.get_fillna_for_columns(), ) def test_get_df_returns_empty_df(self): datasource = Mock() datasource.type = 'table' mock_dttm_col = Mock() mock_dttm_col.python_date_format = Mock() datasource.get_col = Mock(return_value=mock_dttm_col) form_data = {'dummy': 123} query_obj = {'granularity': 'day'} results = Mock() results.query = Mock() results.status = Mock() results.error_message = None results.df = Mock() results.df.empty = True datasource.query = Mock(return_value=results) test_viz = viz.BaseViz(datasource, form_data) result = test_viz.get_df(query_obj) self.assertEqual(type(result), pd.DataFrame) self.assertTrue(result.empty) self.assertEqual(test_viz.error_message, 'No data.') self.assertEqual(test_viz.status, utils.QueryStatus.FAILED) def test_get_df_handles_dttm_col(self): datasource = Mock() datasource.type = 'table' datasource.offset = 1 mock_dttm_col = Mock() mock_dttm_col.python_date_format = 'epoch_ms' datasource.get_col = Mock(return_value=mock_dttm_col) form_data = {'dummy': 123} query_obj = {'granularity': 'day'} results = Mock() results.query = Mock() results.status = Mock() results.error_message = Mock() df = Mock() df.columns = [DTTM_ALIAS] f_datetime = datetime(1960, 1, 1, 5, 0) df.__getitem__ = Mock(return_value=pd.Series([f_datetime])) df.__setitem__ = Mock() df.replace = Mock() df.fillna = Mock() results.df = df results.df.empty = False datasource.query = Mock(return_value=results) test_viz = viz.BaseViz(datasource, form_data) test_viz.get_fillna_for_columns = Mock(return_value=0) test_viz.get_df(query_obj) mock_call = df.__setitem__.mock_calls[0] self.assertEqual(mock_call[1][0], DTTM_ALIAS) self.assertFalse(mock_call[1][1].empty) self.assertEqual(mock_call[1][1][0], f_datetime) mock_call = df.__setitem__.mock_calls[1] self.assertEqual(mock_call[1][0], DTTM_ALIAS) self.assertEqual(mock_call[1][1][0].hour, 6) self.assertEqual(mock_call[1][1].dtype, 'datetime64[ns]') mock_dttm_col.python_date_format = 'utc' test_viz.get_df(query_obj) mock_call = df.__setitem__.mock_calls[2] self.assertEqual(mock_call[1][0], DTTM_ALIAS) self.assertFalse(mock_call[1][1].empty) self.assertEqual(mock_call[1][1][0].hour, 6) mock_call = df.__setitem__.mock_calls[3] self.assertEqual(mock_call[1][0], DTTM_ALIAS) self.assertEqual(mock_call[1][1][0].hour, 7) self.assertEqual(mock_call[1][1].dtype, 'datetime64[ns]') def test_cache_timeout(self): datasource = Mock() form_data = {'cache_timeout': '10'} test_viz = viz.BaseViz(datasource, form_data) self.assertEqual(10, test_viz.cache_timeout) del form_data['cache_timeout'] datasource.cache_timeout = 156 self.assertEqual(156, test_viz.cache_timeout) datasource.cache_timeout = None datasource.database = Mock() datasource.database.cache_timeout = 1666 self.assertEqual(1666, test_viz.cache_timeout) class TableVizTestCase(unittest.TestCase): def test_get_data_applies_percentage(self): form_data = { 'percent_metrics': ['sum__A', 'avg__B'], 
'metrics': ['sum__A', 'count', 'avg__C'], } datasource = Mock() raw = {} raw['sum__A'] = [15, 20, 25, 40] raw['avg__B'] = [10, 20, 5, 15] raw['avg__C'] = [11, 22, 33, 44] raw['count'] = [6, 7, 8, 9] raw['groupA'] = ['A', 'B', 'C', 'C'] raw['groupB'] = ['x', 'x', 'y', 'z'] df = pd.DataFrame(raw) test_viz = viz.TableViz(datasource, form_data) data = test_viz.get_data(df) # Check method correctly transforms data and computes percents self.assertEqual(set([ 'groupA', 'groupB', 'count', 'sum__A', 'avg__C', '%sum__A', '%avg__B', ]), set(data['columns'])) expected = [ { 'groupA': 'A', 'groupB': 'x', 'count': 6, 'sum__A': 15, 'avg__C': 11, '%sum__A': 0.15, '%avg__B': 0.2, }, { 'groupA': 'B', 'groupB': 'x', 'count': 7, 'sum__A': 20, 'avg__C': 22, '%sum__A': 0.2, '%avg__B': 0.4, }, { 'groupA': 'C', 'groupB': 'y', 'count': 8, 'sum__A': 25, 'avg__C': 33, '%sum__A': 0.25, '%avg__B': 0.1, }, { 'groupA': 'C', 'groupB': 'z', 'count': 9, 'sum__A': 40, 'avg__C': 44, '%sum__A': 0.40, '%avg__B': 0.3, }, ] self.assertEqual(expected, data['records']) @patch('superset.viz.BaseViz.query_obj') def test_query_obj_merges_percent_metrics(self, super_query_obj): datasource = Mock() form_data = { 'percent_metrics': ['sum__A', 'avg__B', 'max__Y'], 'metrics': ['sum__A', 'count', 'avg__C'], } test_viz = viz.TableViz(datasource, form_data) f_query_obj = { 'metrics': form_data['metrics'], } super_query_obj.return_value = f_query_obj query_obj = test_viz.query_obj() self.assertEqual([ 'sum__A', 'count', 'avg__C', 'avg__B', 'max__Y', ], query_obj['metrics']) @patch('superset.viz.BaseViz.query_obj') def test_query_obj_throws_columns_and_metrics(self, super_query_obj): datasource = Mock() form_data = { 'all_columns': ['A', 'B'], 'metrics': ['x', 'y'], } super_query_obj.return_value = {} test_viz = viz.TableViz(datasource, form_data) with self.assertRaises(Exception): test_viz.query_obj() del form_data['metrics'] form_data['groupby'] = ['B', 'C'] test_viz = viz.TableViz(datasource, form_data) with self.assertRaises(Exception): test_viz.query_obj() @patch('superset.viz.BaseViz.query_obj') def test_query_obj_merges_all_columns(self, super_query_obj): datasource = Mock() form_data = { 'all_columns': ['colA', 'colB', 'colC'], 'order_by_cols': ['["colA", "colB"]', '["colC"]'], } super_query_obj.return_value = { 'columns': ['colD', 'colC'], 'groupby': ['colA', 'colB'], } test_viz = viz.TableViz(datasource, form_data) query_obj = test_viz.query_obj() self.assertEqual(form_data['all_columns'], query_obj['columns']) self.assertEqual([], query_obj['groupby']) self.assertEqual([['colA', 'colB'], ['colC']], query_obj['orderby']) @patch('superset.viz.BaseViz.query_obj') def test_query_obj_uses_sortby(self, super_query_obj): datasource = Mock() form_data = { 'timeseries_limit_metric': '__time__', 'order_desc': False, } super_query_obj.return_value = { 'metrics': ['colA', 'colB'], } test_viz = viz.TableViz(datasource, form_data) query_obj = test_viz.query_obj() self.assertEqual([ 'colA', 'colB', '__time__', ], query_obj['metrics']) self.assertEqual([( '__time__', True, )], query_obj['orderby']) def test_should_be_timeseries_raises_when_no_granularity(self): datasource = Mock() form_data = {'include_time': True} test_viz = viz.TableViz(datasource, form_data) with self.assertRaises(Exception): test_viz.should_be_timeseries() class PairedTTestTestCase(unittest.TestCase): def test_get_data_transforms_dataframe(self): form_data = { 'groupby': ['groupA', 'groupB', 'groupC'], 'metrics': ['metric1', 'metric2', 'metric3'], } datasource = {'type': 
'table'} # Test data raw = {} raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300] raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) pairedTTestViz = viz.viz_types['paired_ttest'](datasource, form_data) data = pairedTTestViz.get_data(df) # Check method correctly transforms data expected = { 'metric1': [ { 'values': [ {'x': 100, 'y': 1}, {'x': 200, 'y': 2}, {'x': 300, 'y': 3}], 'group': ('a1', 'a2', 'a3'), }, { 'values': [ {'x': 100, 'y': 4}, {'x': 200, 'y': 5}, {'x': 300, 'y': 6}], 'group': ('b1', 'b2', 'b3'), }, { 'values': [ {'x': 100, 'y': 7}, {'x': 200, 'y': 8}, {'x': 300, 'y': 9}], 'group': ('c1', 'c2', 'c3'), }, ], 'metric2': [ { 'values': [ {'x': 100, 'y': 10}, {'x': 200, 'y': 20}, {'x': 300, 'y': 30}], 'group': ('a1', 'a2', 'a3'), }, { 'values': [ {'x': 100, 'y': 40}, {'x': 200, 'y': 50}, {'x': 300, 'y': 60}], 'group': ('b1', 'b2', 'b3'), }, { 'values': [ {'x': 100, 'y': 70}, {'x': 200, 'y': 80}, {'x': 300, 'y': 90}], 'group': ('c1', 'c2', 'c3'), }, ], 'metric3': [ { 'values': [ {'x': 100, 'y': 100}, {'x': 200, 'y': 200}, {'x': 300, 'y': 300}], 'group': ('a1', 'a2', 'a3'), }, { 'values': [ {'x': 100, 'y': 400}, {'x': 200, 'y': 500}, {'x': 300, 'y': 600}], 'group': ('b1', 'b2', 'b3'), }, { 'values': [ {'x': 100, 'y': 700}, {'x': 200, 'y': 800}, {'x': 300, 'y': 900}], 'group': ('c1', 'c2', 'c3'), }, ], } self.assertEqual(data, expected) def test_get_data_empty_null_keys(self): form_data = { 'groupby': [], 'metrics': ['', None], } datasource = {'type': 'table'} # Test data raw = {} raw[DTTM_ALIAS] = [100, 200, 300] raw[''] = [1, 2, 3] raw[None] = [10, 20, 30] df = pd.DataFrame(raw) pairedTTestViz = viz.viz_types['paired_ttest'](datasource, form_data) data = pairedTTestViz.get_data(df) # Check method correctly transforms data expected = { 'N/A': [ { 'values': [ {'x': 100, 'y': 1}, {'x': 200, 'y': 2}, {'x': 300, 'y': 3}], 'group': 'All', }, ], 'NULL': [ { 'values': [ {'x': 100, 'y': 10}, {'x': 200, 'y': 20}, {'x': 300, 'y': 30}], 'group': 'All', }, ], } self.assertEqual(data, expected) class PartitionVizTestCase(unittest.TestCase): @patch('superset.viz.BaseViz.query_obj') def test_query_obj_time_series_option(self, super_query_obj): datasource = Mock() form_data = {} test_viz = viz.PartitionViz(datasource, form_data) super_query_obj.return_value = {} query_obj = test_viz.query_obj() self.assertFalse(query_obj['is_timeseries']) test_viz.form_data['time_series_option'] = 'agg_sum' query_obj = test_viz.query_obj() self.assertTrue(query_obj['is_timeseries']) def test_levels_for_computes_levels(self): raw = {} raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300] raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) groups = ['groupA', 'groupB', 'groupC'] time_op = 'agg_sum' test_viz = viz.PartitionViz(Mock(), {}) levels = test_viz.levels_for(time_op, groups, df) self.assertEqual(4, 
len(levels)) expected = { DTTM_ALIAS: 1800, 'metric1': 45, 'metric2': 450, 'metric3': 4500, } self.assertEqual(expected, levels[0].to_dict()) expected = { DTTM_ALIAS: {'a1': 600, 'b1': 600, 'c1': 600}, 'metric1': {'a1': 6, 'b1': 15, 'c1': 24}, 'metric2': {'a1': 60, 'b1': 150, 'c1': 240}, 'metric3': {'a1': 600, 'b1': 1500, 'c1': 2400}, } self.assertEqual(expected, levels[1].to_dict()) self.assertEqual(['groupA', 'groupB'], levels[2].index.names) self.assertEqual( ['groupA', 'groupB', 'groupC'], levels[3].index.names, ) time_op = 'agg_mean' levels = test_viz.levels_for(time_op, groups, df) self.assertEqual(4, len(levels)) expected = { DTTM_ALIAS: 200.0, 'metric1': 5.0, 'metric2': 50.0, 'metric3': 500.0, } self.assertEqual(expected, levels[0].to_dict()) expected = { DTTM_ALIAS: {'a1': 200, 'c1': 200, 'b1': 200}, 'metric1': {'a1': 2, 'b1': 5, 'c1': 8}, 'metric2': {'a1': 20, 'b1': 50, 'c1': 80}, 'metric3': {'a1': 200, 'b1': 500, 'c1': 800}, } self.assertEqual(expected, levels[1].to_dict()) self.assertEqual(['groupA', 'groupB'], levels[2].index.names) self.assertEqual( ['groupA', 'groupB', 'groupC'], levels[3].index.names, ) def test_levels_for_diff_computes_difference(self): raw = {} raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300] raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) groups = ['groupA', 'groupB', 'groupC'] test_viz = viz.PartitionViz(Mock(), {}) time_op = 'point_diff' levels = test_viz.levels_for_diff(time_op, groups, df) expected = { 'metric1': 6, 'metric2': 60, 'metric3': 600, } self.assertEqual(expected, levels[0].to_dict()) expected = { 'metric1': {'a1': 2, 'b1': 2, 'c1': 2}, 'metric2': {'a1': 20, 'b1': 20, 'c1': 20}, 'metric3': {'a1': 200, 'b1': 200, 'c1': 200}, } self.assertEqual(expected, levels[1].to_dict()) self.assertEqual(4, len(levels)) self.assertEqual(['groupA', 'groupB', 'groupC'], levels[3].index.names) def test_levels_for_time_calls_process_data_and_drops_cols(self): raw = {} raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300] raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) groups = ['groupA', 'groupB', 'groupC'] test_viz = viz.PartitionViz(Mock(), {'groupby': groups}) def return_args(df_drop, aggregate): return df_drop test_viz.process_data = Mock(side_effect=return_args) levels = test_viz.levels_for_time(groups, df) self.assertEqual(4, len(levels)) cols = [DTTM_ALIAS, 'metric1', 'metric2', 'metric3'] self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist())) cols += ['groupA'] self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist())) cols += ['groupB'] self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist())) cols += ['groupC'] self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist())) self.assertEqual(4, len(test_viz.process_data.mock_calls)) def test_nest_values_returns_hierarchy(self): raw = {} raw['groupA'] = 
['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) test_viz = viz.PartitionViz(Mock(), {}) groups = ['groupA', 'groupB', 'groupC'] levels = test_viz.levels_for('agg_sum', groups, df) nest = test_viz.nest_values(levels) self.assertEqual(3, len(nest)) for i in range(0, 3): self.assertEqual('metric' + str(i + 1), nest[i]['name']) self.assertEqual(3, len(nest[0]['children'])) self.assertEqual(1, len(nest[0]['children'][0]['children'])) self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children'])) def test_nest_procs_returns_hierarchy(self): raw = {} raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300] raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1'] raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2'] raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3'] raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9] raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) test_viz = viz.PartitionViz(Mock(), {}) groups = ['groupA', 'groupB', 'groupC'] metrics = ['metric1', 'metric2', 'metric3'] procs = {} for i in range(0, 4): df_drop = df.drop(groups[i:], 1) pivot = df_drop.pivot_table( index=DTTM_ALIAS, columns=groups[:i], values=metrics, ) procs[i] = pivot nest = test_viz.nest_procs(procs) self.assertEqual(3, len(nest)) for i in range(0, 3): self.assertEqual('metric' + str(i + 1), nest[i]['name']) self.assertEqual(None, nest[i].get('val')) self.assertEqual(3, len(nest[0]['children'])) self.assertEqual(3, len(nest[0]['children'][0]['children'])) self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children'])) self.assertEqual( 1, len(nest[0]['children'] [0]['children'] [0]['children'] [0]['children']), ) def test_get_data_calls_correct_method(self): test_viz = viz.PartitionViz(Mock(), {}) df = Mock() with self.assertRaises(ValueError): test_viz.get_data(df) test_viz.levels_for = Mock(return_value=1) test_viz.nest_values = Mock(return_value=1) test_viz.form_data['groupby'] = ['groups'] test_viz.form_data['time_series_option'] = 'not_time' test_viz.get_data(df) self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[0][1][0]) test_viz.form_data['time_series_option'] = 'agg_sum' test_viz.get_data(df) self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[1][1][0]) test_viz.form_data['time_series_option'] = 'agg_mean' test_viz.get_data(df) self.assertEqual('agg_mean', test_viz.levels_for.mock_calls[2][1][0]) test_viz.form_data['time_series_option'] = 'point_diff' test_viz.levels_for_diff = Mock(return_value=1) test_viz.get_data(df) self.assertEqual('point_diff', test_viz.levels_for_diff.mock_calls[0][1][0]) test_viz.form_data['time_series_option'] = 'point_percent' test_viz.get_data(df) self.assertEqual('point_percent', test_viz.levels_for_diff.mock_calls[1][1][0]) test_viz.form_data['time_series_option'] = 'point_factor' test_viz.get_data(df) self.assertEqual('point_factor', test_viz.levels_for_diff.mock_calls[2][1][0]) test_viz.levels_for_time = Mock(return_value=1) test_viz.nest_procs = Mock(return_value=1) test_viz.form_data['time_series_option'] = 'adv_anal' test_viz.get_data(df) self.assertEqual(1, 
len(test_viz.levels_for_time.mock_calls)) self.assertEqual(1, len(test_viz.nest_procs.mock_calls)) test_viz.form_data['time_series_option'] = 'time_series' test_viz.get_data(df) self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[3][1][0]) self.assertEqual(7, len(test_viz.nest_values.mock_calls))
apache-2.0
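A quick standalone pandas sketch (not Superset code) of the arithmetic the percent-metric assertions in the TableViz test above expect — each percent metric is simply the raw column divided by its column total:

import pandas as pd

# Same raw values as the TableViz test above.
df = pd.DataFrame({
    'sum__A': [15, 20, 25, 40],
    'avg__B': [10, 20, 5, 15],
})

# Column-wise normalisation: each value divided by its column's sum.
percents = df / df.sum()
print(percents['sum__A'].tolist())  # [0.15, 0.2, 0.25, 0.4]
print(percents['avg__B'].tolist())  # [0.2, 0.4, 0.1, 0.3]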
abhishekkrthakur/scikit-learn
examples/linear_model/plot_logistic.py
312
1426
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Logit function ========================================================= Shown in the plot is how the logistic regression would, in this synthetic dataset, classify values as either 0 or 1, i.e. class one or two, using the logit-curve. """ print(__doc__) # Code source: Gael Varoquaux # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # this is our test set, it's just a straight line with some # Gaussian noise xmin, xmax = -5, 5 n_samples = 100 np.random.seed(0) X = np.random.normal(size=n_samples) y = (X > 0).astype(np.float) X[X > 0] *= 4 X += .3 * np.random.normal(size=n_samples) X = X[:, np.newaxis] # run the classifier clf = linear_model.LogisticRegression(C=1e5) clf.fit(X, y) # and plot the result plt.figure(1, figsize=(4, 3)) plt.clf() plt.scatter(X.ravel(), y, color='black', zorder=20) X_test = np.linspace(-5, 10, 300) def model(x): return 1 / (1 + np.exp(-x)) loss = model(X_test * clf.coef_ + clf.intercept_).ravel() plt.plot(X_test, loss, color='blue', linewidth=3) ols = linear_model.LinearRegression() ols.fit(X, y) plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1) plt.axhline(.5, color='.5') plt.ylabel('y') plt.xlabel('X') plt.xticks(()) plt.yticks(()) plt.ylim(-.25, 1.25) plt.xlim(-4, 10) plt.show()
bsd-3-clause
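The hand-rolled model() sigmoid in the example above is the same curve LogisticRegression exposes through predict_proba; a short sketch on similar synthetic data confirming the equivalence (standard scikit-learn API, nothing specific to this example assumed):

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 1))
y = (X[:, 0] > 0).astype(int)

clf = LogisticRegression(C=1e5)
clf.fit(X, y)

# sigmoid(decision_function) equals the positive-class column of predict_proba.
manual = 1.0 / (1.0 + np.exp(-clf.decision_function(X)))
assert np.allclose(manual, clf.predict_proba(X)[:, 1])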
effigies/mne-python
examples/realtime/ftclient_rt_average.py
2
2816
""" ======================================================== Compute real-time evoked responses with FieldTrip client ======================================================== This example demonstrates how to connect the MNE real-time system to the Fieldtrip buffer using FieldTripClient class. This example was tested in simulation mode neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif using a modified version of neuromag2ft available at http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip to run the FieldTrip buffer. Then running this example acquires the data on the client side. Since the Fieldtrip buffer does not contain all the measurement information required by the MNE real-time processing pipeline, an info dictionary must be provided to instantiate FieldTripClient. Alternatively, the MNE-Python script will try to guess the missing measurement info from the Fieldtrip Header object. Together with RtEpochs, this can be used to compute evoked responses using moving averages. """ print(__doc__) # Author: Mainak Jas <mainak@neuro.hut.fi> # # License: BSD (3-clause) import mne from mne.viz import plot_events from mne.realtime import FieldTripClient, RtEpochs import matplotlib.pyplot as plt # select the left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 # user must provide list of bad channels because # FieldTrip header object does not provide that bads = ['MEG 2443', 'EEG 053'] plt.ion() # make plot interactive _, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots with FieldTripClient(host='localhost', port=1972, tmax=150, wait_max=10) as rt_client: # get measurement info guessed by MNE-Python raw_info = rt_client.get_measurement_info() # select gradiometers picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True, stim=True, exclude=bads) # create the real-time epochs object rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, stim_channel='STI 014', picks=picks, reject=dict(grad=4000e-13, eog=150e-6), decim=1, isi_max=10.0, proj=None) # start the acquisition rt_epochs.start() for ii, ev in enumerate(rt_epochs.iter_evoked()): print("Just got epoch %d" % (ii + 1)) if ii > 0: ev += evoked evoked = ev ax[0].cla(), ax[1].cla() # clear axis plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'], first_samp=-rt_client.tmin_samp, axes=ax[0]) evoked.plot(axes=ax[1]) # plot on second subplot ax[1].set_title('Evoked response for gradiometer channels' '(event_id = %d)' % event_id) plt.pause(0.05) plt.draw() plt.close()
bsd-3-clause
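The acquisition loop above folds each new single-trial response into the accumulated evoked object; conceptually that is just an incremental (moving) average. A minimal NumPy sketch of that update rule on synthetic arrays, with no FieldTrip buffer or MNE objects required:

import numpy as np

rng = np.random.RandomState(0)
n_channels, n_times = 5, 100

running_mean = np.zeros((n_channels, n_times))
for ii in range(20):                                   # 20 simulated single trials
    epoch = rng.randn(n_channels, n_times)             # stand-in for one acquired epoch
    running_mean += (epoch - running_mean) / (ii + 1)  # incremental mean update

# running_mean now equals the plain average of all simulated trials.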
Mctigger/KagglePlanetPytorch
find_best_threshold.py
1
1496
import numpy as np from sklearn.metrics import fbeta_score, make_scorer import itertools import pathos.multiprocessing def fbeta(true_label, prediction): return fbeta_score(true_label, prediction, beta=2, average='samples') def optimise_f2_thresholds_fast(y, p, iterations=100, verbose=True): best_threshold = [0.2]*17 for t in range(17): best_fbeta = 0 temp_threshhold = [0.2]*17 for i in range(iterations): temp_value = i / float(iterations) temp_threshhold[t] = temp_value temp_fbeta = fbeta(y, p > temp_threshhold) if temp_fbeta > best_fbeta: best_fbeta = temp_fbeta best_threshold[t] = temp_value if verbose: print(t, best_fbeta, best_threshold[t]) return best_threshold def optimise_f2_thresholds(y, p, verbose=True, resolution=100): def mf(x): p2 = np.zeros_like(p) for i in range(17): p2[:, i] = (p[:, i] > x[i]).astype(np.int) score = fbeta_score(y, p2, beta=2, average='samples') return score x = [0.2] * 17 for i in range(17): best_i2 = 0 best_score = 0 for i2 in range(resolution): i2 /= resolution x[i] = i2 score = mf(x) if score > best_score: best_i2 = i2 best_score = score x[i] = best_i2 if verbose: print(i, best_i2, best_score) return x
mit
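A minimal usage sketch for the greedy per-label threshold search above, on synthetic 17-label data (the import path is an assumption; the label count matches the hard-coded 17):

import numpy as np
from find_best_threshold import fbeta, optimise_f2_thresholds_fast  # assumed import path

rng = np.random.RandomState(0)
y_true = (rng.rand(200, 17) > 0.7).astype(int)               # 17 binary labels per sample
y_prob = np.clip(y_true + 0.4 * rng.randn(200, 17), 0, 1)    # noisy "probabilities"

thresholds = optimise_f2_thresholds_fast(y_true, y_prob, iterations=20, verbose=False)
print(fbeta(y_true, y_prob > thresholds))                    # sample-averaged F2 after thresholding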
schae234/gingivere
tests/test_lr.py
2
1117
from sklearn.linear_model import LinearRegression from sklearn.cross_validation import StratifiedKFold import numpy as np from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from tests import shelve_api XX, yy = shelve_api.load('lr') X = XX[2700:] y = yy[2700:] clf = LinearRegression(normalize=True) skf = StratifiedKFold(y, n_folds=2) for train_index, test_index in skf: print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] clf.fit(X_train, y_train) y_true, y_pred = y_test, clf.predict(X_test) for i, num in enumerate(y_pred): if num < 0.0: y_pred[i] = 0.0 continue elif num > 1.0: y_pred[i] = 1.0 continue print(classification_report(np.around(y_true), np.around(y_pred))) print() print(roc_auc_score(y_true, y_pred)) print()
mit
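The element-wise clamping loop above can be expressed as one vectorised call; an equivalent line using the already-imported NumPy (same y_pred array):

y_pred = np.clip(y_pred, 0.0, 1.0)  # clamp all predictions into [0, 1] at once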
xguse/scikit-bio
skbio/stats/distance/_bioenv.py
12
9577
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from itertools import combinations import numpy as np import pandas as pd from scipy.spatial.distance import pdist from scipy.stats import spearmanr from skbio.stats.distance import DistanceMatrix from skbio.util._decorator import experimental @experimental(as_of="0.4.0") def bioenv(distance_matrix, data_frame, columns=None): """Find subset of variables maximally correlated with distances. Finds subsets of variables whose Euclidean distances (after scaling the variables; see Notes section below for details) are maximally rank-correlated with the distance matrix. For example, the distance matrix might contain distances between communities, and the variables might be numeric environmental variables (e.g., pH). Correlation between the community distance matrix and Euclidean environmental distance matrix is computed using Spearman's rank correlation coefficient (:math:`\\rho`). Subsets of environmental variables range in size from 1 to the total number of variables (inclusive). For example, if there are 3 variables, the "best" variable subsets will be computed for subset sizes 1, 2, and 3. The "best" subset is chosen by computing the correlation between the community distance matrix and all possible Euclidean environmental distance matrices at the given subset size. The combination of environmental variables with maximum correlation is chosen as the "best" subset. Parameters ---------- distance_matrix : DistanceMatrix Distance matrix containing distances between objects (e.g., distances between samples of microbial communities). data_frame : pandas.DataFrame Contains columns of variables (e.g., numeric environmental variables such as pH) associated with the objects in `distance_matrix`. Must be indexed by the IDs in `distance_matrix` (i.e., the row labels must be distance matrix IDs), but the order of IDs between `distance_matrix` and `data_frame` need not be the same. All IDs in the distance matrix must be present in `data_frame`. Extra IDs in `data_frame` are allowed (they are ignored in the calculations). columns : iterable of strs, optional Column names in `data_frame` to include as variables in the calculations. If not provided, defaults to all columns in `data_frame`. The values in each column must be numeric or convertible to a numeric type. Returns ------- pandas.DataFrame Data frame containing the "best" subset of variables at each subset size, as well as the correlation coefficient of each. Raises ------ TypeError If invalid input types are provided, or if one or more specified columns in `data_frame` are not numeric. ValueError If column name(s) or `distance_matrix` IDs cannot be found in `data_frame`, if there is missing data (``NaN``) in the environmental variables, or if the environmental variables cannot be scaled (e.g., due to zero variance). See Also -------- scipy.stats.spearmanr Notes ----- See [1]_ for the original method reference (originally called BIO-ENV). The general algorithm and interface are similar to ``vegan::bioenv``, available in R's vegan package [2]_. This method can also be found in PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST). 
.. warning:: This method can take a *long* time to run if a large number of variables are specified, as all possible subsets are evaluated at each subset size. The variables are scaled before computing the Euclidean distance: each column is centered and then scaled by its standard deviation. References ---------- .. [1] Clarke, K. R & Ainsworth, M. 1993. "A method of linking multivariate community structure to environmental variables". Marine Ecology Progress Series, 92, 205-219. .. [2] http://cran.r-project.org/web/packages/vegan/index.html .. [3] http://www.primer-e.com/primer.htm Examples -------- Import the functionality we'll use in the following examples: >>> import pandas as pd >>> from skbio import DistanceMatrix >>> from skbio.stats.distance import bioenv Load a 4x4 community distance matrix: >>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75], ... [0.5, 0.0, 0.1, 0.42], ... [0.25, 0.1, 0.0, 0.33], ... [0.75, 0.42, 0.33, 0.0]], ... ['A', 'B', 'C', 'D']) Load a ``pandas.DataFrame`` with two environmental variables, pH and elevation: >>> df = pd.DataFrame([[7.0, 400], ... [8.0, 530], ... [7.5, 450], ... [8.5, 810]], ... index=['A','B','C','D'], ... columns=['pH', 'Elevation']) Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``, ``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in order to link the environmental variables (metadata) to each of the objects in the distance matrix. In this example, the IDs appear in the same order in both the distance matrix and data frame, but this is not necessary. Find the best subsets of environmental variables that are correlated with community distances: >>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE size correlation vars pH 1 0.771517 pH, Elevation 2 0.714286 We see that in this simple example, pH alone is maximally rank-correlated with the community distances (:math:`\\rho=0.771517`). """ if not isinstance(distance_matrix, DistanceMatrix): raise TypeError("Must provide a DistanceMatrix as input.") if not isinstance(data_frame, pd.DataFrame): raise TypeError("Must provide a pandas.DataFrame as input.") if columns is None: columns = data_frame.columns.values.tolist() if len(set(columns)) != len(columns): raise ValueError("Duplicate column names are not supported.") if len(columns) < 1: raise ValueError("Must provide at least one column.") for column in columns: if column not in data_frame: raise ValueError("Column '%s' not in data frame." % column) # Subset and order the vars data frame to match the IDs in the distance # matrix, only keeping the specified columns. vars_df = data_frame.loc[distance_matrix.ids, columns] if vars_df.isnull().any().any(): raise ValueError("One or more IDs in the distance matrix are not " "in the data frame, or there is missing data in the " "data frame.") try: vars_df = vars_df.astype(float) except ValueError: raise TypeError("All specified columns in the data frame must be " "numeric.") # Scale the vars and extract the underlying numpy array from the data # frame. We mainly do this for performance as we'll be taking subsets of # columns within a tight loop and using a numpy array ends up being ~2x # faster. 
vars_array = _scale(vars_df).values dm_flat = distance_matrix.condensed_form() num_vars = len(columns) var_idxs = np.arange(num_vars) # For each subset size, store the best combination of variables: # (string identifying best vars, subset size, rho) max_rhos = np.empty(num_vars, dtype=[('vars', object), ('size', int), ('correlation', float)]) for subset_size in range(1, num_vars + 1): max_rho = None for subset_idxs in combinations(var_idxs, subset_size): # Compute Euclidean distances using the current subset of # variables. pdist returns the distances in condensed form. vars_dm_flat = pdist(vars_array[:, subset_idxs], metric='euclidean') rho = spearmanr(dm_flat, vars_dm_flat)[0] # If there are ties for the best rho at a given subset size, choose # the first one in order to match vegan::bioenv's behavior. if max_rho is None or rho > max_rho[0]: max_rho = (rho, subset_idxs) vars_label = ', '.join([columns[i] for i in max_rho[1]]) max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0]) return pd.DataFrame.from_records(max_rhos, index='vars') def _scale(df): """Center and scale each column in a data frame. Each column is centered (by subtracting the mean) and then scaled by its standard deviation. """ # Modified from http://stackoverflow.com/a/18005745 df = df.copy() df -= df.mean() df /= df.std() if df.isnull().any().any(): raise ValueError("Column(s) in the data frame could not be scaled, " "likely because the column(s) had no variance.") return df
bsd-3-clause
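The core BIO-ENV step for a single variable subset can be reproduced with scipy and pandas alone — scale the variable, take Euclidean distances, and rank-correlate them with the condensed community distances. A sketch using the same numbers as the docstring example above:

import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr

# Condensed form of the 4x4 community distance matrix from the docstring example.
community = np.array([0.5, 0.25, 0.75, 0.1, 0.42, 0.33])
env = pd.DataFrame({'pH': [7.0, 8.0, 7.5, 8.5]}, index=list('ABCD'))

scaled = (env - env.mean()) / env.std()   # same centring and scaling as _scale()
rho = spearmanr(community, pdist(scaled.values, metric='euclidean'))[0]
print(rho)  # ~0.7715, matching the 'pH' row of the docstring output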
jbedorf/tensorflow
tensorflow/contrib/learn/python/learn/grid_search_test.py
137
2035
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Grid search tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random from tensorflow.contrib.learn.python import learn from tensorflow.python.platform import test HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False) if HAS_SKLEARN: try: # pylint: disable=g-import-not-at-top from sklearn import datasets from sklearn.grid_search import GridSearchCV from sklearn.metrics import accuracy_score except ImportError: HAS_SKLEARN = False class GridSearchTest(test.TestCase): """Grid search tests.""" def testIrisDNN(self): if HAS_SKLEARN: random.seed(42) iris = datasets.load_iris() feature_columns = learn.infer_real_valued_columns_from_input(iris.data) classifier = learn.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3) grid_search = GridSearchCV( classifier, {'hidden_units': [[5, 5], [10, 10]]}, scoring='accuracy', fit_params={'steps': [50]}) grid_search.fit(iris.data, iris.target) score = accuracy_score(iris.target, grid_search.predict(iris.data)) self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score)) if __name__ == '__main__': test.main()
apache-2.0
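The test above wraps a TensorFlow estimator in scikit-learn's GridSearchCV; the same pattern with a stock sklearn estimator (using the same pre-0.20 sklearn.grid_search module the test imports) looks like this sketch:

from sklearn import datasets
from sklearn.grid_search import GridSearchCV  # pre-0.20 location, as imported by the test above
from sklearn.svm import SVC

iris = datasets.load_iris()
search = GridSearchCV(SVC(), {'C': [0.1, 1.0, 10.0]}, scoring='accuracy')
search.fit(iris.data, iris.target)
print(search.best_params_, search.best_score_)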
iszlai/sklearn_pycon2015
notebooks/fig_code/sgd_separator.py
54
1148
import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import SGDClassifier from sklearn.datasets.samples_generator import make_blobs def plot_sgd_separator(): # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([x1, x2]) Z[i, j] = p[0] levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' ax = plt.axes() ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles) ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) ax.axis('tight') if __name__ == '__main__': plot_sgd_separator() plt.show()
bsd-3-clause
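The per-point loop over np.ndenumerate above can be replaced by one vectorised call; a sketch assuming the same clf, X1 and X2 names from plot_sgd_separator (this 2-D input form is also what newer scikit-learn releases require):

import numpy as np

# Evaluate the decision function on every grid point at once, then reshape onto the grid.
Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)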
lesserwhirls/scipy-cwt
scipy/signal/cwt.py
1
25837
import numpy as np from scipy.fftpack import fft, ifft, fftshift __all__ = ['cwt', 'ccwt', 'icwt', 'SDG', 'Morlet'] class MotherWavelet(object): """Class for MotherWavelets. Contains methods related to mother wavelets. Also used to ensure that new mother wavelet objects contain the minimum requirements to be used in the cwt related functions. """ @staticmethod def get_coefs(self): """Raise error if method for calculating mother wavelet coefficients is missing! """ raise NotImplementedError('get_coefs needs to be implemented for the mother wavelet') @staticmethod def get_coi_coef(sampf): """Raise error if Cone of Influence coefficient is not set in subclass wavelet. To follow the convention in the literature, please define your COI coef as a function of period, not scale - this will ensure compatibility with the scalogram method. """ raise NotImplementedError('coi_coef needs to be implemented in subclass wavelet') #add methods for computing cone of influence and mask def get_coi(self): """Compute cone of influence.""" y1 = self.coi_coef * np.arange(0, self.len_signal / 2) y2 = -self.coi_coef * np.arange(0, self.len_signal / 2) + y1[-1] coi = np.r_[y1, y2] self.coi = coi return coi def get_mask(self): """Get mask for cone of influence. Sets self.mask as an array of bools for use in np.ma.array('', mask=mask) """ mask = np.ones(self.coefs.shape) masks = self.coi_coef * self.scales for s in range(0, len(self.scales)): if (s != 0) and (int(np.ceil(masks[s])) < mask.shape[1]): mask[s,np.ceil(int(masks[s])):-np.ceil(int(masks[s]))] = 0 self.mask = mask.astype(bool) return self.mask class SDG(MotherWavelet): """Class for the SDG MotherWavelet (a subclass of MotherWavelet). SDG(self, len_signal = None, pad_to = None, scales = None, sampf = 1, normalize = True, fc = 'bandpass') Parameters ---------- len_signal : int Length of time series to be decomposed. pad_to : int Pad time series to a total length `pad_to` using zero padding (note, the signal will be zero padded automatically during continuous wavelet transform if pad_to is set). This is used in the fft function when performing the convolution of the wavelet and mother wavelet in Fourier space. scales : array Array of scales used to initialize the mother wavelet. sampf : float Sample frequency of the time series to be decomposed. normalize : bool If True, the normalized version of the mother wavelet will be used (i.e. the mother wavelet will have unit energy). fc : string Characteristic frequency - use the 'bandpass' or 'center' frequency of the Fourier spectrum of the mother wavelet to relate scale to period (default is 'bandpass'). Returns ------- Returns an instance of the MotherWavelet class which is used in the cwt and icwt functions. Examples -------- Create instance of SDG mother wavelet, normalized, using 10 scales and the center frequency of the Fourier transform as the characteristic frequency. Then, perform the continuous wavelet transform and plot the scalogram. # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.) # data = numpy.sin(x**2) # scales = numpy.arange(10) # # mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10),normalize = True, fc = 'center') # wavelet = cwt(data, mother_wavelet) # wave_coefs.scalogram() Notes ----- None References ---------- Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor and Francis Group, New York/London. 353 pp. 
""" def __init__(self,len_signal=None,pad_to=None,scales=None,sampf=1,normalize=True, fc = 'bandpass'): """Initilize SDG mother wavelet""" self.name='second degree of a Gaussian (mexican hat)' self.sampf = sampf self.scales = scales self.len_signal = len_signal self.normalize = normalize #set total length of wavelet to account for zero padding if pad_to is None: self.len_wavelet = len_signal else: self.len_wavelet = pad_to #set admissibility constant if normalize: self.cg = 4 * np.sqrt(np.pi) / 3. else: self.cg = np.pi #define characteristic frequency if fc is 'bandpass': self.fc = np.sqrt(5./2.) * self.sampf/(2 * np.pi) elif fc is 'center': self.fc = np.sqrt(2.) * self.sampf / (2 * np.pi) else: raise CharacteristicFrequencyError("fc = %s not defined"%(fc,)) # coi_coef defined under the assumption that period is used, not scale self.coi_coef = 2 * np.pi * np.sqrt(2. / 5.) * self.fc # Torrence and # Compo 1998 # compute coefficients for the dilated mother wavelet self.coefs = self.get_coefs() def get_coefs(self): """Calculate the coefficients for the SDG mother wavelet""" # Create array containing values used to evaluate the wavelet function xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.) # find mother wavelet coefficients at each scale xsd = -xi * xi / (self.scales[:,np.newaxis] * self.scales[:,np.newaxis]) if self.normalize is True: c=2. / (np.sqrt(3) * np.power(np.pi, 0.25)) else: c=1. mw = c * (1. + xsd) * np.exp(xsd / 2.) self.coefs = mw return mw class Morlet(MotherWavelet): """Class for the Morlet MotherWavelet (a subclass of MotherWavelet). Morlet(self, len_signal = None, pad_to = None, scales = None, sampf = 1, f0 = 0.849) Parameters ---------- len_signal : int Length of time series to be decomposed. pad_to : int Pad time series to a total length `pad_to` using zero padding (note, the signal will be zero padded automatically during continuous wavelet transform if pad_to is set). This is used in the fft function when performing the convolution of the wavelet and mother wavelet in Fourier space. scales : array Array of scales used to initialize the mother wavelet. sampf : float Sample frequency of the time series to be decomposed. f0 : float Central frequency of the Morlet mother wavelet. The Fourier spectrum of the Morlet wavelet appears as a Gaussian centered on f0. f0 defaults to a value of 0.849 (the angular frequency would be ~5.336). Returns ------- Returns an instance of the MotherWavelet class which is used in the cwt and icwt functions. Examples -------- Create instance of Morlet mother wavelet using 10 scales, perform the continuous wavelet transform, and plot the resulting scalogram. # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.) # data = numpy.sin(x**2) # scales = numpy.arange(10) # # mother_wavelet = Morlet(len_signal=len(data), scales = np.arange(10)) # wavelet = cwt(data, mother_wavelet) # wave_coefs.scalogram() Notes ----- * Morlet wavelet is defined as having unit energy, so the `normalize` flag will always be set to True. * The Morlet wavelet will always use f0 as it's characteristic frequency, so fc is set as f0. References ---------- Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor and Francis Group, New York/London. 353 pp. 
""" def __init__(self, len_signal=None, pad_to=None, scales=None, sampf=1, normalize=True, f0=0.849): """Initilize Morlet mother wavelet.""" from scipy.integrate import trapz self.sampf = sampf self.scales = scales self.len_signal = len_signal self.normalize = True self.name = 'Morlet' # set total length of wavelet to account for zero padding if pad_to is None: self.len_wavelet = len_signal else: self.len_wavelet = pad_to # define characteristic frequency self.fc = f0 # Cone of influence coefficient self.coi_coef = 2. * self.sampf / (self.fc + np.sqrt(2. + self.fc**2) * np.sqrt(2)); #Torrence and Compo 1998 (in code) # set admissibility constant # based on the simplified Morlet wavelet energy spectrum # in Addison (2002), eqn (2.39) - should be ok for f0 >0.84 f = np.arange(0.001, 50, 0.001) y = 2. * np.sqrt(np.pi) * np.exp(-np.power((2. * np.pi * f - 2. * np.pi * self.fc), 2)) self.cg = trapz(y[1:] / f[1:]) * (f[1]-f[0]) # compute coefficients for the dilated mother wavelet self.coefs = self.get_coefs() def get_coefs(self): """Calculate the coefficients for the Morlet mother wavelet.""" # Create array containing values used to evaluate the wavelet function xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.) # find mother wavelet coefficients at each scale xsd = xi / (self.scales[:,np.newaxis]) mw = np.power(np.pi,-0.25) * \ (np.exp(np.complex(1j) * 2. * np.pi * self.fc * xsd) - \ np.exp(-np.power((2. * np.pi * self.fc), 2) / 2.)) * \ np.exp(-np.power(xsd, 2) / 2.) self.coefs = mw return mw class Wavelet(object): """Class for Wavelet object. The Wavelet object holds the wavelet coefficients as well as information on how they were obtained. """ def __init__(self, wt, wavelet, weighting_function, signal_dtype, deep_copy=True): """Initialization of Wavelet object. Parameters ---------- wt : array Array of wavelet coefficients. wavelet : object Mother wavelet object used in the creation of `wt`. weighting_function : function Function used in the creation of `wt`. signal_dtype : dtype dtype of signal used in the creation of `wt`. deep_copy : bool If true (default), the mother wavelet object used in the creation of the wavelet object will be fully copied and accessible through wavelet.motherwavelet; if false, wavelet.motherwavelet will be a reference to the motherwavelet object (that is, if you change the mother wavelet object, you will see the changes when accessing the mother wavelet through the wavelet object - this is NOT good for tracking how the wavelet transform was computed, but setting deep_copy to False will save memory). Returns ------- Returns an instance of the Wavelet class. """ from copy import deepcopy self.coefs = wt[:,0:wavelet.len_signal] if wavelet.len_signal != wavelet.len_wavelet: self._pad_coefs = wt[:,wavelet.len_signal:] else: self._pad_coefs = None if deep_copy: self.motherwavelet = deepcopy(wavelet) else: self.motherwavelet = wavelet self.weighting_function = weighting_function self._signal_dtype = signal_dtype def get_gws(self): """Calculate Global Wavelet Spectrum. References ---------- Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of the American Meteorological Society, 79, 1, pp. 61-78. """ gws = self.get_wavelet_var() return gws def get_wes(self): """Calculate Wavelet Energy Spectrum. References ---------- Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of the American Meteorological Society, 79, 1, pp. 61-78. """ from scipy.integrate import trapz coef = 1. 
/ (self.motherwavelet.fc * self.motherwavelet.cg) wes = coef * trapz(np.power(np.abs(self.coefs), 2), axis = 1); return wes def get_wps(self): """Calculate Wavelet Power Spectrum. References ---------- Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of the American Meteorological Society, 79, 1, pp. 61-78. """ wps = (1./ self.motherwavelet.len_signal) * self.get_wes() return wps def get_wavelet_var(self): """Calculate Wavelet Variance (a.k.a. the Global Wavelet Spectrum of Torrence and Compo (1998)). References ---------- Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of the American Meteorological Society, 79, 1, pp. 61-78. """ coef = self.motherwavelet.cg * self.motherwavelet.fc wvar = (coef / self.motherwavelet.len_signal) * self.get_wes() return wvar def scalogram(self, show_coi=False, show_wps=False, ts=None, time=None, use_period=True, ylog_base=None, xlog_base=None, origin='top', figname=None): """ Scalogram plotting routine. Creates a simple scalogram, with optional wavelet power spectrum and time series plots of the transformed signal. Parameters ---------- show_coi : bool Set to True to see Cone of Influence show_wps : bool Set to True to see the Wavelet Power Spectrum ts : array 1D array containing time series data used in wavelet transform. If set, time series will be plotted. time : array of datetime objects 1D array containing time information use_period : bool Set to True to see figures use period instead of scale ylog_base : float If a log scale is desired, set `ylog_base` as float. (for log 10, set ylog_base = 10) xlog_base : float If a log scale is desired, set `xlog_base` as float. (for log 10, set xlog_base = 10) *note that this option is only valid for the wavelet power spectrum figure. origin : 'top' or 'bottom' Set origin of scale axis to top or bottom of figure Returns ------- None Examples -------- Create instance of SDG mother wavelet, normalized, using 10 scales and the center frequency of the Fourier transform as the characteristic frequency. Then, perform the continuous wavelet transform and plot the scalogram. # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.) 
# data = numpy.sin(x**2) # scales = numpy.arange(10) # # mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center') # wavelet = cwt(data, mother_wavelet) # wave_coefs.scalogram(origin = 'bottom') """ import matplotlib.pyplot as plt import matplotlib.cm as cm from pylab import poly_between if ts is not None: show_ts = True else: show_ts = False if not show_wps and not show_ts: # only show scalogram figrow = 1 figcol = 1 elif show_wps and not show_ts: # show scalogram and wps figrow = 1 figcol = 4 elif not show_wps and show_ts: # show scalogram and ts figrow = 2 figcol = 1 else: # show scalogram, wps, and ts figrow = 2 figcol = 4 if time is None: x = np.arange(self.motherwavelet.len_signal) else: x = time if use_period: y = self.motherwavelet.scales / self.motherwavelet.fc else: y = self.motherwavelet.scales fig = plt.figure(figsize=(16, 12), dpi=160) ax1 = fig.add_subplot(figrow, figcol, 1) # if show wps, give 3/4 space to scalogram, 1/4 to wps if show_wps: # create temp axis at 3 or 4 col of row 1 axt = fig.add_subplot(figrow, figcol, 3) # get location of axtmp and ax1 axt_pos = axt.get_position() ax1_pos = ax1.get_position() axt_points = axt_pos.get_points() ax1_points = ax1_pos.get_points() # set axt_pos left bound to that of ax1 axt_points[0][0] = ax1_points[0][0] ax1.set_position(axt_pos) fig.delaxes(axt) if show_coi: # coi_coef is defined using the assumption that you are using # period, not scale, in plotting - this handles that behavior if use_period: coi = self.motherwavelet.get_coi() / self.motherwavelet.fc / self.motherwavelet.sampf else: coi = self.motherwavelet.get_coi() coi[coi == 0] = y.min() - 0.1 * y.min() xs, ys = poly_between(np.arange(0, len(coi)), np.max(y), coi) ax1.fill(xs, ys, 'k', alpha=0.4, zorder = 2) contf=ax1.contourf(x,y,np.abs(self.coefs)**2) fig.colorbar(contf, ax=ax1, orientation='vertical', format='%2.1f') if ylog_base is not None: ax1.axes.set_yscale('log', basey=ylog_base) if origin is 'top': ax1.set_ylim((y[-1], y[0])) elif origin is 'bottom': ax1.set_ylim((y[0], y[-1])) else: raise OriginError('`origin` must be set to "top" or "bottom"') ax1.set_xlim((x[0], x[-1])) ax1.set_title('scalogram') ax1.set_ylabel('time') if use_period: ax1.set_ylabel('period') ax1.set_xlabel('time') else: ax1.set_ylabel('scales') if time is not None: ax1.set_xlabel('time') else: ax1.set_xlabel('sample') if show_wps: ax2 = fig.add_subplot(figrow,figcol,4,sharey=ax1) if use_period: ax2.plot(self.get_wps(), y, 'k') else: ax2.plot(self.motherwavelet.fc * self.get_wps(), y, 'k') if ylog_base is not None: ax2.axes.set_yscale('log', basey=ylog_base) if xlog_base is not None: ax2.axes.set_xscale('log', basey=xlog_base) if origin is 'top': ax2.set_ylim((y[-1], y[0])) else: ax2.set_ylim((y[0], y[-1])) if use_period: ax2.set_ylabel('period') else: ax2.set_ylabel('scales') ax2.grid() ax2.set_title('wavelet power spectrum') if show_ts: ax3 = fig.add_subplot(figrow, 2, 3, sharex=ax1) ax3.plot(x, ts) ax3.set_xlim((x[0], x[-1])) ax3.legend(['time series']) ax3.grid() # align time series fig with scalogram fig t = ax3.get_position() ax3pos=t.get_points() ax3pos[1][0]=ax1.get_position().get_points()[1][0] t.set_points(ax3pos) ax3.set_position(t) if (time is not None) or use_period: ax3.set_xlabel('time') else: ax3.set_xlabel('sample') if figname is None: plt.show() else: plt.savefig(figname) plt.close('all') def cwt(x, wavelet, weighting_function=lambda x: x**(-0.5), deep_copy=True): """Computes the continuous wavelet transform of x using the mother 
wavelet `wavelet`. This function computes the continuous wavelet transform of x using an instance a mother wavelet object. The cwt is defined as: T(a,b) = w(a) integral(-inf,inf)(x(t) * psi*{(t-b)/a} dt which is a convolution. In this algorithm, the convolution in the time domain is implemented as a multiplication in the Fourier domain. Parameters ---------- x : 1D array Time series to be transformed by the cwt wavelet : Instance of the MotherWavelet class Instance of the MotherWavelet class for a particular wavelet family weighting_function: Function used to weight Typically w(a) = a^(-0.5) is chosen as it ensures that the wavelets at every scale have the same energy. deep_copy : bool If true (default), the mother wavelet object used in the creation of the wavelet object will be fully copied and accessible through wavelet.motherwavelet; if false, wavelet.motherwavelet will be a reference to the motherwavelet object (that is, if you change the mother wavelet object, you will see the changes when accessing the mother wavelet through the wavelet object - this is NOT good for tracking how the wavelet transform was computed, but setting deep_copy to False will save memory). Returns ------- Returns an instance of the Wavelet class. The coefficients of the transform can be obtain by the coefs() method (i.e. wavelet.coefs() ) Examples -------- Create instance of SDG mother wavelet, normalized, using 10 scales and the center frequency of the Fourier transform as the characteristic frequency. Then, perform the continuous wavelet transform and plot the scalogram. # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.) # data = numpy.sin(x**2) # scales = numpy.arange(10) # # mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center') # wavelet = cwt(data, mother_wavelet) # wave_coefs.scalogram() References ---------- Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor and Francis Group, New York/London. 353 pp. """ signal_dtype = x.dtype if len(x) < wavelet.len_wavelet: n = len(x) x = np.resize(x, (wavelet.len_wavelet,)) x[n:] = 0 # Transform the signal and mother wavelet into the Fourier domain xf=fft(x) mwf=fft(wavelet.coefs.conj(), axis=1) # Convolve (multiply in Fourier space) wt_tmp=ifft(mwf*xf[np.newaxis,:], axis=1) # shift output from ifft and multiply by weighting function wt = fftshift(wt_tmp,axes=[1]) * weighting_function(wavelet.scales[:, np.newaxis]) # if mother wavelet and signal are real, only keep real part of transform wt=wt.astype(np.lib.common_type(wavelet.coefs, x)) return Wavelet(wt,wavelet,weighting_function,signal_dtype,deep_copy) def ccwt(x1, x2, wavelet): """Compute the continuous cross-wavelet transform of 'x1' and 'x2' using the mother wavelet 'wavelet', which is an instance of the MotherWavelet class. Parameters ---------- x1,x2 : 1D array Time series used to compute cross-wavelet transform wavelet : Instance of the MotherWavelet class Instance of the MotherWavelet class for a particular wavelet family Returns ------- Returns an instance of the Wavelet class. """ xwt = cwt(x1,wavelet) * np.conjugate(cwt(x2, wavelet)) return xwt def icwt(wavelet): """Compute the inverse continuous wavelet transform. Parameters ---------- wavelet : Instance of the MotherWavelet class instance of the MotherWavelet class for a particular wavelet family Examples -------- Use the Morlet mother wavelet to perform wavelet transform on 'data', then use icwt to compute the inverse wavelet transform to come up with an estimate of data ('data2'). 
Note that data2 is not exactly equal data. # import matplotlib.pyplot as plt # from scipy.signal import SDG, Morlet, cwt, icwt, fft, ifft # import numpy as np # # x = np.arange(0,2*np.pi,np.pi/64) # data = np.sin(8*x) # scales=np.arange(0.5,17) # # mother_wavelet = Morlet(len_signal = len(data), scales = scales) # wave_coefs=cwt(data, mother_wavelet) # data2 = icwt(wave_coefs) # # plt.plot(data) # plt.plot(data2) # plt.show() References ---------- Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor and Francis Group, New York/London. 353 pp. """ from scipy.integrate import trapz # if original wavelet was created using padding, make sure to include # information that is missing after truncation (see self.coefs under __init__ # in class Wavelet. if wavelet.motherwavelet.len_signal != wavelet.motherwavelet.len_wavelet: full_wc = np.c_[wavelet.coefs,wavelet._pad_coefs] else: full_wc = wavelet.coefs # get wavelet coefficients and take fft wcf = fft(full_wc,axis=1) # get mother wavelet coefficients and take fft mwf = fft(wavelet.motherwavelet.coefs,axis=1) # perform inverse continuous wavelet transform and make sure the result is the same type # (real or complex) as the original data used in the transform x = (1. / wavelet.motherwavelet.cg) * trapz(fftshift(ifft(wcf * mwf,axis=1),axes=[1]) / (wavelet.motherwavelet.scales[:,np.newaxis]**2), dx = 1. / wavelet.motherwavelet.sampf, axis=0) return x[0:wavelet.motherwavelet.len_signal].astype(wavelet._signal_dtype)
bsd-3-clause
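A runnable version of the usage sketched in the commented docstring examples above (the import path is an assumption based on this file's location; scales start at 1 because a zero scale would divide by zero in get_coefs):

import numpy as np
from scipy.signal.cwt import SDG, cwt  # assumed import path for the module above

x = np.arange(0, 2 * np.pi, np.pi / 8.)
data = np.sin(x ** 2)
scales = np.arange(1, 11)              # start at 1; scale 0 would divide by zero

mother_wavelet = SDG(len_signal=len(data), scales=scales, normalize=True, fc='center')
wave_coefs = cwt(data, mother_wavelet)
print(wave_coefs.coefs.shape)          # (10, 16): one row of coefficients per scale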
booya-at/paraBEM
examples/plots/far_field_error_src.py
2
1317
# -*- coding: utf-8 -*- import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import parabem from parabem.pan3d import src_3_0_vsaero, src_3_0_n0 from parabem.utils import check_path pnt1 = parabem.PanelVector3(-0.5, -0.5, 0) pnt2 = parabem.PanelVector3(0.5, -0.5, 0) pnt3 = parabem.PanelVector3(0.5, 0.5, 0) pnt4 = parabem.PanelVector3(-0.5, 0.5, 0) source = parabem.Panel3([pnt1, pnt2, pnt3, pnt4]) x = np.linspace(0, 5, 500) y = [] for xi in x: target = parabem.PanelVector3(xi, 0.0, 0.1) panel_infl = src_3_0_vsaero(target, source) point_infl = src_3_0_n0(target, source) y.append([panel_infl, point_infl, abs(panel_infl - point_infl)]) y = list(zip(*y)) plt.figure(figsize=(8,3)) plt.gcf().subplots_adjust(bottom=0.15) plt.plot(x, y[0], label=u"Exakte Lösung") plt.plot(x, y[1], label=u"Näherungslösung") plt.grid(True) plt.legend() plt.ylabel("Einfluss") plt.xlabel("x") plt.ylim(0., 0.6) plt.savefig(check_path("results/3d/far_vs_near_source.png")) plt.close() plt.figure(figsize=(8,3)) plt.gcf().subplots_adjust(bottom=0.15) plt.plot(x, y[2], label="Fehler durch Fernfeldmethode") plt.yscale('log') plt.grid(True) plt.legend() plt.ylim(-0.1, 0.6) plt.xlabel("x") plt.ylabel("Fehler") plt.savefig(check_path("results/3d/far_error_source.png"))
gpl-3.0
pannarale/pycbc
pycbc/results/followup.py
6
4568
# Copyright (C) 2014 Alex Nitz # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ This module provides functions to generate followup plots and trigger time series. """ import h5py, numpy, matplotlib # Only if a backend is not already set ... This should really *not* be done # here, but in the executables you should set matplotlib.use() # This matches the check that matplotlib does internally, but this *may* be # version dependenant. If this is a problem then remove this and control from # the executables directly. import sys if 'matplotlib.backends' not in sys.modules: matplotlib.use('agg') import pylab, mpld3, mpld3.plugins from ligo.segments import segment def columns_from_file_list(file_list, columns, ifo, start, end): """ Return columns of information stored in single detector trigger files. Parameters ---------- file_list_file : string pickle file containing the list of single detector triggers. ifo : string The ifo to return triggers for. columns : list of strings The list of columns to read from the trigger files. start : int The start time to get triggers from end : int The end time to get triggers from Returns ------- trigger_dict : dict A dictionary of column vectors with column names as keys. 
""" file_list = file_list.find_output_with_ifo(ifo) file_list = file_list.find_all_output_in_range(ifo, segment(start, end)) trig_dict = {} for trig_file in file_list: f = h5py.File(trig_file.storage_path, 'r') time = f['end_time'][:] pick = numpy.logical_and(time < end, time > start) pick_loc = numpy.where(pick)[0] for col in columns: if col not in trig_dict: trig_dict[col] = [] trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]]) return trig_dict ifo_color = {'H1': 'blue', 'L1':'red', 'V1':'green'} def coinc_timeseries_plot(coinc_file, start, end): fig = pylab.figure() f = h5py.File(coinc_file, 'r') stat1 = f['foreground/stat1'] stat2 = f['foreground/stat2'] time1 = f['foreground/time1'] time2 = f['foreground/time2'] ifo1 = f.attrs['detector_1'] ifo2 = f.attrs['detector_2'] pylab.scatter(time1, stat1, label=ifo1, color=ifo_color[ifo1]) pylab.scatter(time2, stat2, label=ifo2, color=ifo_color[ifo2]) fmt = '.12g' mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt)) pylab.legend() pylab.xlabel('Time (s)') pylab.ylabel('NewSNR') pylab.grid() return mpld3.fig_to_html(fig) def trigger_timeseries_plot(file_list, ifos, start, end): fig = pylab.figure() for ifo in ifos: trigs = columns_from_file_list(file_list, ['snr', 'end_time'], ifo, start, end) print(trigs) pylab.scatter(trigs['end_time'], trigs['snr'], label=ifo, color=ifo_color[ifo]) fmt = '.12g' mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt)) pylab.legend() pylab.xlabel('Time (s)') pylab.ylabel('SNR') pylab.grid() return mpld3.fig_to_html(fig) def times_to_urls(times, window, tag): base = '/../followup/%s/%s/%s' return times_to_links(times, window, tag, base=base) def times_to_links(times, window, tag, base=None): if base is None: base = "<a href='/../followup/%s/%s/%s' target='_blank'>followup</a>" urls = [] for time in times: start = time - window end = time + window urls.append(base % (tag, start, end)) return urls
gpl-3.0
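A minimal sketch of the per-column accumulation pattern that columns_from_file_list uses in the module above, with small in-memory dicts standing in for the HDF5 trigger files; the file contents and the (start, end) window below are made up for illustration.

import numpy

fake_files = [
    {'end_time': numpy.array([10.0, 55.0, 120.0]), 'snr': numpy.array([5.1, 6.3, 7.9])},
    {'end_time': numpy.array([130.0, 150.0]),      'snr': numpy.array([4.8, 9.2])},
]
start, end = 50.0, 140.0
columns = ['snr', 'end_time']

trig_dict = {}
for f in fake_files:
    time = f['end_time']
    # same window test as the module: keep triggers strictly inside (start, end)
    pick_loc = numpy.where((time > start) & (time < end))[0]
    for col in columns:
        if col not in trig_dict:
            trig_dict[col] = numpy.array([])
        trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][pick_loc]])

print(trig_dict['snr'])       # -> [6.3 7.9 4.8]
print(trig_dict['end_time'])  # -> [ 55. 120. 130.]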
WangWenjun559/Weiss
classifier/daily_train.py
1
1788
""" This file builds a model from training data, which can be incorporated into daily pipeline. =========================================================================================== TODO(wenjunw@cs.cmu.edu): - change the path of training file, its transformed feature file, and the model file currently these files are in the same directory as this python script. - In the feature, the parameter values of the SVM classifier may be modified Usage: python daily_train.py Author: Wenjun Wang Date: June 28, 2015 """ from train import Train from typeTrain import * from liblinearutil import * from sklearn.externals import joblib import time def main(): ### Train Wenjun's classifier # Name of files needed when training a model date = time.strftime('%Y-%m-%d') train_file = 'training' # name of original training file feature_file = 'models/training_file_'+date # name of transformed training file feature_output = 'models/features_'+date # name of feature file stpfile = 'english.stp' # english stopwords file feature_arg = '-uni -pos2 -stem -stprm' # types of features need to extract log = open('models/training_log','a') # log file log.write('Feature Arguments: %s\n-------------------------------\n'% feature_arg) # Create appropriate input file for LibLINEAR (SVM) training = Train(train_file, stpfile, feature_output, feature_file, feature_arg) training.convert_file() # Use LibLINEAR to train the model and save the model y, x = svm_read_problem(feature_file) m = train(y, x, '-c 3 -s 1 -B 1 -e 0.01 -v 5 -q') save_model('models/model_'+date, m) """ ### Train Austin's classifier tt = TypeTrain('models/type_model_' + date) tt.train() """ if __name__ == '__main__': main()
apache-2.0
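A hedged sketch of consuming the model that daily_train.py saves above: load it back with liblinearutil and score a held-out feature file. The 'heldout_features' path is an assumption for illustration; the model path mirrors the save_model() call in the script.

import time
from liblinearutil import load_model, svm_read_problem, predict

date = time.strftime('%Y-%m-%d')
m = load_model('models/model_' + date)         # model written by save_model() above
y, x = svm_read_problem('heldout_features')    # same LibLINEAR sparse format as feature_file (assumed path)
p_labels, p_acc, p_vals = predict(y, x, m)     # p_acc[0] is accuracy in percent
print('held-out accuracy: %.2f%%' % p_acc[0])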
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/util/clipboard/__init__.py
7
3420
""" Pyperclip A cross-platform clipboard module for Python. (only handles plain text for now) By Al Sweigart al@inventwithpython.com BSD License Usage: import pyperclip pyperclip.copy('The text to be copied to the clipboard.') spam = pyperclip.paste() if not pyperclip.copy: print("Copy functionality unavailable!") On Windows, no additional modules are needed. On Mac, the module uses pbcopy and pbpaste, which should come with the os. On Linux, install xclip or xsel via package manager. For example, in Debian: sudo apt-get install xclip Otherwise on Linux, you will need the gtk or PyQt4 modules installed. gtk and PyQt4 modules are not available for Python 3, and this module does not work with PyGObject yet. """ __version__ = '1.5.27' # flake8: noqa import platform import os import subprocess from .clipboards import (init_osx_clipboard, init_gtk_clipboard, init_qt_clipboard, init_xclip_clipboard, init_xsel_clipboard, init_klipper_clipboard, init_no_clipboard) from .windows import init_windows_clipboard # `import PyQt4` sys.exit()s if DISPLAY is not in the environment. # Thus, we need to detect the presence of $DISPLAY manually # and not load PyQt4 if it is absent. HAS_DISPLAY = os.getenv("DISPLAY", False) CHECK_CMD = "where" if platform.system() == "Windows" else "which" def _executable_exists(name): return subprocess.call([CHECK_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 def determine_clipboard(): # Determine the OS/platform and set # the copy() and paste() functions accordingly. if 'cygwin' in platform.system().lower(): # FIXME: pyperclip currently does not support Cygwin, # see https://github.com/asweigart/pyperclip/issues/55 pass elif os.name == 'nt' or platform.system() == 'Windows': return init_windows_clipboard() if os.name == 'mac' or platform.system() == 'Darwin': return init_osx_clipboard() if HAS_DISPLAY: # Determine which command/module is installed, if any. try: import gtk # check if gtk is installed except ImportError: pass else: return init_gtk_clipboard() try: import PyQt4 # check if PyQt4 is installed except ImportError: pass else: return init_qt_clipboard() if _executable_exists("xclip"): return init_xclip_clipboard() if _executable_exists("xsel"): return init_xsel_clipboard() if _executable_exists("klipper") and _executable_exists("qdbus"): return init_klipper_clipboard() return init_no_clipboard() def set_clipboard(clipboard): global copy, paste clipboard_types = {'osx': init_osx_clipboard, 'gtk': init_gtk_clipboard, 'qt': init_qt_clipboard, 'xclip': init_xclip_clipboard, 'xsel': init_xsel_clipboard, 'klipper': init_klipper_clipboard, 'windows': init_windows_clipboard, 'no': init_no_clipboard} copy, paste = clipboard_types[clipboard]() copy, paste = determine_clipboard() __all__ = ["copy", "paste"] # pandas aliases clipboard_get = paste clipboard_set = copy
gpl-3.0
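A hedged usage sketch for the vendored pyperclip module above, following the fallback pattern its own docstring recommends; the import path is an assumption matching where this copy lives in the pandas tree.

from pandas.util import clipboard   # this vendored copy; standalone pyperclip works the same way

if clipboard.copy:                  # falsy when determine_clipboard() fell back to init_no_clipboard()
    clipboard.copy('The text to be copied to the clipboard.')
    print(clipboard.paste())
else:
    print('Copy functionality unavailable!')

# A specific backend can also be forced explicitly, e.g. on Linux with xclip installed:
# clipboard.set_clipboard('xclip')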
dhermes/bezier
src/python/bezier/curved_polygon.py
1
9291
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Curved polygon and associated helpers. A curved polygon (in :math:`\mathbf{R}^2`) is defined by the collection of B |eacute| zier curves that determine the boundary. .. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE :trim: .. testsetup:: * import numpy as np import bezier """ from bezier import _helpers from bezier import _plot_helpers from bezier import _triangle_helpers class CurvedPolygon: """Represents an object defined by its curved boundary. The boundary is a piecewise defined collection of B |eacute| zier curves. .. note:: The direction of the nodes in each :class:`.Curve` on the boundary is important. When verifying, we check that one curve begins where the last one ended. .. image:: ../../images/curved_polygon_constructor1.png :align: center .. doctest:: curved-polygon-constructor >>> import bezier >>> import numpy as np >>> nodes0 = np.asfortranarray([ ... [0.0, 1.0, 2.0], ... [0.0, -1.0, 0.0], ... ]) >>> edge0 = bezier.Curve(nodes0, degree=2) >>> nodes1 = np.asfortranarray([ ... [2.0, 2.0], ... [0.0, 1.0], ... ]) >>> edge1 = bezier.Curve(nodes1, degree=1) >>> nodes2 = np.asfortranarray([ ... [2.0, 1.0, 0.0], ... [1.0, 2.0, 1.0], ... ]) >>> edge2 = bezier.Curve(nodes2, degree=2) >>> nodes3 = np.asfortranarray([ ... [0.0, 0.0], ... [1.0, 0.0], ... ]) >>> edge3 = bezier.Curve(nodes3, degree=1) >>> curved_poly = bezier.CurvedPolygon( ... edge0, edge1, edge2, edge3) >>> curved_poly <CurvedPolygon (num_sides=4)> .. testcleanup:: curved-polygon-constructor import make_images make_images.curved_polygon_constructor1(curved_poly) Though the endpoints of each pair of edges are verified to match, the curved polygon as a whole is not verified, so creating a curved polygon with self-intersections is possible: .. image:: ../../images/curved_polygon_constructor2.png :align: center .. doctest:: curved-polygon-constructor-invalid >>> nodes0 = np.asfortranarray([ ... [0.0, 1.0], ... [0.0, 0.0], ... ]) >>> edge0 = bezier.Curve(nodes0, degree=1) >>> nodes1 = np.asfortranarray([ ... [1.0, 1.25, 1.0], ... [0.0, 0.5 , 1.0], ... ]) >>> edge1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [1.0, 2.0], ... [1.0, 1.0], ... ]) >>> edge2 = bezier.Curve(nodes2, degree=1) >>> nodes3 = np.asfortranarray([ ... [2.0, 1.0 , 0.0], ... [1.0, 0.75, 0.0] ... ]) >>> edge3 = bezier.Curve(nodes3, degree=2) >>> curved_poly = bezier.CurvedPolygon( ... edge0, edge1, edge2, edge3) >>> curved_poly <CurvedPolygon (num_sides=4)> .. testcleanup:: curved-polygon-constructor-invalid import make_images make_images.curved_polygon_constructor2(curved_poly) Args: edges (Tuple[~bezier.curve.Curve, ...]): The boundary edges of the curved polygon. kwargs: There are two keyword arguments accepted: * ``metadata`` (:class:`~typing.Sequence`): A sequence of triples associated with this curved polygon. This is intended to be used by callers that have created a curved polygon as an intersection between two B |eacute| zier triangles. 
* ``verify`` (:class:`bool`): Indicates if the edges should be verified as having shared endpoints. Defaults to :data:`True`. Other keyword arguments specified will be silently ignored. """ __slots__ = ("_edges", "_num_sides", "_metadata") def __init__(self, *edges, **kwargs): self._edges = edges self._num_sides = len(edges) self._metadata = kwargs.pop("metadata", None) if kwargs.pop("verify", True): self._verify() @staticmethod def _verify_pair(prev, curr): """Verify a pair of sides share an endpoint. .. note:: This currently checks that edge endpoints match **exactly** but allowing some roundoff may be desired. Args: prev (.Curve): "Previous" curve at piecewise junction. curr (.Curve): "Next" curve at piecewise junction. Raises: ValueError: If the previous side is not in 2D. ValueError: If consecutive sides don't share an endpoint. """ if prev._dimension != 2: raise ValueError("Curve not in R^2", prev) end = prev._nodes[:, -1] start = curr._nodes[:, 0] if not _helpers.vector_close(end, start): raise ValueError( "Not sufficiently close", "Consecutive sides do not have common endpoint", prev, curr, ) def _verify(self): """Verify that the edges define a curved polygon. This may not be entirely comprehensive, e.g. won't check self-intersection of the defined polygon. .. note:: This currently checks that edge endpoints match **exactly** but allowing some roundoff may be desired. Raises: ValueError: If there are fewer than two sides. ValueError: If one of the sides is not in 2D. ValueError: If consecutive sides don't share an endpoint. """ if self._num_sides < 2: raise ValueError("At least two sides required.") for prev, curr in zip(self._edges, self._edges[1:]): self._verify_pair(prev, curr) # Now we check that the final edge wraps around. prev = self._edges[-1] curr = self._edges[0] self._verify_pair(prev, curr) @property def num_sides(self): """int: The number of sides in the current polygon.""" return self._num_sides @property def __dict__(self): """dict: Dictionary of current curved polygon's property namespace. This is just a stand-in property for the usual ``__dict__``. This class defines ``__slots__`` so by default would not provide a ``__dict__``. This also means that the current object can't be modified by the returned dictionary. """ return {"_edges": self._edges, "_num_sides": self._num_sides} @property def area(self): r"""The area of the current curved polygon. This assumes, but does not check, that the current curved polygon is valid (i.e. it is bounded by the edges). This computes the area via Green's theorem. Using the vector field :math:`\mathbf{F} = \left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says .. math:: \int_{\mathcal{P}} 2 \, d\textbf{x} = \int_{\partial \mathcal{P}} -y \, dx + x \, dy (where :math:`\mathcal{P}` is the current curved polygon). Note that for a given edge :math:`C(r)` with control points :math:`x_j, y_j`, the integral can be simplified: .. math:: \int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr = \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d} b'_{j, d} \, dr where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials. Returns: float: The area of the current curved polygon. """ edges = tuple(edge._nodes for edge in self._edges) return _triangle_helpers.compute_area(edges) def __repr__(self): """Representation of current object. Returns: str: Object representation. 
""" return "<{} (num_sides={:d})>".format( self.__class__.__name__, self._num_sides ) def plot(self, pts_per_edge, color=None, ax=None): """Plot the current curved polygon. Args: pts_per_edge (int): Number of points to plot per curved edge. color (Optional[Tuple[float, float, float]]): Color as RGB profile. ax (Optional[matplotlib.artist.Artist]): matplotlib axis object to add plot to. Returns: matplotlib.artist.Artist: The axis containing the plot. This may be a newly created axis. """ if ax is None: ax = _plot_helpers.new_axis() _plot_helpers.add_patch(ax, color, pts_per_edge, *self._edges) return ax
apache-2.0
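A hedged cross-check of the Green's-theorem area property documented above: when every edge is a straight (degree-1) curve, CurvedPolygon.area should agree with the ordinary shoelace formula. The unit-square corners are made up for illustration; the constructor calls mirror the class docstring.

import numpy as np
import bezier

corners = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
edges = []
for i in range(4):
    p, q = corners[i], corners[(i + 1) % 4]
    # first row is x-coordinates, second row is y-coordinates, as in the docstring
    nodes = np.asfortranarray([[p[0], q[0]], [p[1], q[1]]])
    edges.append(bezier.Curve(nodes, degree=1))
square = bezier.CurvedPolygon(*edges)

# Shoelace formula: 0.5 * |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)|
shoelace = 0.5 * abs(sum(
    corners[i, 0] * corners[(i + 1) % 4, 1] - corners[(i + 1) % 4, 0] * corners[i, 1]
    for i in range(4)))
print(square.area, shoelace)   # both should be 1.0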
ruymanengithub/vison
vison/flat/BF01aux.py
1
6503
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Auxiliary Functions and resources to BF01. Created on Tue Jul 31 17:50:00 2018 :author: Ruyman Azzollini """ # IMPORT STUFF from pdb import set_trace as stop import numpy as np import os from collections import OrderedDict import string as st import pandas as pd from vison.flat import PTC0Xaux from vison.plot import figclasses from vison.plot import trends from vison.datamodel import cdp from vison.point import spot as spotmod # END IMPORT def get_CDP_lib(): covtable_cdp = cdp.Tables_CDP() covtable_cdp.rootname = 'BF01_COVTABLE' bftable_cdp = cdp.Tables_CDP() bftable_cdp.rootname = 'BF01_G15TABLE' bfFITtable_cdp = cdp.Tables_CDP() bfFITtable_cdp.rootname = 'BF01FIT_G15TABLE' profscov_cdp = cdp.CDP() profscov_cdp.rootname = 'profs_COV1D_BF01' profsker_cdp = cdp.CDP() profsker_cdp.rootname = 'profs_KER1D_BF01' CDP_lib = dict(COVTABLE=covtable_cdp, PROFSCOV1D=profscov_cdp, BFTABLE=bftable_cdp, BFfitTABLE=bfFITtable_cdp, PROFSKER1D=profsker_cdp) return CDP_lib prof_COV_ver_dict = dict( figname='BF01_COV_profs_ver.png', caption='BF01: COV 1D profiles, vertical/parallel direction.', meta=dict(doLegend=True, ylabel='COV/VAR, ADIM.', xlabel='Y', ylim=[-0.005,0.07], corekwargs=dict(), suptitle='BF01: COV 1D Profile, Vertical/Parallel.') ) prof_COV_ser_dict = dict( figname='BF01_COV_profs_ser.png', caption='BF01: COV 1D profiles, serial direction.', meta=dict(doLegend=True, ylabel='COV/VAR, ADIM.', xlabel='X', ylim=[-0.005,0.07], corekwargs=dict(), suptitle='BF01: COV 1D Profile, Serial.') ) prof_KER_ver_dict = dict( figname='BF01_KER_profs_ver.png', caption='BF01: KERNEL 1D profiles, vertical/parallel direction.', meta=dict(doLegend=True, ylabel='log([ADU])', xlabel='Y [pix]', #ylim=[-0.03, 0.1], corekwargs=dict(), suptitle='BF01: Kernels 1D Profile, Vertical/Parallel.') ) prof_KER_ser_dict = dict( figname='BF01_KER_profs_ser.png', caption='BF01: KERNEL 1D profiles, serial direction.', meta=dict(doLegend=True, ylabel='log([ADU])', xlabel='X [pix]', #ylim=[-0.03, 0.1], corekwargs=dict(), suptitle='BF01: Kernels 1D Profile, Serial.') ) FWHMx_v_flu_dict = dict( figname='BF01_FWHMx_v_flu.png', caption='BF01: FWHM(x) vs. Fluence.', meta=dict(doLegend=True, ylabel='FWHM(x), [um]', xlabel='ADU', ylim = [5.,15.], corekwargs=dict( data=dict(marker='o', linestyle=''), fit=dict(marker='', linestyle='--')), suptitle='BF01: FWHMx in microns vs. Fluence') ) FWHMy_v_flu_dict = dict( figname='BF01_FWHMy_v_flu.png', caption='BF01: FWHM(y) vs. Fluence.', meta=dict(doLegend=True, ylabel='FWHM(y), [um]', xlabel='ADU', ylim = [5.,15.], corekwargs=dict( data=dict(marker='o', linestyle=''), fit=dict(marker='', linestyle='--')), suptitle='BF01: FWHMy in microns vs. Fluence') ) def gt_PTC_curves_dict(test, BFEcorr='no'): nicetest = test.replace('_', '\_') if BFEcorr=='no': BFEtag = 'wBFE' BFEcap = 'BFE left in' elif BFEcorr == 'fludep': BFEtag = 'noBFE' BFEcap = 'BFE removed' elif BFEcorr == 'flufix': BFEtag = 'noBFEfixflu' BFEcap = 'BFE removed, mid-fluence Axij' return dict( figname='%s_PTC_curves_%s.png' % (test,BFEtag), caption='%s: PTC curves, %s. Theoretical line has a fixed gain of 3.5.' % (nicetest,BFEcap), meta=dict(doLegend=True, ylabel='VAR', xlabel='MED', xlim=[0., 2**16], ylim=[0., 2.**16 / 3.], suptitle='%s: PTC Curves.' 
% nicetest, corekwargs=dict(data=dict(marker='.', linestyle='', color='b'), theo=dict(marker='', linestyle='-', color='r')) )) def gt_BF01figs(test): BF01figs = dict() BF01figs['BF01checks_offsets'] = [ trends.Fig_Basic_Checkstat, PTC0Xaux.gt_check_offsets_dict(test)] BF01figs['BF01checks_deltaoff'] = [ trends.Fig_Basic_Checkstat, PTC0Xaux.gt_check_deltaoff_dict(test)] BF01figs['BF01checks_stds'] = [ trends.Fig_Basic_Checkstat, PTC0Xaux.gt_check_std_dict(test)] BF01figs['BF01checks_flu'] = [ trends.Fig_Basic_Checkstat, PTC0Xaux.gt_check_img_flu_dict(test)] BF01figs['BF01checks_imgstd'] = [ trends.Fig_Basic_Checkstat, PTC0Xaux.gt_check_img_std_dict(test)] BF01figs['BlueScreen'] = [figclasses.BlueScreen, dict()] # renaming to BF01... HACK keys_to_rename = ['caption', 'suptitle', 'figname'] for figkey in list(BF01figs.keys()): _dict = BF01figs[figkey][1] try: _mdict = BF01figs[figkey][1]['meta'] hasmeta = True except KeyError: hasmeta = False for key in keys_to_rename: if key in _dict: _dict[key] = _dict[key].replace(test, 'BF01') if hasmeta: if key in _mdict: _mdict[key] = _mdict[key].replace(test, 'BF01') BF01figs['BF01_COV_ver'] = [ figclasses.Fig_Beam2DPlot, prof_COV_ver_dict] BF01figs['BF01_COV_hor'] = [ figclasses.Fig_Beam2DPlot, prof_COV_ser_dict] BF01figs['BF01_KER_ver'] = [ figclasses.Fig_Beam2DPlot, prof_KER_ver_dict] BF01figs['BF01_KER_hor'] = [ figclasses.Fig_Beam2DPlot, prof_KER_ser_dict] BF01figs['BF01_fwhmx_v_flu'] = [ figclasses.Fig_Beam2DPlot, FWHMx_v_flu_dict] BF01figs['BF01_fwhmy_v_flu'] = [ figclasses.Fig_Beam2DPlot, FWHMy_v_flu_dict] BF01figs['BF01_PTC_BFE'] = [ figclasses.Fig_Beam2DPlot, gt_PTC_curves_dict(test, BFEcorr='no')] BF01figs['BF01_PTC_NOBFE'] = [ figclasses.Fig_Beam2DPlot, gt_PTC_curves_dict(test, BFEcorr='fludep')] BF01figs['BF01_PTC_NOBFEALT'] = [ figclasses.Fig_Beam2DPlot, gt_PTC_curves_dict(test, BFEcorr='flufix')] return BF01figs
gpl-3.0
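A toy illustration of the caption-renaming step inside gt_BF01figs above, which reuses figure dictionaries built for a generic test id and rewrites them to read 'BF01'; the 'PTC02' test id and the cut-down dict are made up for illustration.

test = 'PTC02'
fig_dict = {'figname': '%s_PTC_curves_wBFE.png' % test,
            'caption': '%s: PTC curves, BFE left in.' % test}

# same substitution that gt_BF01figs applies to 'caption', 'suptitle' and 'figname'
for key in ('caption', 'figname'):
    fig_dict[key] = fig_dict[key].replace(test, 'BF01')

print(fig_dict['figname'])   # -> BF01_PTC_curves_wBFE.png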
siou83/trading-with-python
sandbox/spreadCalculations.py
78
1496
'''
Created on 28 okt 2011

@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os

symbols = ['SPY','IWM']

y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)

df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)

res = readBiggerScreener('CointPairs.csv')

#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp

#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)

#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()

#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')

#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()

quote = yahooFinance.getQuote(symbols)
print quote

s = Spread(symbols, histClose=df, estimateBeta=False)
s.setLast(quote['last'])

s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()

fig = plt.figure(2)
s.plot()
bsd-3-clause
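A hedged sketch of the dollar-neutral sizing hinted at in the commented-out "check with spread scanner" block of spreadCalculations.py above: target equal and opposite dollar amounts in SPY and IWM and derive share counts from last prices. The prices are made up for illustration.

import pandas as pd

last = pd.Series({'SPY': 130.0, 'IWM': 76.5})            # hypothetical last prices
target_capital = pd.Series({'SPY': 100.0, 'IWM': -100.0})  # +/- 100 dollars per leg
target_shares = target_capital / last                    # dollar-neutral share counts

sp = pd.DataFrame({'last': last,
                   'targetCapital': target_capital,
                   'targetShares': target_shares})
print(sp)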
chengjunjian/tushare
tushare/util/dateu.py
27
2184
# -*- coding:utf-8 -*-

import datetime
import pandas as pd


def year_qua(date):
    mon = date[5:7]
    mon = int(mon)
    return [date[0:4], _quar(mon)]


def _quar(mon):
    if mon in [1, 2, 3]:
        return '1'
    elif mon in [4, 5, 6]:
        return '2'
    elif mon in [7, 8, 9]:
        return '3'
    elif mon in [10, 11, 12]:
        return '4'
    else:
        return None


def today():
    day = datetime.datetime.today().date()
    return str(day)


def get_year():
    year = datetime.datetime.today().year
    return year


def get_month():
    month = datetime.datetime.today().month
    return month


def get_hour():
    return datetime.datetime.today().hour


def today_last_year():
    lasty = datetime.datetime.today().date() + datetime.timedelta(-365)
    return str(lasty)


def day_last_week(days=-7):
    lasty = datetime.datetime.today().date() + datetime.timedelta(days)
    return str(lasty)


def diff_day(start=None, end=None):
    d1 = datetime.datetime.strptime(end, '%Y-%m-%d')
    d2 = datetime.datetime.strptime(start, '%Y-%m-%d')
    delta = d1 - d2
    return delta.days


def get_quarts(start, end):
    idx = pd.period_range('Q'.join(year_qua(start)), 'Q'.join(year_qua(end)),
                          freq='Q-JAN')
    return [str(d).split('Q') for d in idx][::-1]


holiday = ['2015-01-01', '2015-01-02', '2015-02-18', '2015-02-19', '2015-02-20',
           '2015-02-23', '2015-02-24', '2015-04-06', '2015-05-01', '2015-06-22',
           '2015-09-03', '2015-09-04', '2015-10-01', '2015-10-02', '2015-10-05',
           '2015-10-06', '2015-10-07']


def is_holiday(date):
    # Accept either a 'YYYY-MM-DD' string or a datetime; a date counts as a
    # holiday when it falls on a weekend or appears in the `holiday` list above.
    if isinstance(date, str):
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    weekday = int(date.strftime("%w"))
    if 0 < weekday < 6 and date.strftime('%Y-%m-%d') not in holiday:
        return False
    else:
        return True


def last_tddate():
    today = datetime.datetime.today().date()
    today = int(today.strftime("%w"))
    if today == 0:
        return day_last_week(-2)
    else:
        return day_last_week(-1)
bsd-3-clause
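A quick usage sketch for the helpers in dateu.py above; it assumes the module is importable as tushare.util.dateu and uses fixed 2015 dates so the expected results are deterministic.

from tushare.util import dateu

print(dateu.year_qua('2015-04-07'))                          # -> ['2015', '2']
print(dateu.diff_day(start='2015-01-01', end='2015-04-07'))  # -> 96
print(dateu.is_holiday('2015-04-05'))                        # -> True (a Sunday)
print(dateu.is_holiday('2015-04-07'))                        # -> False (ordinary trading Tuesday)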
magnunor/hyperspy
hyperspy/misc/holography/tools.py
4
3063
# -*- coding: utf-8 -*- # Copyright 2007-2017 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import numpy as np import matplotlib.pyplot as plt from scipy.fftpack import fft2, fftshift import logging _logger = logging.getLogger(__name__) def calculate_carrier_frequency(holo_data, sb_position, scale): """ Calculates fringe carrier frequency of a hologram Parameters ---------- holo_data: ndarray The data of the hologram. sb_position: tuple Position of the sideband with the reference to non-shifted FFT scale: tuple Scale of the axes that will be used for the calculation. Returns ------- Carrier frequency """ shape = holo_data.shape origins = [np.array((0, 0)), np.array((0, shape[1])), np.array((shape[0], shape[1])), np.array((shape[0], 0))] origin_index = np.argmin( [np.linalg.norm(origin - sb_position) for origin in origins]) return np.linalg.norm(np.multiply( origins[origin_index] - sb_position, scale)) def estimate_fringe_contrast_fourier( holo_data, sb_position, apodization='hanning'): """ Estimates average fringe contrast of a hologram by dividing amplitude of maximum pixel of sideband by amplitude of FFT's origin. Parameters ---------- holo_data: ndarray The data of the hologram. sb_position: tuple Position of the sideband with the reference to non-shifted FFT apodization: string, None Use 'hanning', 'hamming' or None to apply apodization window in real space before FFT Apodization is typically needed to suppress the striking due to sharp edges of the which often results in underestimation of the fringe contrast. (Default: 'hanning') Returns ------- Fringe contrast as a float """ holo_shape = holo_data.shape if apodization: if apodization == 'hanning': window_x = np.hanning(holo_shape[0]) window_y = np.hanning(holo_shape[1]) elif apodization == 'hamming': window_x = np.hamming(holo_shape[0]) window_y = np.hamming(holo_shape[1]) window_2d = np.sqrt(np.outer(window_x, window_y)) data = holo_data * window_2d else: data = holo_data fft_exp = fft2(data) return 2 * np.abs(fft_exp[tuple(sb_position)]) / np.abs(fft_exp[0, 0])
gpl-3.0
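A hedged numerical check of the fringe-contrast definition used by estimate_fringe_contrast_fourier above: a synthetic cosine fringe pattern with known contrast C should give back C when no apodization is applied. All numbers below are made up for illustration.

import numpy as np
from scipy.fftpack import fft2

N, C, kx = 256, 0.3, 32                       # image size, true contrast, integer carrier frequency
x = np.arange(N)
row = 1.0 + C * np.cos(2 * np.pi * kx * x / N)   # fringes along x
holo = np.tile(row, (N, 1))                      # constant along y

sb_position = (0, kx)                         # sideband position in the non-shifted FFT
fft_exp = fft2(holo)
contrast = 2 * np.abs(fft_exp[sb_position]) / np.abs(fft_exp[0, 0])
print(contrast)                               # -> 0.3 (up to float precision)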
sgrid/pysgrid
demos/basic_interp.py
3
2452
import numpy as np import matplotlib.pyplot as plt import pysgrid node_lon = np.array(([1, 3, 5], [1, 3, 5], [1, 3, 5])) node_lat = np.array(([1, 1, 1], [3, 3, 3], [5, 5, 5])) edge2_lon = np.array(([0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6])) edge2_lat = np.array(([1, 1, 1, 1], [3, 3, 3, 3], [5, 5, 5, 5])) edge1_lon = np.array(([1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5])) edge1_lat = np.array(([0, 0, 0], [2, 2, 2], [4, 4, 4], [6, 6, 6])) center_lon = np.array(([0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6])) center_lat = np.array(([0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [6, 6, 6, 6])) sgrid = pysgrid.SGrid(node_lon=node_lon, node_lat=node_lat, edge1_lon=edge1_lon, edge1_lat=edge1_lat, edge2_lon=edge2_lon, edge2_lat=edge2_lat, center_lon=center_lon, center_lat=center_lat) c_var = np.array(([0, 0, 0, 0], [0, 1, 2, 0], [0, 2, 1, 0], [0, 0, 0, 0])) e2_var = np.array(([1, 0, 0, 1], [0, 1, 2, 0], [0, 0, 0, 0])) e1_var = np.array(([1, 1, 0], [0, 1, 0], [0, 2, 0], [1, 1, 0])) n_var = np.array(([0, 1, 0], [1, 0, 1], [0, 1, 0])) ptsx, ptsy = np.mgrid[0:6:600j, 0:6:600j] pts = np.stack((ptsx, ptsy), axis=-1) interp_c = sgrid.interpolate_var_to_points(pts, c_var).reshape(600, 600) interp_e1 = sgrid.interpolate_var_to_points(pts, e1_var).reshape(600, 600).T interp_e2 = sgrid.interpolate_var_to_points(pts, e2_var).reshape(600, 600).T interp_n = sgrid.interpolate_var_to_points(pts, n_var).reshape(600, 600) plt.subplot(221) plt.imshow(interp_c, extent=(0, 6, 0, 6), origin='lower') plt.vlines(center_lon, center_lat[0], center_lat[-1]) plt.hlines(center_lon, center_lat[0], center_lat[-1]) plt.title('rho grid interpolation') plt.subplot(222) plt.imshow(interp_e1, extent=(0, 6, 0, 6), origin='lower') plt.vlines(edge2_lon, center_lat[0], center_lat[-1]) plt.hlines(center_lon, edge1_lat[0], edge1_lat[-1]) plt.title('u grid interpolation') plt.subplot(223) plt.imshow(interp_e2, extent=(0, 6, 0, 6), origin='lower') plt.vlines(center_lon, node_lat[0], node_lat[-1]) plt.hlines(edge2_lon, center_lat[0], center_lat[-1]) plt.title('v grid interpolation') plt.subplot(224) plt.imshow(interp_n, extent=(0, 6, 0, 6), origin='lower') plt.vlines(node_lon, node_lat[0], node_lat[-1]) plt.hlines(node_lon, node_lat[0], node_lat[-1]) plt.title('psi grid interpolation') plt.show()
bsd-3-clause
lucalianas/openmicroscopy
components/tools/OmeroPy/src/omero/install/jvmcfg.py
2
16253
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 Glencoe Software, Inc. All Rights Reserved. # Use is subject to license terms supplied in LICENSE.txt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """ Automatic configuration of memory settings for Java servers. """ from types import StringType from shlex import split import logging LOGGER = logging.getLogger("omero.install.jvmcfg") def strip_dict(map, prefix=("omero", "jvmcfg"), suffix=(), limit=1): """ For the given dictionary, return a copy of the dictionary where all entries not matching the prefix, suffix, and limit have been removed and where all remaining keys have had the prefix and suffix stripped. The limit describes the number of elements that are allowed in the new key after stripping prefix and suffix. """ if isinstance(prefix, StringType): prefix = tuple(prefix.split(".")) if isinstance(suffix, StringType): suffix = tuple(suffix.split(".")) rv = dict() if not map: return dict() def __strip_dict(k, v, prefix, suffix, rv): key = tuple(k.split(".")) ksz = len(key) psz = len(prefix) ssz = len(suffix) if ksz <= (psz + ssz): return # No way to strip if smaller if key[0:psz] == prefix and key[ksz-ssz:] == suffix: newkey = key[psz:ksz-ssz] if len(newkey) == limit: newkey = ".".join(newkey) rv[newkey] = v for k, v in map.items(): __strip_dict(k, v, prefix, suffix, rv) return rv class StrategyRegistry(dict): def __init__(self, *args, **kwargs): super(dict, self).__init__(*args, **kwargs) STRATEGY_REGISTRY = StrategyRegistry() class Settings(object): """ Container for the config options found in etc/grid/config.xml """ def __init__(self, server_values=None, global_values=None): if server_values is None: self.__server = dict() else: self.__server = server_values if global_values is None: self.__global = dict() else: self.__global = global_values self.__static = { "strategy": PercentStrategy, "append": "", "perm_gen": "128m", "heap_dump": "off", "heap_size": "512m", "system_memory": None, "max_system_memory": "48000", "min_system_memory": "3414", } self.__manual = dict() def __getattr__(self, key): return self.lookup(key) def lookup(self, key, default=None): if key in self.__manual: return self.__manual[key] elif key in self.__server: return self.__server[key] elif key in self.__global: return self.__global[key] elif key in self.__static: return self.__static[key] else: return default def overwrite(self, key, value, always=False): if self.was_set(key) and not always: # Then we leave it as the user requested return else: self.__manual[key] = value def was_set(self, key): return key in self.__server or key in self.__global def get_strategy(self): return STRATEGY_REGISTRY.get(self.strategy, self.strategy) def __str__(self): rv = dict() rv.update(self.__server) rv.update(self.__global) if not rv: rv = "" return 'Settings(%s)' % rv class Strategy(object): """ Strategy for 
calculating memory settings. Primary class of the memory module. """ def __init__(self, name, settings=None): """ 'name' argument should likely be one of: ('blitz', 'indexer', 'pixeldata', 'repository') """ if settings is None: settings = Settings() self.name = name self.settings = settings if type(self) == Strategy: raise Exception("Must subclass!") # Memory helpers def system_memory_mb(self): """ Returns a tuple, in MB, of available, active, and total memory. "total" memory is found by calling to first a Python library (if installed) and otherwise a Java class. If "system_memory" is set, it will short-circuit both methods. "active" memory is set to "total" but limited by "min_system_memory" and "max_system_memory". "available" may not be accurate, and in some cases will be set to total. """ available, total = None, None if self.settings.system_memory is not None: total = int(self.settings.system_memory) available = total else: pymem = self._system_memory_mb_psutil() if pymem is not None: available, total = pymem else: available, total = self._system_memory_mb_java() max_system_memory = int(self.settings.max_system_memory) min_system_memory = int(self.settings.min_system_memory) active = max(min(total, max_system_memory), min_system_memory) return available, active, total def _system_memory_mb_psutil(self): try: import psutil pymem = psutil.virtual_memory() return (pymem.free/1000000, pymem.total/1000000) except ImportError: LOGGER.debug("No psutil installed") return None def _system_memory_mb_java(self): import omero.cli import omero.java # Copied from db.py. Needs better dir detection cwd = omero.cli.CLI().dir server_jar = cwd / "lib" / "server" / "server.jar" cmd = ["ome.services.util.JvmSettingsCheck", "--psutil"] p = omero.java.popen(["-cp", str(server_jar)] + cmd) o, e = p.communicate() if p.poll() != 0: LOGGER.warn("Failed to invoke java:\nout:%s\nerr:%s", o, e) rv = dict() for line in o.split("\n"): line = line.strip() if not line: continue parts = line.split(":") if len(parts) == 1: parts.append("") rv[parts[0]] = parts[1] try: free = long(rv["Free"]) / 1000000 except: LOGGER.warn("Failed to parse Free from %s", rv) free = 2000 try: total = long(rv["Total"]) / 1000000 except: LOGGER.warn("Failed to parse Total from %s", rv) total = 4000 return (free, total) # API Getters def get_heap_size(self, sz=None): if sz is None or self.settings.was_set("heap_size"): sz = self.settings.heap_size if str(sz).startswith("-X"): return sz else: rv = "-Xmx%s" % sz if rv[-1].lower() not in ("b", "k", "m", "g"): rv = "%sm" % rv return rv def get_heap_dump(self): hd = self.settings.heap_dump if hd == "off": return "" elif hd in ("on", "cwd", "tmp"): return "-XX:+HeapDumpOnOutOfMemoryError" def get_perm_gen(self): pg = self.settings.perm_gen if str(pg).startswith("-XX"): return pg else: return "-XX:MaxPermSize=%s" % pg def get_append(self): values = [] if self.settings.heap_dump == "tmp": import tempfile tmp = tempfile.gettempdir() values.append("-XX:HeapDumpPath=%s" % tmp) return values + split(self.settings.append) def get_memory_settings(self): values = [ self.get_heap_size(), self.get_heap_dump(), self.get_perm_gen(), ] if any([x.startswith("-XX:MaxPermSize") for x in values]): values.append("-XX:+IgnoreUnrecognizedVMOptions") values += self.get_append() return [x for x in values if x] class ManualStrategy(Strategy): """ Simplest strategy which assumes all values have been set and simply uses them or their defaults. 
""" class PercentStrategy(Strategy): """ Strategy based on a percent of available memory. """ PERCENT_DEFAULTS = ( ("blitz", 15), ("pixeldata", 15), ("indexer", 10), ("repository", 10), ("other", 1), ) def __init__(self, name, settings=None): super(PercentStrategy, self).__init__(name, settings) self.defaults = dict(self.PERCENT_DEFAULTS) self.use_active = True def get_heap_size(self): """ Uses the results of the default settings of calculate_heap_size() as an argument to get_heap_size(), in other words some percent of the active memory. """ sz = self.calculate_heap_size() return super(PercentStrategy, self).get_heap_size(sz) def get_percent(self): other = self.defaults.get("other", "1") default = self.defaults.get(self.name, other) percent = int(self.settings.lookup("percent", default)) return percent def get_perm_gen(self): available, active, total = self.system_memory_mb() choice = self.use_active and active or total if choice <= 4000: if choice >= 2000: self.settings.overwrite("perm_gen", "256m") elif choice <= 8000: self.settings.overwrite("perm_gen", "512m") else: self.settings.overwrite("perm_gen", "1g") return super(PercentStrategy, self).get_perm_gen() def calculate_heap_size(self, method=None): """ Re-calculates the appropriate heap size based on the value of get_percent(). The "active" memory returned by method() will be used by default, but can be modified to use "total" via the "use_active" flag. """ if method is None: method = self.system_memory_mb available, active, total = method() choice = self.use_active and active or total percent = self.get_percent() calculated = choice * int(percent) / 100 return calculated def usage_table(self, min=10, max=20): total_mb = [2**x for x in range(min, max)] for total in total_mb: method = lambda: (total, total, total) yield total, self.calculate_heap_size(method) STRATEGY_REGISTRY["manual"] = ManualStrategy STRATEGY_REGISTRY["percent"] = PercentStrategy def read_settings(template_xml): """ Read the memory settings from the temlates file """ rv = dict() for template in template_xml.findall("server-template"): for server in template.findall("server"): for option in server.findall("option"): o = option.text if o.startswith("-Xmx") | o.startswith("-XX"): rv.setdefault(server.get('id'), []).append(o) return rv def adjust_settings(config, template_xml, blitz=None, indexer=None, pixeldata=None, repository=None): """ Takes an omero.config.ConfigXml object and adjusts the memory settings. Primary entry point to the memory module. """ from xml.etree.ElementTree import Element from collections import defaultdict replacements = dict() options = dict() for template in template_xml.findall("server-template"): for server in template.findall("server"): for option in server.findall("option"): o = option.text if o.startswith("MEMORY:"): options[o[7:]] = (server, option) for props in server.findall("properties"): for prop in props.findall("property"): name = prop.attrib.get("name", "") if name.startswith("REPLACEMENT:"): replacements[name[12:]] = (server, prop) rv = defaultdict(list) m = config.as_map() loop = (("blitz", blitz), ("indexer", indexer), ("pixeldata", pixeldata), ("repository", repository)) for name, StrategyType in loop: if name not in options: raise Exception( "Cannot find %s option. 
Make sure templates.xml was " "not copied from an older server" % name) for name, StrategyType in loop: specific = strip_dict(m, suffix=name) defaults = strip_dict(m) settings = Settings(specific, defaults) rv[name].append(settings) if StrategyType is None: StrategyType = settings.get_strategy() if not callable(StrategyType): raise Exception("Bad strategy: %s" % StrategyType) strategy = StrategyType(name, settings) settings = strategy.get_memory_settings() server, option = options[name] idx = 0 for v in settings: rv[name].append(v) if idx == 0: option.text = v else: elem = Element("option") elem.text = v server.insert(idx, elem) idx += 1 # Now we check for any other properties and # put them where the replacement should go. for k, v in m.items(): r = [] suffix = ".%s" % name size = len(suffix) if k.endswith(suffix): k = k[:-size] r.append((k, v)) server, replacement = replacements[name] idx = 0 for k, v in r: if idx == 0: replacement.attrib["name"] = k replacement.attrib["value"] = v else: elem = Element("property", name=k, value=v) server.append(elem) return rv def usage_charts(path, min=0, max=20, Strategy=PercentStrategy, name="blitz"): # See http://matplotlib.org/examples/pylab_examples/anscombe.html from pylab import array from pylab import axis from pylab import gca from pylab import subplot from pylab import plot from pylab import setp from pylab import savefig from pylab import text points = 200 x = array([2 ** (x / points) / 1000 for x in range(min*points, max*points)]) y_configs = ( (Settings({}), 'A'), (Settings({"percent": "20"}), 'B'), (Settings({}), 'C'), (Settings({"max_system_memory": "10000"}), 'D'), ) def f(cfg): s = Strategy(name, settings=cfg[0]) y = [] for total in x: method = lambda: (total, total, total) y.append(s.calculate_heap_size(method)) return y y1 = f(y_configs[0]) y2 = f(y_configs[1]) y3 = f(y_configs[2]) y4 = f(y_configs[3]) axis_values = [0, 20, 0, 6] def ticks_f(): setp(gca(), xticks=(8, 16), yticks=(2, 4)) def text_f(which): cfg = y_configs[which] # s = cfg[0] txt = "%s" % (cfg[1],) text(2, 2, txt, fontsize=20) subplot(221) plot(x, y1) axis(axis_values) text_f(0) ticks_f() subplot(222) plot(x, y2) axis(axis_values) text_f(1) ticks_f() subplot(223) plot(x, y3) axis(axis_values) text_f(2) ticks_f() subplot(224) plot(x, y4) axis(axis_values) text_f(3) ticks_f() savefig(path)
gpl-2.0
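A hedged re-derivation of the PercentStrategy heap sizing described above, done with plain arithmetic: total memory is clamped to [min_system_memory, max_system_memory] to get the "active" value, and the heap is a percentage of that. The 64 GB total is made up; the defaults mirror Settings and PERCENT_DEFAULTS.

total_mb = 64000            # hypothetical machine memory in MB
min_system_memory = 3414    # defaults from Settings above
max_system_memory = 48000
percent = 15                # PERCENT_DEFAULTS entry for 'blitz'

active = max(min(total_mb, max_system_memory), min_system_memory)   # -> 48000
heap_mb = active * percent // 100                                   # -> 7200
print('-Xmx%sm' % heap_mb)   # -> -Xmx7200m, the flag get_heap_size() would emit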
LLNL/spack
var/spack/repos/builtin/packages/py-misopy/package.py
5
1114
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyMisopy(PythonPackage): """MISO (Mixture of Isoforms) is a probabilistic framework that quantitates the expression level of alternatively spliced genes from RNA-Seq data, and identifies differentially regulated isoforms or exons across samples.""" homepage = "http://miso.readthedocs.io/en/fastmiso/" url = "https://pypi.io/packages/source/m/misopy/misopy-0.5.4.tar.gz" version('0.5.4', sha256='377a28b0c254b1920ffdc2d89cf96c3a21cadf1cf148ee6d6ef7a88ada067dfc') depends_on('py-setuptools', type='build') depends_on('python@2.6:', type=('build', 'run')) depends_on('py-numpy@1.5.0:', type=('build', 'run')) depends_on('py-scipy@0.9.0:', type=('build', 'run')) depends_on('py-pysam@0.6.0:', type=('build', 'run')) depends_on('py-matplotlib', type=('build', 'run')) depends_on('samtools') depends_on('bedtools2')
lgpl-2.1
rkuchan/Tax-Calculator
taxcalc/tests/test_records.py
3
1742
import os import sys CUR_PATH = os.path.abspath(os.path.dirname(__file__)) sys.path.append(os.path.join(CUR_PATH, "../../")) import numpy as np from numpy.testing import assert_array_equal import pandas as pd import pytest import tempfile from numba import jit, vectorize, guvectorize from taxcalc import * from taxcalc.utils import expand_array tax_dta_path = os.path.join(CUR_PATH, "../../tax_all1991_puf.gz") def test_create_records(): r = Records(tax_dta_path) assert r def test_create_records_from_file(): r = Records.from_file(tax_dta_path) assert r def test_imputation(): e17500 = np.array([20., 4.4, 5.]) e00100 = np.array([40., 8.1, 90.1]) e18400 = np.array([25., 34., 10.]) e18425 = np.array([42., 20.3, 49.]) e62100 = np.array([75., 12.4, 84.]) e00700 = np.array([43.3, 34.1, 3.4]) e04470 = np.array([21.2, 12., 13.1]) e21040 = np.array([45.9, 3., 45.]) e18500 = np.array([33.1, 18.2, 39.]) e20800 = np.array([0.9, 32., 52.1]) cmbtp_itemizer = np.array([68.4, -31.0025, -84.7]) """ Test case values: x = max(0., e17500 - max(0., e00100) * 0.075) = [17., 3.7925, 0] medical_adjustment = min(x, 0.025 * max(0.,e00100)) = [-1.,-.2025,0] state_adjustment = max(0, max(e18400, e18425)) = [42., 34., 49.] _cmbtp_itemizer = (e62100 - medical_adjustment + e00700 + e04470 + e21040 - z - e00100 - e18500 - e20800) = [68.4, -31.0025 ,-84.7] """ test_itemizer = records.imputation(e17500, e00100, e18400, e18425, e62100, e00700, e04470, e21040, e18500, e20800) assert(np.allclose(cmbtp_itemizer, test_itemizer))
mit
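An independent re-derivation of the first test case documented in test_imputation() above, using the scalar values from its arrays; this is only a numerical sanity check of the docstring arithmetic, not a call into taxcalc.

import numpy as np

e17500, e00100, e18400, e18425 = 20., 40., 25., 42.
e62100, e00700, e04470, e21040 = 75., 43.3, 21.2, 45.9
e18500, e20800 = 33.1, 0.9

x = max(0., e17500 - max(0., e00100) * 0.075)          # 17.0
medical_adjustment = min(x, 0.025 * max(0., e00100))   # 1.0 (subtracted below)
state_adjustment = max(0., max(e18400, e18425))        # 42.0
cmbtp = (e62100 - medical_adjustment + e00700 + e04470 + e21040
         - state_adjustment - e00100 - e18500 - e20800)
print(np.isclose(cmbtp, 68.4))   # -> True, matching cmbtp_itemizer[0]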
VisualComputingInstitute/towards-reid-tracking
track.py
1
15761
#TODO: comments/doc import numpy as np from filterpy.kalman import KalmanFilter import scipy from scipy import ndimage from scipy import signal from scipy.linalg import block_diag,inv from filterpy.common import Q_discrete_white_noise from filterpy.stats import plot_covariance_ellipse import matplotlib.pyplot as plt from os.path import join as pjoin import lib import lbtoolbox.plotting as lbplt # all_bs for bbox regression all_bs = np.array([[256.3190, -0.0207, 136.6533, 0.1978], [212.9634, 0.0055, 126.0157, 0.2036], [277.3869, -0.0154, 5.2019, 0.4442], [-296.1867, 0.3356, 54.3528, 0.3093], [258.1709, -0.0258, 144.2437, 0.2030], [152.2878, 0.0296, -271.9162, 0.6985], [208.9894, 0.0349, -298.6897, 0.7266], [170.6156, 0.0128, 81.8043, 0.1659]]) HOT_CMAP = lib.get_transparent_colormap() class Track(object): """ Implements a track (not a tracker, a track). With KalmanFilter and some other stuff like status for track management Attributes ---------- TODO: Move to time using dt """ def __init__(self, embed_crops_fn, curr_frame, init_pose, image, state_shape, state_pad, output_shape, track_id=-1, dist_thresh=7, entropy_thresh=0.10, unmiss_thresh=2, delete_thresh=90, tp_hack=None, maxlife=None, debug_out_dir=None): self.embed_crops_fn = embed_crops_fn self.debug_out_dir = debug_out_dir init_x = [0.0, 0.0] #self.init_P_scale = 200.0 #self.init_P_scale = 5.0 self.init_P_scale = 5.0**2 self.DIST_THRESH = dist_thresh self.ENT_THRESH = entropy_thresh #self.VEL_MEAS_CERT_THRESH = 0.015 self.KF = KalmanFilter(dim_x=2, dim_z=2) self.KF.F = np.array([[1, 0], [0, 1]], dtype=np.float64) #q = Q_discrete_white_noise(dim=2, dt=dt, var=200.) #self.KF.Q = block_diag(q, q) # TODO: matrix design for all the filters #self.KF.Q = q # heatmap v only # 0.02 #self.KF.Q = 0.02*np.eye(2) # Process noise. Always added to prediction. Higher = uncertainty grows faster when no measurement self.KF.Q = 0.3**2*np.eye(2) # Process noise. Always added to prediction. Higher = uncertainty grows faster when no measurement self.KF.H = np.array([[1, 0], [0, 1]], dtype=np.float64) #self.KF.R = 100.0*np.eye(2) # Measurement variance. Lower: jump more to measurement self.KF.R = 20.0**2*np.eye(2) # Lower: jump more to measurement self.KF.x = init_x self.KF.P = self.init_P_scale*np.eye(2) self.track_id = track_id self.color = np.random.rand(3) self.hm_colormap = lbplt.linear_map((1,1,1), self.color) self.hm_colormap = lib.get_transparent_colormap(self.hm_colormap) self.xs=[self.KF.x] self.Ps=[self.KF.P] self.missed_for = 0 self.missed_sightings = 0 self.deleted_at = 0 self.last_matched_at = curr_frame self.created_at = curr_frame self.n_exits = 0 self.status = 'matched' # matched, missed, deleted self.age = 1 #age in frames self.MAXLIFE = maxlife self.TP_HACK = tp_hack #missed for [delete_thresh] times? delete! #self.DELETE_THRESH = 300 #90 # 1.5s self.DELETE_THRESH = delete_thresh # 1.5s # How many times do I need to see him while he's missing to un-miss him? 
self.UNMISS_THRESH = unmiss_thresh self.state_shape = state_shape self.state_pad = state_pad self.output_shape = output_shape pad_y, pad_x = state_pad[0][0], state_pad[1][0] self.poses=[np.array([init_pose[0]+pad_x, init_pose[1]+pad_y])] self.embedding = None self.update_embedding(self.get_embedding_at_current_pos(image, curr_frame)) def init_heatmap(self, heatmap): #self.pos_heatmap = self.resize_map_to_state(np.full_like(heatmap, 1/np.prod(heatmap.shape))) self.pos_heatmap = self.resize_map_to_state(heatmap) self.old_heatmap = None #self.id_heatmap = np.full_like(heatmap, 1/np.prod(self.pos_heatmap.shape)) self.id_heatmap = self.resize_map_to_state(np.full_like(heatmap, 1/np.prod(heatmap.shape))) self.idmap_ent = 0.0 #lib.entropy_score_avg(self.id_heatmap) self.idmap_score = 9999 # np.min(id_distmap) self.this_map_good = False #self.idmap_score < self.DIST_THRESH and self.ENT_THRESH < self.idmap_ent # ==Heatmap stuff== def resize_map_to_state(self, heatmap, keep_sum=True): assert heatmap.shape == self.state_shape, "Lying Lucas giving me a heatmap that's not state-shaped!" #hm = np.pad(heatmap, self.state_pad, mode='constant', constant_values=1/np.prod(heatmap.shape)) hm = np.pad(heatmap, self.state_pad, mode='edge') if keep_sum: hm /= np.sum(hm)*np.sum(heatmap) return hm #return lib.resize_map(heatmap, self.state_shape, interp='bicubic') def unpad_state_map(self, statemap): return statemap[self.state_pad[0][0]:-self.state_pad[0][1], self.state_pad[1][0]:-self.state_pad[1][1]] def get_crop_at_pos(self,pos,image): # TODO: fix bb: 128x48 x, y = pos box_c = lib.box_centered(x, y, 128, 48, bounds=(0,0,image.shape[1],image.shape[0])) crop = lib.cutout_abs_hwc(image, box_c) return crop def get_embedding_at_current_pos(self, image, debug_curr_frame): crop = self.get_crop_at_pos( self.state_to_output(*self.poses[-1], output_shape=(image.shape[0], image.shape[1])), image ) if self.debug_out_dir is not None: lib.imwrite(pjoin(self.debug_out_dir, 'crops', '{}-{}.jpg'.format(self.track_id, debug_curr_frame)), crop) return self.embed_crops_fn(crop[None], fake_id=self.track_id)[0] def update_embedding(self, new_embedding): if self.embedding is None: self.embedding = new_embedding self.n_embs_seen = 1 else: return # For this paper, we ignore new embeddings as the first is almost perfect. #self.embedding = self.embedding*self.n_embs_seen + new_embedding #self.n_embs_seen += 1 #self.embedding /= self.n_embs_seen # ==Track state== def state_to_output(self, x, y, output_shape=None, ignore_padding=False): """ The optional `output_shape` is in (H,W) format. 
""" if output_shape is None: output_shape = self.output_shape if not ignore_padding: x = x - self.state_pad[1][0] y = y - self.state_pad[0][0] return np.array([ x/self.state_shape[1]*output_shape[1], y/self.state_shape[0]*output_shape[0] ]) def states_to_outputs(self, xy, output_shape, ignore_padding=False): # xy is of shape (N,2) if output_shape is None: output_shape = self.output_shape if not ignore_padding: xy = xy - np.array([[self.state_pad[1][0], self.state_pad[0][0]]]) factors = [output_shape[1]/self.state_shape[1], output_shape[0]/self.state_shape[0]] return xy*factors def estimate_peak_xy(self, heatmap): #return lib.argmax2d_xy(heatmap) return lib.expected_xy(heatmap, magic_thresh=2) def get_velocity_estimate(self, old_heatmap, pos_heatmap): old_peak = self.estimate_peak_xy(old_heatmap) new_peak = self.estimate_peak_xy(pos_heatmap) return new_peak - old_peak def track_predict(self): vx, vy = self.KF.x #self.pred_heatmap = scipy.ndimage.shift(self.pos_heatmap, [vy, vx]) gaussian = lib.gauss2d_xy(np.clip(self.KF.P, 1e-5, self.init_P_scale), nstd=2, mean=[-vx, -vy]) self.pred_heatmap = lib.convolve_edge_same(self.pos_heatmap, gaussian) self.pred_heatmap /= np.sum(self.pred_heatmap) # Re-normalize to probabilities # standard KF self.KF.predict() def track_update(self, id_heatmap, id_distmap, curr_frame, image_getter): self.age += 1 # Hard rule for pathological cases. if self.MAXLIFE is not None and self.MAXLIFE < self.age: print("WARNING: Killing one of age.") return self.track_is_deleted(curr_frame) self.old_heatmap = self.pos_heatmap self.old_map_good = self.this_map_good self.id_heatmap = self.resize_map_to_state(id_heatmap) self.idmap_ent = lib.entropy_score_avg(self.id_heatmap) self.idmap_score = np.min(id_distmap) self.this_map_good = self.idmap_score < self.DIST_THRESH and self.ENT_THRESH < self.idmap_ent if self.this_map_good: self.pos_heatmap = self.pred_heatmap*self.id_heatmap self.pos_heatmap /= np.sum(self.pos_heatmap) # Re-normalize to probabilities # Discard impossible jumps. TODO: It's a hack if self.TP_HACK is not None: xy = self.estimate_peak_xy(self.pos_heatmap) tpdist = np.sqrt(np.sum((self.poses[-1] - xy)**2)) if tpdist > self.TP_HACK: self.pos_heatmap = self.pred_heatmap self.this_map_good = False else: self.pos_heatmap = self.pred_heatmap #self.pos_heatmap = self.pred_heatmap*lib.softmax(self.id_heatmap, T=10) #self.pos_heatmap /= np.sum(self.pos_heatmap) # Re-normalize to probabilities #self.pos_heatmap = self.pred_heatmap*self.id_heatmap #self.pos_heatmap /= np.sum(self.pos_heatmap) # Re-normalize to probabilities # Compute a velocity measurement from previous and current peaks in heatmap. # The certainty of the velocity measurement is a function of the certainties of # both position "measurements", i.e. how peaky both heatmaps are. #self.vel_meas_certainty = lib.entropy_score_avg(self.old_heatmap)*lib.entropy_score_avg(self.pos_heatmap) #self.vel_meas_certainty = prev_id_heatmap_ent*this_id_heatmap_ent #if self.VEL_MEAS_CERT_THRESH < self.vel_meas_certainty: if self.old_map_good and self.this_map_good: vel_measurement = self.get_velocity_estimate(self.old_heatmap, self.pos_heatmap) #self.KF.R = ... self.KF.update(vel_measurement) self.xs.append(self.KF.x) self.Ps.append(self.KF.P) self.poses.append(self.estimate_peak_xy(self.pos_heatmap)) if self.this_map_good: self.track_is_matched(curr_frame) # update embedding. Needs to happen after the above, as that updates current_pos. # TODO: Future work. Currently we only keep initial one. 
#self.update_embedding(self.get_embedding_at_current_pos(image_getter(), curr_frame)) else: self.track_is_missed(curr_frame) # ==Track status management== def track_is_missed(self, curr_frame): self.missed_for += 1 self.status = 'missed' if self.missed_for >= self.DELETE_THRESH: # or self.n_exits > 10: self.track_is_deleted(curr_frame) else: pass # TODO: Such "exit zones" are a workaround, a larger-than-image map would be better. #x, y = self.poses[-1] #vx, vy = self.xs[-1] #if (x == 0 and vx < 0) or \ # (x == self.pos_heatmap.shape[1]-1 and 0 < vx) or \ # (y == 0 and vy < 0) or \ # (y == self.pos_heatmap.shape[0]-1 and 0 < vy): # self.n_exits += 1 def track_is_matched(self, curr_frame): if 0 < self.missed_for: # Been missing until now, but... self.missed_sightings += 1 # ...Only revive if seen enough times! if self.missed_sightings < self.UNMISS_THRESH: return self.last_matched_at = curr_frame self.status = 'matched' self.missed_for = 0 self.missed_sightings = 0 self.n_exits = 0 def track_is_deleted(self,curr_frame): self.deleted_at = curr_frame self.status = 'deleted' # ==Evaluation== def get_track_eval_line(self, cid, frame): #dukeMTMC format #[cam, ID, frame, left, top, width, height, worldX, worldY] cX, cY = self.state_to_output(*self.poses[-1]) h = int(((all_bs[cid-1][0]+all_bs[cid-1][1]*cX) + (all_bs[cid-1][2]+all_bs[cid-1][3]*cY))/2) w = int(0.4*h) l = int(cX-w/2) t = int(cY-h/2) # id-shift-quick-hack for multi-cam eval. return [cid, self.track_id+cid*100000, lib.glob2loc(frame, cid), l, t, w, h, -1, -1] # ==Visualization== def plot_track(self, ax, plot_past_trajectory=False, output_shape=None, time_scale=1): if output_shape is None: output_shape = self.output_shape if self.status == 'deleted': return #plot_covariance_ellipse((self.KF.x[0], self.KF.x[2]), self.KF.P, fc=self.color, alpha=0.4, std=[1,2,3]) #print(self.poses) cX, cY = self.state_to_output(*self.poses[-1], output_shape=output_shape) vX, vY = self.state_to_output(*self.xs[-1], output_shape=output_shape, ignore_padding=True)*time_scale #print('vX: {}, vY: {}'.format(vX,vY)) ax.plot(cX, cY, color=self.color, marker='o') ax.arrow(cX, cY, vX, vY, head_width=20, head_length=7, fc=self.color, ec=self.color, linestyle='--') # TODO: The cov is not in output space! 
#plot_covariance_ellipse((cX+vX, cY+vY), self.Ps[-1], fc=self.color, alpha=0.5, std=[1, 2, 3]) #plt.text(*self.state_to_output(*self.poses[-1], output_shape=output_shape), s='{}'.format(self.embedding)) if plot_past_trajectory and len(self.poses)>1: outputs_xy = self.states_to_outputs(np.array(self.poses), output_shape) ax.plot(*outputs_xy.T, linewidth=2.0, color=self.color) def _plot_heatmap(self, ax, hm, output_shape=None): if self.status == 'deleted': return if output_shape is None: output_shape = self.output_shape return ax.imshow(self.unpad_state_map(hm), interpolation='none', cmap=self.hm_colormap, #clim=(0, lib.ramp(lib.entropy_score(hm), 0.2, 1, 0.8, np.max(hm))), #alpha=0.5, extent=[0, output_shape[1], output_shape[0], 0]) def plot_pos_heatmap(self, ax, output_shape=None): hm = self._plot_heatmap(ax, self.pos_heatmap, output_shape) vX, vY = self.state_to_output(*self.xs[-1], output_shape=output_shape, ignore_padding=True) ax.text(*self.state_to_output(*self.poses[-1], output_shape=output_shape), s='{:.2f} ({:.2f}, {:.2f})'.format(np.sqrt(vX*vX + vY*vY), vX, vY)) return hm def plot_pred_heatmap(self, ax, output_shape=None): hm = self._plot_heatmap(ax, self.pred_heatmap, output_shape) if hasattr(self, 'vel_meas_certainty'): ax.text(*self.state_to_output(*self.poses[-1], output_shape=output_shape), s='{:.8f}'.format(self.vel_meas_certainty)) return hm def plot_id_heatmap(self, ax, output_shape=None): hm = self._plot_heatmap(ax, self.id_heatmap, output_shape) if hasattr(self, 'idmap_score'): ax.text(*self.state_to_output(*self.poses[-1], output_shape=output_shape), s='{:.2f} | {:.3f}'.format(self.idmap_score, self.idmap_ent)) return hm
mit
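A minimal sketch of the velocity-only Kalman filter that Track configures above (F = H = I, so the state is just a smoothed 2-D velocity). The measurement sequence is made up; in Track the measurement is the shift between consecutive heatmap peaks, and a missed frame simply skips the update.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=2)
kf.F = np.eye(2)                 # velocity assumed locally constant
kf.H = np.eye(2)                 # the velocity itself is measured
kf.Q = 0.3**2 * np.eye(2)        # process noise: uncertainty growth per frame
kf.R = 20.0**2 * np.eye(2)       # measurement noise: how much to trust each peak shift
kf.x = np.zeros(2)
kf.P = 5.0**2 * np.eye(2)

for z in ([1.0, 0.5], [1.2, 0.4], None, [0.9, 0.6]):   # None stands for a missed frame
    kf.predict()
    if z is not None:
        kf.update(np.asarray(z))
    print(kf.x)                  # smoothed velocity estimate after each frame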
Srisai85/scikit-learn
sklearn/setup.py
225
2856
import os from os.path import join import warnings def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info, BlasNotFoundError import numpy libraries = [] if os.name == 'posix': libraries.append('m') config = Configuration('sklearn', parent_package, top_path) config.add_subpackage('__check_build') config.add_subpackage('svm') config.add_subpackage('datasets') config.add_subpackage('datasets/tests') config.add_subpackage('feature_extraction') config.add_subpackage('feature_extraction/tests') config.add_subpackage('cluster') config.add_subpackage('cluster/tests') config.add_subpackage('covariance') config.add_subpackage('covariance/tests') config.add_subpackage('cross_decomposition') config.add_subpackage('decomposition') config.add_subpackage('decomposition/tests') config.add_subpackage("ensemble") config.add_subpackage("ensemble/tests") config.add_subpackage('feature_selection') config.add_subpackage('feature_selection/tests') config.add_subpackage('utils') config.add_subpackage('utils/tests') config.add_subpackage('externals') config.add_subpackage('mixture') config.add_subpackage('mixture/tests') config.add_subpackage('gaussian_process') config.add_subpackage('gaussian_process/tests') config.add_subpackage('neighbors') config.add_subpackage('neural_network') config.add_subpackage('preprocessing') config.add_subpackage('manifold') config.add_subpackage('metrics') config.add_subpackage('semi_supervised') config.add_subpackage("tree") config.add_subpackage("tree/tests") config.add_subpackage('metrics/tests') config.add_subpackage('metrics/cluster') config.add_subpackage('metrics/cluster/tests') # add cython extension module for isotonic regression config.add_extension( '_isotonic', sources=['_isotonic.c'], include_dirs=[numpy.get_include()], libraries=libraries, ) # some libs needs cblas, fortran-compiled BLAS will not be sufficient blas_info = get_info('blas_opt', 0) if (not blas_info) or ( ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])): config.add_library('cblas', sources=[join('src', 'cblas', '*.c')]) warnings.warn(BlasNotFoundError.__doc__) # the following packages depend on cblas, so they have to be build # after the above. config.add_subpackage('linear_model') config.add_subpackage('utils') # add the test directory config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
bsd-3-clause
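The sklearn/setup.py record above follows the numpy.distutils pattern: a `configuration()` function registers subpackages and C extensions and is consumed by `setup(**configuration(top_path='').todict())`. Below is a minimal sketch of the same pattern for a hypothetical package `mypkg` (package and source names are illustrative only, and `numpy.distutils` is deprecated in recent NumPy releases).

```python
# setup.py -- minimal numpy.distutils-style configuration for a hypothetical package
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    import numpy

    config = Configuration('mypkg', parent_package, top_path)
    config.add_subpackage('utils')          # expects a mypkg/utils/ directory to exist
    config.add_extension(
        '_fastops',                         # builds mypkg._fastops from C sources
        sources=['src/_fastops.c'],
        include_dirs=[numpy.get_include()],
    )
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
```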
prabhjyotsingh/incubator-zeppelin
python/src/main/resources/python/mpl_config.py
41
3653
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module provides utitlites for users to configure the inline plotting # backend through a PyZeppelinContext instance (eg, through z.configure_mpl()) import matplotlib def configure(**kwargs): """ Generic configure function. Usage: configure(prop1='foo', prop2='bar', ...) Currently supported zeppelin-specific properties are: interactive - If true show all figures without explicit call to show() via a post-execute hook. angular - If true, bind figures to angular display system. close - If true, close all figures once shown. width, height - Default width / height of the figure in pixels. fontsize - Font size. dpi - dpi of the figure. fmt - Figure format supported_formats - Supported Figure formats () context - ZeppelinContext instance (requires PY4J) """ _config.update(**kwargs) # Broadcast relevant changes to matplotlib RC _on_config_change() def get(key): """ Get the configuration info given a key """ return _config[key] def _on_config_change(): # dpi dpi = _config['dpi'] # For older versions of matplotlib, savefig.dpi is not synced with # figure.dpi by default matplotlib.rcParams['figure.dpi'] = dpi if matplotlib.__version__ < '2.0.0': matplotlib.rcParams['savefig.dpi'] = dpi # Width and height width = float(_config['width']) / dpi height = float(_config['height']) / dpi matplotlib.rcParams['figure.figsize'] = (width, height) # Font size fontsize = _config['fontsize'] matplotlib.rcParams['font.size'] = fontsize # Default Figure Format fmt = _config['format'] supported_formats = _config['supported_formats'] if fmt not in supported_formats: raise ValueError("Unsupported format %s" %fmt) if matplotlib.__version__ < '1.2.0': matplotlib.rcParams.update({'savefig.format': fmt}) else: matplotlib.rcParams['savefig.format'] = fmt # Interactive mode interactive = _config['interactive'] matplotlib.interactive(interactive) def _init_config(): dpi = matplotlib.rcParams['figure.dpi'] if matplotlib.__version__ < '1.2.0': matplotlib.rcParams.update({'savefig.format': 'png'}) fmt = matplotlib.rcParams['savefig.format'] width, height = matplotlib.rcParams['figure.figsize'] fontsize = matplotlib.rcParams['font.size'] _config['dpi'] = dpi _config['format'] = fmt _config['width'] = width*dpi _config['height'] = height*dpi _config['fontsize'] = fontsize _config['close'] = True _config['interactive'] = matplotlib.is_interactive() _config['angular'] = False _config['supported_formats'] = ['png', 'jpg', 'svg'] _config['context'] = None _config = {} _init_config()
apache-2.0
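The mpl_config record above keeps a module-level `_config` dict and re-applies it to matplotlib's rcParams whenever `configure(**kwargs)` is called, while `get(key)` reads a single entry back. In Zeppelin this is normally driven through `z.configure_mpl()`, but a rough direct-usage sketch looks like the following; it assumes `mpl_config.py` is importable on the Python path, and the printed values are expectations rather than captured output.

```python
import matplotlib
matplotlib.use('Agg')          # headless backend, just for this sketch

import mpl_config              # the module shown in the record above

# Update the inline-plotting defaults; these keys are created in _init_config()
mpl_config.configure(width=800, height=600, dpi=100, fontsize=12, format='svg')

print(mpl_config.get('dpi'))                      # expected: 100
print(matplotlib.rcParams['figure.figsize'])      # expected: [8.0, 6.0]  (width/dpi, height/dpi)
print(matplotlib.rcParams['savefig.format'])      # expected: 'svg'
```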
ddboline/pylearn2
pylearn2/scripts/plot_monitor.py
37
10204
#!/usr/bin/env python """ usage: plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl Loads any number of .pkl files produced by train.py. Extracts all of their monitoring channels and prompts the user to select a subset of them to be plotted. """ from __future__ import print_function __authors__ = "Ian Goodfellow, Harm Aarts" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import gc import numpy as np import sys from theano.compat.six.moves import input, xrange from pylearn2.utils import serial from theano.printing import _TagGenerator from pylearn2.utils.string_utils import number_aware_alphabetical_key from pylearn2.utils import contains_nan, contains_inf import argparse channels = {} def unique_substring(s, other, min_size=1): """ .. todo:: WRITEME """ size = min(len(s), min_size) while size <= len(s): for pos in xrange(0,len(s)-size+1): rval = s[pos:pos+size] fail = False for o in other: if o.find(rval) != -1: fail = True break if not fail: return rval size += 1 # no unique substring return s def unique_substrings(l, min_size=1): """ .. todo:: WRITEME """ return [unique_substring(s, [x for x in l if x is not s], min_size) for s in l] def main(): """ .. todo:: WRITEME """ parser = argparse.ArgumentParser() parser.add_argument("--out") parser.add_argument("model_paths", nargs='+') parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1') options = parser.parse_args() model_paths = options.model_paths if options.out is not None: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt print('generating names...') model_names = [model_path.replace('.pkl', '!') for model_path in model_paths] model_names = unique_substrings(model_names, min_size=10) model_names = [model_name.replace('!','') for model_name in model_names] print('...done') for i, arg in enumerate(model_paths): try: model = serial.load(arg) except Exception: if arg.endswith('.yaml'): print(sys.stderr, arg + " is a yaml config file," + "you need to load a trained model.", file=sys.stderr) quit(-1) raise this_model_channels = model.monitor.channels if len(sys.argv) > 2: postfix = ":" + model_names[i] else: postfix = "" for channel in this_model_channels: channels[channel+postfix] = this_model_channels[channel] del model gc.collect() while True: # Make a list of short codes for each channel so user can specify them # easily tag_generator = _TagGenerator() codebook = {} sorted_codes = [] for channel_name in sorted(channels, key = number_aware_alphabetical_key): code = tag_generator.get_tag() codebook[code] = channel_name codebook['<'+channel_name+'>'] = channel_name sorted_codes.append(code) x_axis = 'example' print('set x_axis to example') if len(channels.values()) == 0: print("there are no channels to plot") break # If there is more than one channel in the monitor ask which ones to # plot prompt = len(channels.values()) > 1 if prompt: # Display the codebook for code in sorted_codes: print(code + '. 
' + codebook[code]) print() print("Put e, b, s or h in the list somewhere to plot " + "epochs, batches, seconds, or hours, respectively.") response = input('Enter a list of channels to plot ' + \ '(example: A, C,F-G, h, <test_err>) or q to quit' + \ ' or o for options: ') if response == 'o': print('1: smooth all channels') print('any other response: do nothing, go back to plotting') response = input('Enter your choice: ') if response == '1': for channel in channels.values(): k = 5 new_val_record = [] for i in xrange(len(channel.val_record)): new_val = 0. count = 0. for j in xrange(max(0, i-k), i+1): new_val += channel.val_record[j] count += 1. new_val_record.append(new_val / count) channel.val_record = new_val_record continue if response == 'q': break #Remove spaces response = response.replace(' ','') #Split into list codes = response.split(',') final_codes = set([]) for code in codes: if code == 'e': x_axis = 'epoch' continue elif code == 'b': x_axis = 'batche' elif code == 's': x_axis = 'second' elif code == 'h': x_axis = 'hour' elif code.startswith('<'): assert code.endswith('>') final_codes.add(code) elif code.find('-') != -1: #The current list element is a range of codes rng = code.split('-') if len(rng) != 2: print("Input not understood: "+code) quit(-1) found = False for i in xrange(len(sorted_codes)): if sorted_codes[i] == rng[0]: found = True break if not found: print("Invalid code: "+rng[0]) quit(-1) found = False for j in xrange(i,len(sorted_codes)): if sorted_codes[j] == rng[1]: found = True break if not found: print("Invalid code: "+rng[1]) quit(-1) final_codes = final_codes.union(set(sorted_codes[i:j+1])) else: #The current list element is just a single code final_codes = final_codes.union(set([code])) # end for code in codes else: final_codes ,= set(codebook.keys()) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] styles = list(colors) styles += [color+'--' for color in colors] styles += [color+':' for color in colors] fig = plt.figure() ax = plt.subplot(1,1,1) # plot the requested channels for idx, code in enumerate(sorted(final_codes)): channel_name= codebook[code] channel = channels[channel_name] y = np.asarray(channel.val_record) if contains_nan(y): print(channel_name + ' contains NaNs') if contains_inf(y): print(channel_name + 'contains infinite values') if x_axis == 'example': x = np.asarray(channel.example_record) elif x_axis == 'batche': x = np.asarray(channel.batch_record) elif x_axis == 'epoch': try: x = np.asarray(channel.epoch_record) except AttributeError: # older saved monitors won't have epoch_record x = np.arange(len(channel.batch_record)) elif x_axis == 'second': x = np.asarray(channel.time_record) elif x_axis == 'hour': x = np.asarray(channel.time_record) / 3600. else: assert False ax.plot( x, y, styles[idx % len(styles)], marker = '.', # add point margers to lines label = channel_name) plt.xlabel('# '+x_axis+'s') ax.ticklabel_format( scilimits = (-3,3), axis = 'both') handles, labels = ax.get_legend_handles_labels() lgd = ax.legend(handles, labels, loc = 'upper left', bbox_to_anchor = (1.05, 1.02)) # Get the axis positions and the height and width of the legend plt.draw() ax_pos = ax.get_position() pad_width = ax_pos.x0 * fig.get_size_inches()[0] pad_height = ax_pos.y0 * fig.get_size_inches()[1] dpi = fig.get_dpi() lgd_width = ax.get_legend().get_frame().get_width() / dpi lgd_height = ax.get_legend().get_frame().get_height() / dpi # Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches. 
# I had trouble getting everything to align vertically. ax_width = 3 ax_height = 3 total_width = 2*pad_width + ax_width + lgd_width total_height = 2*pad_height + np.maximum(ax_height, lgd_height) fig.set_size_inches(total_width, total_height) ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height]) if(options.yrange is not None): ymin, ymax = map(float, options.yrange.split(':')) plt.ylim(ymin, ymax) if options.out is None: plt.show() else: plt.savefig(options.out) if not prompt: break if __name__ == "__main__": main()
bsd-3-clause
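plot_monitor.py above is an interactive CLI: it loads monitor channels from the given .pkl files, asks which channels to plot, and (option '1') can smooth every channel with a trailing running mean over the current value and up to five preceding ones. Typical command lines and a standalone version of that smoothing step are sketched below; the model file names are placeholders and the function is a re-implementation for illustration, not the pylearn2 code itself.

```python
# Typical invocations (channel selection is interactive):
#   python plot_monitor.py model_1.pkl model_2.pkl
#   python plot_monitor.py model.pkl --out curves.png --yrange 0:1

def smooth_trailing(values, k=5):
    """Running mean over the current point and up to k previous points,
    mirroring the 'smooth all channels' option in plot_monitor.py."""
    out = []
    for i in range(len(values)):
        window = values[max(0, i - k):i + 1]
        out.append(sum(window) / float(len(window)))
    return out


vals = [0.90, 0.70, 0.65, 0.60, 0.58, 0.55, 0.54]
print(smooth_trailing(vals, k=5))
```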
codematician/study
study/ml/tests/test_classifiers.py
1
3754
import unittest import pandas as pd from study.ml.classifiers import DecisionTreeClassifier, LookUpClassifier, MajorityClassifier class ClassifierBaseTest(unittest.TestCase): data1_df = pd.DataFrame({'one': [1., 2., 3., 4.], 'two': [1., 3., 2., 1.]}) class TestDecisionTreeClassifier(ClassifierBaseTest): """Test the LookupClassifier""" def test_creation(self): """Test creation of decision tree classifier""" classifier = DecisionTreeClassifier() self.assertTrue(classifier) def test_fit(self): """Test fit method of decision tree classifier""" fit_res = DecisionTreeClassifier().fit(self.data1_df, "two") self.assertTrue(fit_res) def test_prediction_lookup(self): """Test predict method of decision tree classifier""" classifier = DecisionTreeClassifier() predict_field = "two" classifier.fit(self.data1_df, predict_field) predict_input = pd.Series(data=[2., ], index=["one", ]) prediction = classifier.predict(predict_input) self.assertEqual(prediction, self.data1_df[self.data1_df["one"] == 2.][predict_field].iloc[0]) def test_prediction_default(self): """Test predict method of decision tree classifier""" classifier = DecisionTreeClassifier() predict_field = "two" classifier.fit(self.data1_df, predict_field) predict_input = pd.Series(data=[5., ], index=["one"]) prediction = classifier.predict(predict_input) self.assertEqual(prediction, self.data1_df[predict_field].mode()[0]) class TestLookupClassifier(ClassifierBaseTest): """Test the LookupClassifier""" def test_creation(self): """Test creation of lookup classifier""" classifier = LookUpClassifier() self.assertTrue(classifier) def test_fit(self): """Test fit method of lookup classifier""" fit_res = LookUpClassifier().fit(self.data1_df, "two") self.assertTrue(fit_res) def test_prediction_lookup(self): """Test predict method of lookup classifier""" classifier = LookUpClassifier() predict_field = "two" classifier.fit(self.data1_df, predict_field) predict_input = pd.Series(data=[2., ], index=["one", ]) prediction = classifier.predict(predict_input) self.assertEqual(prediction, self.data1_df[self.data1_df["one"] == 2.][predict_field].iloc[0]) def test_prediction_default(self): """Test predict method of lookup classifier""" classifier = LookUpClassifier() predict_field = "two" classifier.fit(self.data1_df, predict_field) predict_input = pd.Series(data=[5., ], index=["one"]) prediction = classifier.predict(predict_input) self.assertEqual(prediction, self.data1_df[predict_field].mode()[0]) class TestMajorityClassifier(ClassifierBaseTest): """Test the MajoricyClassifier""" def test_creation(self): """Test creation of majority classifier""" classifier = MajorityClassifier() self.assertTrue(classifier) def test_fit(self): """Test fit method of majority classifier""" fit_res = MajorityClassifier().fit(self.data1_df, "two") self.assertTrue(fit_res) def test_prediction(self): """Test predict method of majority classifier""" classifier = MajorityClassifier() predict_field = "two" classifier.fit(self.data1_df, predict_field) predict_input = pd.Series([1., ], index=["one", ]) prediction = classifier.predict(predict_input) self.assertEqual(prediction, self.data1_df[predict_field].mode()[0]) if __name__ == '__main__': unittest.main()
apache-2.0
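The tests above fix the contract of the classifiers rather than showing their implementation: `fit` must return something truthy, MajorityClassifier always predicts the mode of the target column, and the lookup/decision-tree classifiers fall back to that mode when no exact feature match exists. A hypothetical MajorityClassifier that would pass TestMajorityClassifier might look like the sketch below; this is an assumption for illustration, not the actual study.ml.classifiers code.

```python
import pandas as pd


class MajorityClassifier:
    """Predicts the most frequent value of the target column, ignoring features."""

    def fit(self, df, target):
        self._majority = df[target].mode()[0]
        return self          # truthy, as the tests expect from fit()

    def predict(self, row):
        return self._majority


data = pd.DataFrame({'one': [1., 2., 3., 4.], 'two': [1., 3., 2., 1.]})
clf = MajorityClassifier().fit(data, 'two')
print(clf.predict(pd.Series([1.], index=['one'])))   # expected: 1.0, the mode of 'two'
```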
kikimaroca/beamtools
beamtools/dev/specplot.py
1
1194
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jan 8 17:47:28 2018 @author: cpkmanchee """ import numpy as np import matplotlib.pyplot as plt import beamtools as bt from matplotlib.gridspec import GridSpec show_plt = True save_plt = False dpi=600 wlim = [1005,1065] f_sp ='/Users/cpkmanchee/Google Drive/PhD/Data/2016-05-13 REGEN spectra/2016-05-18 REGEN Cavity Dumped/20160518-002.csv' sp = bt.import_data_file(f_sp, 'oo_spec') sp.intensity = bt.normalize(sp.intensity) # flimit, _ = bt.pulse.spectrumFT([spec.wavelength,spec.intensity]) # AiFT = bt.pulse.autocorr(np.abs(flimit.et)**2) plot_grid = [1,1] plot_w = 3 asp_ratio = 0.8*plot_grid[0]/plot_grid[1] plot_h = plot_w*asp_ratio #create grid fig = plt.figure(figsize=(plot_w,plot_h), facecolor='w') gs = GridSpec(plot_grid[0], plot_grid[1]) ax1 = fig.add_subplot(gs[0,0]) #plot spectrum ax1.plot(sp.wavelength, sp.intensity, '-', c='xkcd:brick red') ax1.set_xlabel('Wavelength (nm)') ax1.set_ylabel('Intensity (arb.)') #ax1.set_xlim(wlim) ax1.tick_params('both', labelsize='x-small') fig.tight_layout() if show_plt: plt.show() if save_plt: fig.savefig('spec.png', dpi=dpi, bbox_inches='tight')
mit
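specplot.py above imports an Ocean Optics spectrum with beamtools, normalizes it, and plots it on a single GridSpec axis. Without the data file or beamtools, the same figure layout can be sketched with a synthetic spectrum; the Gaussian below is made up, and `bt.normalize` is assumed here to be a simple divide-by-maximum.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

# Synthetic stand-in for the imported OO spectrum
wavelength = np.linspace(1000, 1070, 500)
intensity = np.exp(-0.5 * ((wavelength - 1035) / 5.0) ** 2)
intensity = intensity / intensity.max()      # peak-normalize (assumed bt.normalize behaviour)

fig = plt.figure(figsize=(3, 2.4), facecolor='w')
gs = GridSpec(1, 1)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(wavelength, intensity, '-', c='xkcd:brick red')
ax1.set_xlabel('Wavelength (nm)')
ax1.set_ylabel('Intensity (arb.)')
ax1.tick_params('both', labelsize='x-small')
fig.tight_layout()
plt.show()
```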
davidsamu/seal
seal/io/convert.py
1
2457
""" Functions related to converting TPLCell data into Seal data. @author: David Samu """ import os import pandas as pd from seal.util import util, constants from seal.object import unit, unitarray def task_TPL_to_Seal(f_tpl, f_seal, task, rec_info): """Convert TPLCell data to Seal data of single task.""" # Load in Matlab structure (SimpleTPLCell). TPLCells = util.read_matlab_object(f_tpl, 'TPLStructs') # TPLCell data not iterable, i.e. empty (?). if not hasattr(TPLCells, '__iter__') or not len(TPLCells): print('Error: TPLCell data is empty in ', f_tpl) return # Create UnitArray (list of units) from TPLCell structures. kset = constants.kset params = [(TPLCell, rec_info, kset) for TPLCell in TPLCells] tUnits = util.run_in_pool(unit.Unit, params) # Add them to unit list of recording, combining all tasks. UA = unitarray.UnitArray(task) UA.add_task(task, tUnits) # Save Units. util.write_objects({'UnitArr': UA}, f_seal) def rec_TPL_to_Seal(tpl_dir, seal_dir, rec_info, excl_tasks=[]): """Convert TPLCell data to Seal data in recording folder.""" if not os.path.exists(tpl_dir): print('Error: Mssing TPLCell folder: ', tpl_dir) return # Query available TPLCell data files. f_tpl_cells = sorted([f for f in os.listdir(tpl_dir) if f[-4:] == '.mat']) # Extract task names from file names. tasks = pd.Series(f_tpl_cells, name='f_tpl_cell') tasks.index = [util.params_from_fname(f_tpl).loc['task'] for f_tpl in f_tpl_cells] # Check that there's no duplication in task names. dupli = tasks.index.duplicated() if dupli.any(): print('Error: Duplicated task names found: ' + ', '.join(tasks.index[dupli])) print('Please give unique names and rerun Seal Unit creation..') return # Exclude some tasks. to_include = [util.params_from_fname(f_tpl).loc['task'] not in excl_tasks for f_tpl in f_tpl_cells] tasks = tasks[tasks.index[to_include]] if not len(tasks): print('Error: No TPLCell object found in ' + tpl_dir) return # Create units for each task. for task, f_tpl_cell in tasks.iteritems(): print(' ', f_tpl_cell) f_tpl = tpl_dir + f_tpl_cell f_seal = seal_dir + f_tpl_cell[:-4] + '.data' task_TPL_to_Seal(f_tpl, f_seal, task, rec_info)
gpl-3.0
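In rec_TPL_to_Seal above, the TPLCell file names are wrapped in a pandas Series indexed by task name and conversion is aborted if any task name repeats. That duplicate check is easy to exercise on its own; the file and task names below are invented, and in the real code the task name is parsed with util.params_from_fname.

```python
import pandas as pd

f_tpl_cells = ['rec01_taskA_unit1.mat', 'rec01_taskB_unit1.mat', 'rec01_taskA_unit2.mat']
task_names = ['taskA', 'taskB', 'taskA']        # normally parsed from the file names

tasks = pd.Series(f_tpl_cells, name='f_tpl_cell')
tasks.index = task_names

dupli = tasks.index.duplicated()
if dupli.any():
    print('Error: Duplicated task names found: ' + ', '.join(tasks.index[dupli]))
    # rec_TPL_to_Seal returns here and asks for unique task names
```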
KarlTDebiec/Moldynplot
moldynplot/PDistFigureManager.py
2
15841
#!/usr/bin/python # -*- coding: utf-8 -*- # moldynplot.PDistFigureManager.py # # Copyright (C) 2015-2017 Karl T Debiec # All rights reserved. # # This software may be modified and distributed under the terms of the # BSD license. See the LICENSE file for details. """ Generates probability distribution figures to specifications """ ################################### MODULES ################################### from __future__ import (absolute_import, division, print_function, unicode_literals) if __name__ == "__main__": __package__ = str("moldynplot") import moldynplot from .myplotspec.FigureManager import FigureManager from .myplotspec.manage_defaults_presets import manage_defaults_presets from .myplotspec.manage_kwargs import manage_kwargs ################################### CLASSES ################################### class PDistFigureManager(FigureManager): """ Manages the generation of probability distribution figures. """ defaults = """ draw_figure: subplot_kw: autoscale_on: False multi_tick_params: left: on right: off bottom: on top: off shared_legend: True shared_legend_kw: spines: False handle_kw: ls: none marker: s mec: black legend_kw: borderaxespad: 0 frameon: False handletextpad: 0 loc: 9 numpoints: 1 draw_subplot: title_kw: verticalalignment: bottom ylabel: "Probability Distribution" yticklabels: [] tick_params: direction: out left: on right: off bottom: on top: off grid: True grid_kw: b: True color: [0.7,0.7,0.7] linestyle: '-' linewidth: 0.5 label_kw: zorder: 10 horizontalalignment: left verticalalignment: top draw_dataset: plot_kw: zorder: 10 fill_between_kw: color: [0.7, 0.7, 0.7] lw: 0 ylb: 0 yub: 1 zorder: 1 handle_kw: ls: none marker: s mec: black mean_kw: ls: none marker: o mec: black zorder: 11 """ available_presets = """ pmf: class: content help: Plot potential of mean force (PMF) draw_figure: multi_xticklabels: [2,3,4,5,6,7,8] multi_yticklabels: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5] draw_subplot: xlabel: Minimum N-O distance xticks: [2,3,4,5,6,7,8] ybound: [-3.2,0.8] ylabel: "Potential of Mean Force\\n(kcal/mol)" yticks: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5] draw_dataset: column: pmf dataset_kw: cls: moldynplot.dataset.H5Dataset default_address: /kde/pmf default_key: pmf draw_zero_line: True radgyr: class: content help: Radius of Gyration (Rg) draw_figure: multi_xticklabels: [0,5,10,15,20,25,30] draw_subplot: xlabel: $R_g$ (Å) xticks: [0,5,10,15,20,25,30] draw_dataset: column: rg dataset_kw: cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset calc_pdist: True pdist_kw: bandwidth: 0.1 grid: !!python/object/apply:numpy.linspace [0,30,1000] read_csv_kw: delim_whitespace: True header: 0 names: [frame, rg, rgmax] rmsd: class: content help: Root Mean Standard Deviation (RMSD) draw_figure: multi_xticklabels: [0,1,2,3,4,5] draw_subplot: xlabel: RMSD (Å) xticks: [0,1,2,3,4,5] draw_dataset: column: rmsd dataset_kw: cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset calc_pdist: True pdist_kw: bandwidth: 0.1 grid: !!python/object/apply:numpy.linspace [0,5,1000] read_csv_kw: delim_whitespace: True header: 0 names: [frame, rmsd] r1: class: content help: Format subplot for R1 relaxation draw_subplot: xlabel: "$R_1$" xticks: [0.0,0.5,1.0,1.5,2.0,2.5,3.0] draw_dataset: dataset_kw: pdist_kw: bandwidth: 0.02 column: r1 r2: class: content help: Format subplot for R2 relaxation draw_subplot: xlabel: "$R_2$" xticks: [0,2,4,6,8,10,12,14,16,18,20] draw_dataset: dataset_kw: pdist_kw: bandwidth: 0.3 column: r2 r2/r1: class: content help: Format subplot for R2/R1 
relaxation draw_subplot: xlabel: "$R_2$/$R_1$" xticks: [3,4,5,6,7,8,9,10,11] draw_dataset: dataset_kw: pdist_kw: bandwidth: r2/r1: 0.1 column: r2/r1 hetnoe: class: content help: Format subplot for Heteronuclear NOE relaxation draw_subplot: xlabel: "Heteronuclear NOE" xticks: [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] draw_dataset: column: noe dataset_kw: pdist_kw: bandwidth: 0.03 rotdif: class: content help: Format subplot for rotational diffusion draw_subplot: xlabel: "$τ_c$ (ns)" xticks: [5,6,7,8,9,10,11,12,13,14] draw_dataset: column: rotdif dataset_kw: pdist_kw: bandwidth: 0.2 relaxation_3: class: content help: Three stacked plots including R1, R2, and HetNOE draw_figure: nrows: 3 shared_ylabel: "Probability Distribution" subplots: 0: preset: r1 ylabel: null 1: preset: r2 ylabel: null 2: preset: hetnoe ylabel: null relaxation_4: class: content help: Four stacked plots including R1, R2, R2/R1, and HetNOE draw_figure: nrows: 4 shared_ylabel: "Probability Distribution" subplots: 0: preset: r1 ylabel: null 1: preset: r2 ylabel: null 2: preset: r2/r1 ylabel: null 3: preset: hetnoe ylabel: null rotdif_2: class: content help: Two stacked plots including R2/R1 rotdif draw_figure: nrows: 2 shared_ylabel: "Probability Distribution" subplots: 0: preset: r2/r1 ylabel: null 1: preset: rotdif ylabel: null rotdif_4: class: content help: Two stacked plots including R2/R1 rotdif draw_figure: nrows: 2 ncols: 2 shared_ylabel: "Probability Distribution" subplots: 0: preset: r2/r1 ylabel: null 1: preset: r2/r1 ylabel: null 2: preset: rotdif ylabel: null 3: preset: rotdif ylabel: null manuscript: class: target inherits: manuscript draw_figure: bottom: 0.55 hspace: 0.10 left: 0.30 right: 0.10 sub_height: 1.00 sub_width: 2.65 top: 0.10 wspace: 0.10 shared_legend_kw: left: 0.30 sub_width: 2.65 bottom: 0.00 sub_height: 0.20 handle_kw: mew: 0.5 ms: 5 legend_kw: labelspacing: 0.5 ncol: 6 shared_xlabel_kw: bottom: -0.24 title_kw: top: -0.1 draw_subplot: xlabel_kw: labelpad: 3 ylabel_kw: labelpad: 6 y2ticks: [] y2label_kw: rotation: 270 verticalalignment: bottom grid_kw: linewidth: 0.5 draw_label: True label_kw: border_lw: 1 xabs: 0.020 yabs: -0.025 draw_dataset: mean_kw: mew: 0.5 ms: 2 handle_kw: mew: 0.5 ms: 5 presentation_wide: class: target inherits: presentation_wide draw_figure: bottom: 1.80 hspace: 0.20 left: 0.80 right: 0.80 sub_height: 2.00 sub_width: 4.00 top: 0.60 wspace: 0.20 shared_legend_kw: left: 0.80 sub_width: 16.60 bottom: 0.00 sub_height: 0.60 handle_kw: mew: 2.0 ms: 20 legend_kw: labelspacing: 0.5 ncol: 6 shared_ylabel_kw: left: -0.5 shared_xlabel_kw: bottom: -0.9 draw_subplot: y2ticks: [] y2label_kw: labelpad: 10 rotation: 270 verticalalignment: bottom draw_dataset: mean_kw: mew: 2.0 ms: 8 handle_kw: mew: 2.0 ms: 20 """ @manage_defaults_presets() @manage_kwargs() def draw_dataset(self, subplot, column=None, draw_pdist=True, draw_fill_between=False, draw_mean=False, draw_plot=False, draw_zero_line=False, **kwargs): """ Loads a dataset and draws it on a subplot. Loaded dataset should have attribute `pdist_df`. 
Arguments: subplot (Axes): :class:`Axes<matplotlib.axes.Axes>` on which to draw dataset_kw (dict): Keyword arguments passed to :meth:`load_dataset <myplotspec.FigureManager.FigureManager.load_dataset>` plot_kw (dict): Keyword arguments passed to methods of :class:`Axes<matplotlib.axes.Axes>` column (str): Column within `pdist_df` to use draw_fill_between (bool): Fill between specified region fill_between_kw (dict): Keyword arguments used to configure call to :meth:`fill_between<matplotlib.axes.Axes.fill_between>` fill_between_kw[x] (list, ndarray): x values passed to :meth:`fill_between<matplotlib.axes.Axes.fill_between>` fill_between_kw[ylb] (list, ndarray): y lower bound values passed to :meth:`fill_between<matplotlib.axes.Axes.fill_between>` fill_between_kw[yub] (list, ndarray): y upper bound values passed to :meth:`fill_between<matplotlib.axes.Axes.fill_between>` draw_pdist (bool): Draw probability distribution pdist_kw (dict): Keyword arguments using to configure call to :meth:`plot<matplotlib.axes.Axes.plot>` draw_mean (bool): Draw point at mean value mean_kw (dict): Keyword arguments used to configure call to :meth:`plot<matplotlib.axes.Axes.plot>` verbose (int): Level of verbose output kwargs (dict): Additional keyword arguments """ from warnings import warn import pandas as pd import numpy as np from .myplotspec import get_colors, multi_get_copy # Process arguments verbose = kwargs.get("verbose", 1) dataset_kw = multi_get_copy("dataset_kw", kwargs, {}) if "infile" in kwargs: dataset_kw["infile"] = kwargs["infile"] dataset = self.load_dataset(verbose=verbose, **dataset_kw) if dataset is not None and hasattr(dataset, "pdist_df"): pdist_df = dataset.pdist_df elif dataset is not None and hasattr(dataset, "datasets"): try: pdist_df = dataset.pdist_df = pd.DataFrame( dataset.datasets["pmf"]["pmf"], index=dataset.datasets["pmf"]["x"], columns = ["pmf"]) except: pdist_df = dataset.pdist_df = pd.DataFrame( dataset.datasets["pmf"]["pmf"], index=dataset.datasets["pmf"]["center"], columns = ["pmf"]) dataset.pdist_df.index.name = "x" else: pdist_df = None # Configure plot settings plot_kw = multi_get_copy("plot_kw", kwargs, {}) get_colors(plot_kw, kwargs) # Draw fill_between if draw_fill_between: fill_between_kw = multi_get_copy("fill_between_kw", kwargs, {}) get_colors(fill_between_kw, plot_kw) if "x" in fill_between_kw: fb_x = fill_between_kw.pop("x") if "ylb" in fill_between_kw: fb_ylb = fill_between_kw.pop("ylb") if "yub" in fill_between_kw: fb_yub = fill_between_kw.pop("yub") subplot.fill_between(fb_x, fb_ylb, fb_yub, **fill_between_kw) # Draw pdist if draw_pdist: if not hasattr(dataset, "pdist_df"): warn("'draw_pdist' is enabled but dataset does not have the " "necessary attribute 'pdist_df', skipping.") else: pdist = pdist_df[column] pdist_kw = plot_kw.copy() pdist_kw.update(kwargs.get("pdist_kw", {})) pd_x = pdist.index.values pd_y = np.squeeze(pdist.values) subplot.plot(pd_x, pd_y, **pdist_kw) pdist_rescale = True if pdist_rescale: pdist_max = pd_y.max() y_max = subplot.get_ybound()[1] if (pdist_max > y_max / 1.25 or not hasattr(subplot, "_mps_rescaled")): # print("\nPIDST MAX: {0}\n".format(pdist_max)) subplot.set_ybound(0, pdist_max*1.25) yticks = [0, pdist_max*0.25, pdist_max*0.50, pdist_max*0.75, pdist_max, pdist_max*1.25] subplot.set_yticks(yticks) subplot._mps_rescaled = True if draw_mean: mean_kw = plot_kw.copy() mean_kw.update(kwargs.get("mean_kw", {})) mean = np.sum(np.array(pd_x, np.float64) *np.array(pd_y, np.float64)) if verbose >= 1: print("mean: {0:6.3f}".format(mean)) 
subplot.plot(mean, pd_y[np.abs(pd_x - mean).argmin()], **mean_kw) if draw_plot: if "x" in kwargs: x = kwargs.get("x") subplot.plot([x, x], [0,1], **plot_kw) if draw_zero_line: subplot.plot([0, 10], [0,0], linewidth=0.5, color="black") #################################### MAIN ##################################### if __name__ == "__main__": PDistFigureManager().main()
bsd-3-clause
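In PDistFigureManager.draw_dataset above, the mean marker is placed at `mean = np.sum(pd_x * pd_y)`, i.e. the expectation of a distribution discretized onto the pdist grid, which gives the true mean only if the grid values sum to one. A short numpy sketch of that expectation with an explicit renormalization (the Gaussian test distribution is made up):

```python
import numpy as np

x = np.linspace(0, 5, 1000)                     # grid, e.g. RMSD in angstrom
pdf = np.exp(-0.5 * ((x - 2.0) / 0.3) ** 2)     # unnormalized distribution on the grid

p = pdf / pdf.sum()                             # discrete probabilities that sum to one
mean = np.sum(x * p)                            # same form as np.sum(pd_x * pd_y)
print("mean: {0:6.3f}".format(mean))            # expected: ~2.000
```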
clingsz/GAE
misc/cv/collect_ND5_3.py
1
12374
# -*- coding: utf-8 -*- """ Created on Fri Mar 24 10:53:51 2017 @author: cling """ # collect ND5_3 import misc.cv.exp_test as exp_test from misc.utils import saveobj,getJobOpts,loadobj,spearmancorr import numpy from misc.data_gen import DataOpts,load_data import misc.data_gen as data_gen from gae.model.trainer import TrainerOpts import gae.model.trainer as trainer import cPickle from gae.model.guidedae import GAEOpts import misc.cv.run_exps as run_exps import matplotlib.pyplot as plt from scipy import stats import os # comparing the best model def show_cv_setting(): w_lst,d_lst,lam_lst,l2_lst = get_cv_setting() print 'width list:',w_lst print 'depth list:',d_lst print 'alpha list:',lam_lst print 'L2 list:',l2_lst print 'test folds = 5' print 'validation folds = 3' def comparing_best_model(): best_code_length = get_best_code_length() gaes,pars = loadobj('result/temp/ND5-3-AE-LAMW.pkl') # lam_lst = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] lam_lst = [0,0.1,0.3,0.5,0.7,1.0] gaes = filter(lambda g: g.paras[0][0]==best_code_length and g.paras[0][2] in lam_lst,gaes) plt.figure(figsize=[10,10]) for g in gaes: # print g.name l = g.paras[0,2] g = exp_test.calc_best_test_err(g,[1-l,l]) RE = g.best_test_errs[:,0] PE = g.best_test_errs[:,2] NM = g.name x,y = numpy.mean(RE),numpy.mean(PE) sx,sy = numpy.std(RE)/numpy.sqrt(5),numpy.std(PE)/numpy.sqrt(5) plt.errorbar(x,y,yerr=sy,xerr=sx) plt.text(x,y,NM) raw,pca = loadobj('result/temp/ND5-3-rawpca.pkl') # for i in range(pca.test_errs.shape[0]): for i in range(3,6): RE = pca.test_errs[i,:,0] PE = pca.test_errs[i,:,2] NM = 'PCA'+str(i+1) x,y = numpy.mean(RE),numpy.mean(PE) sx,sy = numpy.std(RE)/numpy.sqrt(5),numpy.std(PE)/numpy.sqrt(5) plt.errorbar(x,y,yerr=sy,xerr=sx) plt.text(x,y,NM) rawpe = numpy.mean(raw.best_test_errs[:,2]) plt.plot([0.20,0.50],[rawpe]*2,'k--') plt.show() def get_best_model_setting(best_code_length): print 'Getting best model setting...' 
gaes,pars = loadobj('result/temp/ND5-3-AE-W.pkl') bgae = gaes[best_code_length-1] print bgae.name terr = bgae.test_errs min_err_id = numpy.argmin(numpy.mean(terr[:,:,0] + terr[:,:,2],axis=1)) return bgae.paras[min_err_id,:] def get_immu_age(model): print 'getting Imunne Age for all patients' data = data_gen.get_processed_data() x,_,_ = data_gen.make_standardize(data['cyto']) ag,mu,sig = data_gen.make_standardize(data['demo']['age']) y = model.predict(x) y = y * sig + mu immu_age = y plt.scatter(data['demo']['age'],immu_age) plt.xlim([0,100]) plt.ylim([0,100]) plt.plot([0,100],[0,100],'k--') plt.ylabel('Immune Age') plt.xlabel('Age') plt.show() # X = numpy.concatenate([data['demo']['age'],immu_age],axis=1) # utils.save_csv_table('tables/ia_vs_age.csv',X,col_name=['Age','ImmuAge']) return immu_age def show_best_gae(model): data = load_data(DataOpts()) x = data.get_all()[0][0] xlb = data.x_labels J = trainer.get_Jacobian(model,x) lst = numpy.argsort(-numpy.sum(numpy.abs(J),axis=0)) plt.figure(figsize=[10,5]) plt.boxplot(J[:,lst]) plt.grid() plt.xticks(numpy.arange(len(xlb))+1,xlb[lst],rotation='vertical') plt.ylabel('Jacobian') plt.show() def fig_boxplot_cverr(): aes,nms = get_results() E = numpy.asarray(aes) pre_E = E[:,:,2] rce_E = E[:,:,0] n = E.shape[0] plt.figure(figsize=[10,10]) xm = numpy.mean(rce_E,axis=1) xs = numpy.std(rce_E,axis=1)/5.0 ym = numpy.mean(pre_E,axis=1) ys = numpy.std(pre_E,axis=1)/5.0 plt.errorbar(xm,ym,xerr=xs,yerr=ys,fmt='o') for i in range(n): plt.text(xm[i],ym[i],nms[i]) plt.grid() plt.xlabel('RCE') plt.ylabel('Age prediction') plt.show() def get_results(): cverrs = [] nms = [] rawpcafile = 'result/temp/ND5-3-rawpca.pkl' if not os.path.isfile(rawpcafile): print 'did not find '+rawpcafile +' Run...' get_RAW_PCA_result() raw,pca = loadobj(rawpcafile) cverrs.append(raw.best_test_errs) nms.append('RAW') E = pca.test_errs for i in range(E.shape[0]): cverrs.append(E[i,:,:]) nms.append('PCA'+str(i+1)) aefile = 'result/temp/ND5-3-AE-W.pkl' if not os.path.isfile(aefile): print 'did not find '+ aefile +' Run...' collect_ND5_3() gaes,pars = loadobj(aefile) for er in gaes: er = exp_test.calc_best_test_err(er,[0.5,0.5]) cverrs.append(er.best_test_errs) nms.append(er.name) return cverrs,nms def get_best_code_length(): aes,nms = get_results() aes = aes[11:] print 'Getting best code length...' w_lst = range(1,11) c = 0 m = len(w_lst) PE = numpy.zeros([m,5]) pvs = [] best_code_length = 1 for i in range(m): PE[i,:] = aes[c][:,0] + aes[c][:,2] if i>0: _,pvpe = stats.ttest_rel(PE[i,:],PE[i-1,:]) pvs.append(pvpe) print i,i+1,pvpe if best_code_length==1 and pvpe>0.05: best_code_length = i c = c + 1 plt.boxplot(PE.transpose()) plt.title('Best Code Length: ' + str(best_code_length)) plt.xlabel('Code Length') plt.ylabel('Total Loss') plt.show() return best_code_length def get_best_model(fileName='result/temp/best_gae_model.pkl'): if os.path.isfile(fileName): model = loadobj(fileName) else: print 'Train on the best model settings...' 
best_model_setting = get_best_model_setting() w,d,lam,l2 = best_model_setting model = trainer.build_gae(w=int(w),d=int(d),lam=lam,l2=l2,verbose=1, wr=1,batch_size=10,noise_level=0,blind_epochs=500) print w,d,lam,l2 data = data_gen.get_training_data() model.train(data['X'],data['Y']) saveobj(fileName,model) return model def compare_age_prediction_error_without_MIG(best_model_setting,compageFile): w,d,lam,l2 = best_model_setting print w,d,lam,l2 aeopt = GAEOpts(w=int(w),wr=1,d=int(d), lam=lam,verbose=1, l2=l2,batch_size=10,noise_level=0, blind_epochs=500) test_folds = 5 dataopt = DataOpts(test_folds=test_folds) gae = trainer.make_trainer(traineropt=TrainerOpts(name='AE', aeopt=aeopt)) e = [] for t in range(test_folds): dataopt.test_fold_id = t data = load_data(dataopt) MIGid = numpy.where(data.x_labels=='MIG')[0][0] ds = data.get_test() errs1 = gae.train_and_test(ds) ds[0][0][:,MIGid] = 0 ds[1][0][:,MIGid] = 0 errs2 = gae.train_and_test(ds) e.append([errs1[2],errs2[2]]) saveobj(compageFile,e) def plot_IMage_vs_other_phenotypes(immu_age): data = data_gen.get_processed_data() demo = data['demo'] pheno = [demo['age'],demo['bmi'],demo['mfs']] phnms = ['age','bmi','mfs'] phenobin = [demo['source'],demo['gender'],demo['cmv'],demo['ebv']] phbins = ['source','gender','cmv','ebv'] plt.figure(figsize=[10,4]) for i,ph,pn in zip(range(1,4),pheno,phnms): plt.subplot(1,3,i) lst = numpy.where(~numpy.isnan(ph))[0] plt.scatter(ph[lst,:],immu_age[lst,:]) a,b = spearmancorr(ph[lst,:],immu_age[lst,:]) plt.ylabel('ImmuAge') plt.xlabel(pn) plt.title('SpCorr=%.2f,PV=%.3f' % (a,b)) plt.tight_layout() plt.show() plt.figure(figsize=[10,10]) for i,ph,pn in zip(range(1,5),phenobin,phbins): plt.subplot(2,2,i) lst = numpy.where(~numpy.isnan(ph))[0] ph = ph[lst,:] y = immu_age[lst,:] l0 = numpy.where(ph==0)[0] l1 = numpy.where(ph==1)[0] plt.boxplot([y[l0],y[l1]]) a,b = spearmancorr(ph,y) plt.ylabel('ImmuAge') plt.xlabel(pn) plt.title('SpCorr=%.2f,PV=%.3f' % (a,b)) plt.tight_layout() plt.show() from sklearn.decomposition import PCA def pcatrans(x): pca = PCA(n_components=2) pca.fit(x) z = pca.transform(x) return z def showpcavsgae(): best_model_name = 'result/temp/best_gae_model.pkl' model = loadobj(best_model_name) data = data_gen.get_processed_data() x,_,_ = data_gen.make_standardize(data['cyto']) age = data['demo']['age'] c = model.encode(x) z1 = pcatrans(x) z2 = pcatrans(c) plt.figure(figsize=[10,5]) tits = ['PCA on Cytokines','PCA on GAE codes'] for i,z in zip(range(2),[z1,z2]): plt.subplot(1,2,i+1) plt.scatter(z[:,0],z[:,1],c=age,cmap='hot',s=15) plt.xlabel('PCA-1') plt.ylabel('PCA-2') plt.title(tits[i]) plt.colorbar() plt.show() def get_lin_model(): test_folds = 5 valid_folds = 3 dataopt = DataOpts(test_folds=test_folds, valid_folds=valid_folds) test = exp_test.makeTest(dataopt=dataopt, traineropt=TrainerOpts(name='None')) model = test.trainer data = test.dataset.get_all() model.train(data) plt.scatter(data[0][1],model.predict(data[0][0])) plt.show() return model def get_cv_setting(): w_lst = range(1,11) d_lst = [1,2,3] lam_lst = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] l2_lst = [1e-3,1e-2,1e-1] return w_lst,d_lst,lam_lst,l2_lst def collect_ND5_3(): file_name = 'result/temp/logs_ND5_3.pkl' res = cPickle.load(open(file_name,'rb')) w_lst,d_lst,lam_lst,l2_lst = get_cv_setting() joblsts = [w_lst,d_lst,lam_lst,l2_lst] test_folds = 5 valid_folds = 3 trials = len(res) na = 8 test_errs = numpy.zeros([trials,test_folds,na]) valid_errs = numpy.zeros([trials,test_folds,valid_folds,na]) paras = 
numpy.empty([trials,len(joblsts)]) c = 0 for j in range(trials): rt = res[c] paras[j,:] = getJobOpts(c,joblsts) for i in range(test_folds): r = rt[i+1] for k in range(valid_folds): valid_errs[j,i,k,:] = r[k] test_errs[j,i,:] = r[-1] c = c + 1 aes = [] for lam in lam_lst: for w in w_lst: er = exp_test.ExpResult('GAE'+str(lam)+'-'+str(w)) lim = numpy.where(paras[:,0]==w)[0] lst = numpy.where(paras[:,2]==lam)[0] lst = numpy.intersect1d(lim,lst) er.load_exp_result(valid_errs[lst,:,:,:],test_errs[lst,:,:],paras[lst,:]) aes.append(er) saveobj('result/temp/ND5-3-AE-LAMW.pkl',[aes,(test_errs,paras)]) aes = [] for w in w_lst: er = exp_test.ExpResult('GAE'+'-'+str(w)) lst = numpy.where(paras[:,0]==w)[0] er.load_exp_result(valid_errs[lst,:,:,:],test_errs[lst,:,:],paras[lst,:]) aes.append(er) saveobj('result/temp/ND5-3-AE-W.pkl',[aes,(test_errs,paras)]) def get_RAW_PCA_result(): dataopt = DataOpts(test_folds=5) blER = run_exps.get_raw_result(dataopt) pcaER = run_exps.get_pca_result(dataopt,lst=range(1,11)) saveobj('result/temp/ND5-3-rawpca.pkl',[blER,pcaER]) def main(): best_model_name = 'temp/best_gae_model.pkl' # if not os.path.isfile(best_model_name): # get_best_model(best_model_name) model = loadobj(best_model_name) # linmodel = get_lin_model() immu_age = get_immu_age(model) # show_best_gae(model) # plot_IMage_vs_other_phenotypes(immu_age) # compare_age_prediction_error_without_MIG() def run(): best_model_name = 'temp/best_gae_model.pkl' collect_ND5_3() train_best_model(best_model_name) def get_model(): best_model_name = 'temp/best_gae_model.pkl' return loadobj(best_model_name) if __name__ == '__main__': main() # showpcavsgae() # main() # comparing_best_model() # get_best_code_length() # fig_boxplot_cverr() # aes = get_results() # E = numpy.asarray(aes) # get_results() # collect_ND5_3() # get_RAW_PCA_result() # get_gae_path()
gpl-3.0
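get_best_code_length above compares the total cross-validation loss of consecutive autoencoder code lengths with a paired t-test over the five test folds, and keeps the shorter code as soon as the longer one brings no significant improvement (p > 0.05). A sketch of that selection rule on made-up fold losses follows; the 0.05 threshold and the paired scipy.stats.ttest_rel call come from the script, the numbers do not.

```python
import numpy as np
from scipy import stats

lengths = [1, 2, 3, 4]
# Made-up total loss (reconstruction + age prediction) per test fold for each code length
fold_losses = np.array([
    [1.20, 1.18, 1.25, 1.22, 1.19],   # code length 1
    [0.95, 0.97, 0.99, 0.96, 0.94],   # code length 2
    [0.95, 0.96, 1.00, 0.95, 0.95],   # code length 3
    [0.94, 0.96, 0.99, 0.95, 0.94],   # code length 4
])

best = lengths[-1]
for i in range(1, len(lengths)):
    _, p = stats.ttest_rel(fold_losses[i], fold_losses[i - 1])
    if p > 0.05:                   # no significant gain from the longer code
        best = lengths[i - 1]      # keep the shorter code, as in get_best_code_length
        break
print('best code length:', best)   # expected: 2 with these numbers
```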
grlee77/scipy
scipy/stats/_discrete_distns.py
2
50643
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from functools import partial from scipy import special from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta from scipy._lib._util import _lazywhere, rng_integers from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh import numpy as np from ._distn_infrastructure import ( rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names, _check_shape) from .biasedurn import (_PyFishersNCHypergeometric, _PyWalleniusNCHypergeometric, _PyStochasticLib3) class binom_gen(rv_discrete): r"""A binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `binom` is: .. math:: f(k) = \binom{n}{k} p^k (1-p)^{n-k} for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1` `binom` takes :math:`n` and :math:`p` as shape parameters, where :math:`p` is the probability of a single success and :math:`1-p` is the probability of a single failure. %(after_notes)s %(example)s See Also -------- hypergeom, nbinom, nhypergeom """ def _rvs(self, n, p, size=None, random_state=None): return random_state.binomial(n, p, size) def _argcheck(self, n, p): return (n >= 0) & (p >= 0) & (p <= 1) def _get_support(self, n, p): return self.a, n def _logpmf(self, x, n, p): k = floor(x) combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) def _pmf(self, x, n, p): # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) return exp(self._logpmf(x, n, p)) def _cdf(self, x, n, p): k = floor(x) vals = special.bdtr(k, n, p) return vals def _sf(self, x, n, p): k = floor(x) return special.bdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.bdtrik(q, n, p)) vals1 = np.maximum(vals - 1, 0) temp = special.bdtr(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p, moments='mv'): q = 1.0 - p mu = n * p var = n * p * q g1, g2 = None, None if 's' in moments: g1 = (q - p) / sqrt(var) if 'k' in moments: g2 = (1.0 - 6*p*q) / var return mu, var, g1, g2 def _entropy(self, n, p): k = np.r_[0:n + 1] vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0) binom = binom_gen(name='binom') class bernoulli_gen(binom_gen): r"""A Bernoulli discrete random variable. %(before_notes)s Notes ----- The probability mass function for `bernoulli` is: .. math:: f(k) = \begin{cases}1-p &\text{if } k = 0\\ p &\text{if } k = 1\end{cases} for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1` `bernoulli` takes :math:`p` as shape parameter, where :math:`p` is the probability of a single success and :math:`1-p` is the probability of a single failure. %(after_notes)s %(example)s """ def _rvs(self, p, size=None, random_state=None): return binom_gen._rvs(self, 1, p, size=size, random_state=random_state) def _argcheck(self, p): return (p >= 0) & (p <= 1) def _get_support(self, p): # Overrides binom_gen._get_support!x return self.a, self.b def _logpmf(self, x, p): return binom._logpmf(x, 1, p) def _pmf(self, x, p): # bernoulli.pmf(k) = 1-p if k = 0 # = p if k = 1 return binom._pmf(x, 1, p) def _cdf(self, x, p): return binom._cdf(x, 1, p) def _sf(self, x, p): return binom._sf(x, 1, p) def _ppf(self, q, p): return binom._ppf(q, 1, p) def _stats(self, p): return binom._stats(1, p) def _entropy(self, p): return entr(p) + entr(1-p) bernoulli = bernoulli_gen(b=1, name='bernoulli') class betabinom_gen(rv_discrete): r"""A beta-binomial discrete random variable. 
%(before_notes)s Notes ----- The beta-binomial distribution is a binomial distribution with a probability of success `p` that follows a beta distribution. The probability mass function for `betabinom` is: .. math:: f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)} for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`, :math:`b > 0`, where :math:`B(a, b)` is the beta function. `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters. References ---------- .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution %(after_notes)s .. versionadded:: 1.4.0 See Also -------- beta, binom %(example)s """ def _rvs(self, n, a, b, size=None, random_state=None): p = random_state.beta(a, b, size) return random_state.binomial(n, p, size) def _get_support(self, n, a, b): return 0, n def _argcheck(self, n, a, b): return (n >= 0) & (a > 0) & (b > 0) def _logpmf(self, x, n, a, b): k = floor(x) combiln = -log(n + 1) - betaln(n - k + 1, k + 1) return combiln + betaln(k + a, n - k + b) - betaln(a, b) def _pmf(self, x, n, a, b): return exp(self._logpmf(x, n, a, b)) def _stats(self, n, a, b, moments='mv'): e_p = a / (a + b) e_q = 1 - e_p mu = n * e_p var = n * (a + b + n) * e_p * e_q / (a + b + 1) g1, g2 = None, None if 's' in moments: g1 = 1.0 / sqrt(var) g1 *= (a + b + 2 * n) * (b - a) g1 /= (a + b + 2) * (a + b) if 'k' in moments: g2 = a + b g2 *= (a + b - 1 + 6 * n) g2 += 3 * a * b * (n - 2) g2 += 6 * n ** 2 g2 -= 3 * e_p * b * n * (6 - n) g2 -= 18 * e_p * e_q * n ** 2 g2 *= (a + b) ** 2 * (1 + a + b) g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n)) g2 -= 3 return mu, var, g1, g2 betabinom = betabinom_gen(name='betabinom') class nbinom_gen(rv_discrete): r"""A negative binomial discrete random variable. %(before_notes)s Notes ----- Negative binomial distribution describes a sequence of i.i.d. Bernoulli trials, repeated until a predefined, non-random number of successes occurs. The probability mass function of the number of failures for `nbinom` is: .. math:: f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k for :math:`k \ge 0`, :math:`0 < p \leq 1` `nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the number of successes, :math:`p` is the probability of a single success, and :math:`1-p` is the probability of a single failure. Another common parameterization of the negative binomial distribution is in terms of the mean number of failures :math:`\mu` to achieve :math:`n` successes. The mean :math:`\mu` is related to the probability of success as .. math:: p = \frac{n}{n + \mu} The number of successes :math:`n` may also be specified in terms of a "dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`, which relates the mean :math:`\mu` to the variance :math:`\sigma^2`, e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention used for :math:`\alpha`, .. 
math:: p &= \frac{\mu}{\sigma^2} \\ n &= \frac{\mu^2}{\sigma^2 - \mu} %(after_notes)s %(example)s See Also -------- hypergeom, binom, nhypergeom """ def _rvs(self, n, p, size=None, random_state=None): return random_state.negative_binomial(n, p, size) def _argcheck(self, n, p): return (n > 0) & (p > 0) & (p <= 1) def _pmf(self, x, n, p): # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k return exp(self._logpmf(x, n, p)) def _logpmf(self, x, n, p): coeff = gamln(n+x) - gamln(x+1) - gamln(n) return coeff + n*log(p) + special.xlog1py(x, -p) def _cdf(self, x, n, p): k = floor(x) return special.betainc(n, k+1, p) def _logcdf(self, x, n, p): k = floor(x) cdf = self._cdf(k, n, p) cond = cdf > 0.5 def f1(k, n, p): return np.log1p(-special.betainc(k + 1, n, 1 - p)) def f2(k, n, p): return np.log(cdf) with np.errstate(divide='ignore'): return _lazywhere(cond, (x, n, p), f=f1, f2=f2) def _sf_skip(self, x, n, p): # skip because special.nbdtrc doesn't work for 0<n<1 k = floor(x) return special.nbdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.nbdtrik(q, n, p)) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p): Q = 1.0 / p P = Q - 1.0 mu = n*P var = n*P*Q g1 = (Q+P)/sqrt(n*P*Q) g2 = (1.0 + 6*P*Q) / (n*P*Q) return mu, var, g1, g2 nbinom = nbinom_gen(name='nbinom') class geom_gen(rv_discrete): r"""A geometric discrete random variable. %(before_notes)s Notes ----- The probability mass function for `geom` is: .. math:: f(k) = (1-p)^{k-1} p for :math:`k \ge 1`, :math:`0 < p \leq 1` `geom` takes :math:`p` as shape parameter, where :math:`p` is the probability of a single success and :math:`1-p` is the probability of a single failure. %(after_notes)s See Also -------- planck %(example)s """ def _rvs(self, p, size=None, random_state=None): return random_state.geometric(p, size=size) def _argcheck(self, p): return (p <= 1) & (p > 0) def _pmf(self, k, p): return np.power(1-p, k-1) * p def _logpmf(self, k, p): return special.xlog1py(k - 1, -p) + log(p) def _cdf(self, x, p): k = floor(x) return -expm1(log1p(-p)*k) def _sf(self, x, p): return np.exp(self._logsf(x, p)) def _logsf(self, x, p): k = floor(x) return k*log1p(-p) def _ppf(self, q, p): vals = ceil(log1p(-q) / log1p(-p)) temp = self._cdf(vals-1, p) return np.where((temp >= q) & (vals > 0), vals-1, vals) def _stats(self, p): mu = 1.0/p qr = 1.0-p var = qr / p / p g1 = (2.0-p) / sqrt(qr) g2 = np.polyval([1, -6, 6], p)/(1.0-p) return mu, var, g1, g2 geom = geom_gen(a=1, name='geom', longname="A geometric") class hypergeom_gen(rv_discrete): r"""A hypergeometric discrete random variable. The hypergeometric distribution models drawing objects from a bin. `M` is the total number of objects, `n` is total number of Type I objects. The random variate represents the number of Type I objects in `N` drawn without replacement from the total population. %(before_notes)s Notes ----- The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not universally accepted. See the Examples for a clarification of the definitions used here. The probability mass function is defined as, .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}} {\binom{M}{N}} for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial coefficients are defined as, .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. 
%(after_notes)s Examples -------- >>> from scipy.stats import hypergeom >>> import matplotlib.pyplot as plt Suppose we have a collection of 20 animals, of which 7 are dogs. Then if we want to know the probability of finding a given number of dogs if we choose at random 12 of the 20 animals, we can initialize a frozen distribution and plot the probability mass function: >>> [M, n, N] = [20, 7, 12] >>> rv = hypergeom(M, n, N) >>> x = np.arange(0, n+1) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group of chosen animals') >>> ax.set_ylabel('hypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `hypergeom` methods directly. To for example obtain the cumulative distribution function, use: >>> prb = hypergeom.cdf(x, M, n, N) And to generate random numbers: >>> R = hypergeom.rvs(M, n, N, size=10) See Also -------- nhypergeom, binom, nbinom """ def _rvs(self, M, n, N, size=None, random_state=None): return random_state.hypergeometric(n, M-n, N, size=size) def _get_support(self, M, n, N): return np.maximum(N-(M-n), 0), np.minimum(n, N) def _argcheck(self, M, n, N): cond = (M > 0) & (n >= 0) & (N >= 0) cond &= (n <= M) & (N <= M) return cond def _logpmf(self, k, M, n, N): tot, good = M, n bad = tot - good result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) - betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) - betaln(tot+1, 1)) return result def _pmf(self, k, M, n, N): # same as the following but numerically more precise # return comb(good, k) * comb(bad, N-k) / comb(tot, N) return exp(self._logpmf(k, M, n, N)) def _stats(self, M, n, N): # tot, good, sample_size = M, n, N # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n') M, n, N = 1.*M, 1.*n, 1.*N m = M - n p = n/M mu = N*p var = m*n*N*(M - N)*1.0/(M*M*(M-1)) g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m g2 *= (M-1)*M*M g2 += 6.*n*N*(M-N)*m*(5.*M-6) g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) return mu, var, g1, g2 def _entropy(self, M, n, N): k = np.r_[N - (M - n):min(n, N) + 1] vals = self.pmf(k, M, n, N) return np.sum(entr(vals), axis=0) def _sf(self, k, M, n, N): # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual # integration. res = [] for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)): # Manual integration over probability mass function. More accurate # than integrate.quad. 
k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) def _logsf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)): if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-cdf) res.append(log1p(-exp(self.logcdf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(quant + 1, draw + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) def _logcdf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)): if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-sf) res.append(log1p(-exp(self.logsf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(0, quant + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom') class nhypergeom_gen(rv_discrete): r"""A negative hypergeometric discrete random variable. Consider a box containing :math:`M` balls:, :math:`n` red and :math:`M-n` blue. We randomly sample balls from the box, one at a time and *without* replacement, until we have picked :math:`r` blue balls. `nhypergeom` is the distribution of the number of red balls :math:`k` we have picked. %(before_notes)s Notes ----- The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not universally accepted. See the Examples for a clarification of the definitions used here. The probability mass function is defined as, .. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}} {{M \choose n}} for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`, and the binomial coefficient is: .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. It is equivalent to observing :math:`k` successes in :math:`k+r-1` samples with :math:`k+r`'th sample being a failure. The former can be modelled as a hypergeometric distribution. The probability of the latter is simply the number of failures remaining :math:`M-n-(r-1)` divided by the size of the remaining population :math:`M-(k+r-1)`. This relationship can be shown as: .. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))} where :math:`NHG` is probability mass function (PMF) of the negative hypergeometric distribution and :math:`HG` is the PMF of the hypergeometric distribution. %(after_notes)s Examples -------- >>> from scipy.stats import nhypergeom >>> import matplotlib.pyplot as plt Suppose we have a collection of 20 animals, of which 7 are dogs. Then if we want to know the probability of finding a given number of dogs (successes) in a sample with exactly 12 animals that aren't dogs (failures), we can initialize a frozen distribution and plot the probability mass function: >>> M, n, r = [20, 7, 12] >>> rv = nhypergeom(M, n, r) >>> x = np.arange(0, n+2) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group with given 12 failures') >>> ax.set_ylabel('nhypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `nhypergeom` methods directly. 
To for example obtain the probability mass function, use: >>> prb = nhypergeom.pmf(x, M, n, r) And to generate random numbers: >>> R = nhypergeom.rvs(M, n, r, size=10) To verify the relationship between `hypergeom` and `nhypergeom`, use: >>> from scipy.stats import hypergeom, nhypergeom >>> M, n, r = 45, 13, 8 >>> k = 6 >>> nhypergeom.pmf(k, M, n, r) 0.06180776620271643 >>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1)) 0.06180776620271644 See Also -------- hypergeom, binom, nbinom References ---------- .. [1] Negative Hypergeometric Distribution on Wikipedia https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution .. [2] Negative Hypergeometric Distribution from http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf """ def _get_support(self, M, n, r): return 0, n def _argcheck(self, M, n, r): cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n) return cond def _logpmf(self, k, M, n, r): cond = ((r == 0) & (k == 0)) result = _lazywhere(~cond, (k, M, n, r), lambda k, M, n, r: (-betaln(k+1, r) + betaln(k+r, 1) - betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) + betaln(n+1, M-n+1) - betaln(M+1, 1)), fillvalue=0.0) return result def _pmf(self, k, M, n, r): # same as the following but numerically more precise # return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n) return exp(self._logpmf(k, M, n, r)) def _stats(self, M, n, r): # Promote the datatype to at least float # mu = rn / (M-n+1) M, n, r = 1.*M, 1.*n, 1.*r mu = r*n / (M-n+1) var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1)) # The skew and kurtosis are mathematically # intractable so return `None`. See [2]_. g1, g2 = None, None return mu, var, g1, g2 nhypergeom = nhypergeom_gen(name='nhypergeom') # FIXME: Fails _cdfvec class logser_gen(rv_discrete): r"""A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is: .. math:: f(k) = - \frac{p^k}{k \log(1-p)} for :math:`k \ge 1`, :math:`0 < p < 1` `logser` takes :math:`p` as shape parameter, where :math:`p` is the probability of a single success and :math:`1-p` is the probability of a single failure. %(after_notes)s %(example)s """ def _rvs(self, p, size=None, random_state=None): # looks wrong for p>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return random_state.logseries(p, size=size) def _argcheck(self, p): return (p > 0) & (p < 1) def _pmf(self, k, p): # logser.pmf(k) = - p**k / (k*log(1-p)) return -np.power(p, k) * 1.0 / k / special.log1p(-p) def _stats(self, p): r = special.log1p(-p) mu = p / (p - 1.0) / r mu2p = -p / r / (p - 1.0)**2 var = mu2p - mu*mu mu3p = -p / r * (1.0+p) / (1.0 - p)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / np.power(var, 1.5) mu4p = -p / r * ( 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1, name='logser', longname='A logarithmic') class poisson_gen(rv_discrete): r"""A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is: .. math:: f(k) = \exp(-\mu) \frac{\mu^k}{k!} for :math:`k \ge 0`. `poisson` takes :math:`\mu \geq 0` as shape parameter. When :math:`\mu = 0`, the ``pmf`` method returns ``1.0`` at quantile :math:`k = 0`. %(after_notes)s %(example)s """ # Override rv_discrete._argcheck to allow mu=0. 
def _argcheck(self, mu): return mu >= 0 def _rvs(self, mu, size=None, random_state=None): return random_state.poisson(mu, size) def _logpmf(self, k, mu): Pk = special.xlogy(k, mu) - gamln(k + 1) - mu return Pk def _pmf(self, k, mu): # poisson.pmf(k) = exp(-mu) * mu**k / k! return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k, mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k, mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q, mu)) vals1 = np.maximum(vals - 1, 0) temp = special.pdtr(vals1, mu) return np.where(temp >= q, vals1, vals) def _stats(self, mu): var = mu tmp = np.asarray(mu) mu_nonzero = tmp > 0 g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson') class planck_gen(rv_discrete): r"""A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) for :math:`k \ge 0` and :math:`\lambda > 0`. `planck` takes :math:`\lambda` as shape parameter. The Planck distribution can be written as a geometric distribution (`geom`) with :math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``. %(after_notes)s See Also -------- geom %(example)s """ def _argcheck(self, lambda_): return lambda_ > 0 def _pmf(self, k, lambda_): return -expm1(-lambda_)*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return -expm1(-lambda_*(k+1)) def _sf(self, x, lambda_): return exp(self._logsf(x, lambda_)) def _logsf(self, x, lambda_): k = floor(x) return -lambda_*(k+1) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(*(self._get_support(lambda_))) temp = self._cdf(vals1, lambda_) return np.where(temp >= q, vals1, vals) def _rvs(self, lambda_, size=None, random_state=None): # use relation to geometric distribution for sampling p = -expm1(-lambda_) return random_state.geometric(p, size=size) - 1.0 def _stats(self, lambda_): mu = 1/expm1(lambda_) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): C = -expm1(-lambda_) return lambda_*exp(-lambda_)/C - log(C) planck = planck_gen(a=0, name='planck', longname='A discrete exponential ') class boltzmann_gen(rv_discrete): r"""A Boltzmann (Truncated Discrete Exponential) random variable. %(before_notes)s Notes ----- The probability mass function for `boltzmann` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N)) for :math:`k = 0,..., N-1`. `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters. 
%(after_notes)s %(example)s """ def _argcheck(self, lambda_, N): return (lambda_ > 0) & (N > 0) def _get_support(self, lambda_, N): return self.a, N - 1 def _pmf(self, k, lambda_, N): # boltzmann.pmf(k) = # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann', a=0, longname='A truncated discrete exponential ') class randint_gen(rv_discrete): r"""A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is: .. math:: f(k) = \frac{1}{\texttt{high} - \texttt{low}} for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`. `randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, low, high): return (high > low) def _get_support(self, low, high): return low, high-1 def _pmf(self, k, low, high): # randint.pmf(k) = 1./(high - low) p = np.ones_like(k) / (high - low) return np.where((k >= low) & (k < high), p, 0.) def _cdf(self, x, low, high): k = floor(x) return (k - low + 1.) / (high - low) def _ppf(self, q, low, high): vals = ceil(q * (high - low) + low) - 1 vals1 = (vals - 1).clip(low, high) temp = self._cdf(vals1, low, high) return np.where(temp >= q, vals1, vals) def _stats(self, low, high): m2, m1 = np.asarray(high), np.asarray(low) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d*d - 1) / 12.0 g1 = 0.0 g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) return mu, var, g1, g2 def _rvs(self, low, high, size=None, random_state=None): """An array of *size* random integers >= ``low`` and < ``high``.""" if np.asarray(low).size == 1 and np.asarray(high).size == 1: # no need to vectorize in that case return rng_integers(random_state, low, high, size=size) if size is not None: # NumPy's RandomState.randint() doesn't broadcast its arguments. # Use `broadcast_to()` to extend the shapes of low and high # up to size. Then we can use the numpy.vectorize'd # randint without needing to pass it a `size` argument. low = np.broadcast_to(low, size) high = np.broadcast_to(high, size) randint = np.vectorize(partial(rng_integers, random_state), otypes=[np.int_]) return randint(low, high) def _entropy(self, low, high): return log(high - low) randint = randint_gen(name='randint', longname='A discrete uniform ' '(random integer)') # FIXME: problems sampling. class zipf_gen(rv_discrete): r"""A Zipf (Zeta) discrete random variable. %(before_notes)s See Also -------- zipfian Notes ----- The probability mass function for `zipf` is: .. math:: f(k, a) = \frac{1}{\zeta(a) k^a} for :math:`k \ge 1`, :math:`a > 1`. `zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the Riemann zeta function (`scipy.special.zeta`) The Zipf distribution is also known as the zeta distribution, which is a special case of the Zipfian distribution (`zipfian`). 
%(after_notes)s References ---------- .. [1] "Zeta Distribution", Wikipedia, https://en.wikipedia.org/wiki/Zeta_distribution %(example)s Confirm that `zipf` is the large `n` limit of `zipfian`. >>> from scipy.stats import zipfian >>> k = np.arange(11) >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000)) True """ def _rvs(self, a, size=None, random_state=None): return random_state.zipf(a, size=size) def _argcheck(self, a): return a > 1 def _pmf(self, k, a): # zipf.pmf(k, a) = 1/(zeta(a) * k**a) Pk = 1.0 / special.zeta(a, 1) / k**a return Pk def _munp(self, n, a): return _lazywhere( a > n + 1, (a, n), lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), np.inf) zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') def _gen_harmonic_gt1(n, a): """Generalized harmonic number, a > 1""" # See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz" return zeta(a, 1) - zeta(a, n+1) def _gen_harmonic_leq1(n, a): """Generalized harmonic number, a <= 1""" if not np.size(n): return n n_max = np.max(n) # loop starts at maximum of all n out = np.zeros_like(a, dtype=float) # add terms of harmonic series; starting from smallest to avoid roundoff for i in np.arange(n_max, 0, -1, dtype=float): mask = i <= n # don't add terms after nth out[mask] += 1/i**a[mask] return out def _gen_harmonic(n, a): """Generalized harmonic number""" n, a = np.broadcast_arrays(n, a) return _lazywhere(a > 1, (n, a), f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1) class zipfian_gen(rv_discrete): r"""A Zipfian discrete random variable. %(before_notes)s See Also -------- zipf Notes ----- The probability mass function for `zipfian` is: .. math:: f(k, a, n) = \frac{1}{H_{n,a} k^a} for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`, :math:`n \in \{1, 2, 3, \dots\}`. `zipfian` takes :math:`a` and :math:`n` as shape parameters. :math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic number of order :math:`a`. The Zipfian distribution reduces to the Zipf (zeta) distribution as :math:`n \rightarrow \infty`. %(after_notes)s References ---------- .. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law .. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf %(example)s Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`. 
>>> from scipy.stats import zipf >>> k = np.arange(11) >>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5)) True """ def _argcheck(self, a, n): # we need np.asarray here because moment (maybe others) don't convert return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int)) def _get_support(self, a, n): return 1, n def _pmf(self, k, a, n): return 1.0 / _gen_harmonic(n, a) / k**a def _cdf(self, k, a, n): return _gen_harmonic(k, a) / _gen_harmonic(n, a) def _sf(self, k, a, n): k = k + 1 # # to match SciPy convention # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1) / (k**a*_gen_harmonic(n, a))) def _stats(self, a, n): # see # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf Hna = _gen_harmonic(n, a) Hna1 = _gen_harmonic(n, a-1) Hna2 = _gen_harmonic(n, a-2) Hna3 = _gen_harmonic(n, a-3) Hna4 = _gen_harmonic(n, a-4) mu1 = Hna1/Hna mu2n = (Hna2*Hna - Hna1**2) mu2d = Hna**2 mu2 = mu2n / mu2d g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2) g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2 - 3*Hna1**4) / mu2n**2 g2 -= 3 return mu1, mu2, g1, g2 zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian') class dlaplace_gen(rv_discrete): r"""A Laplacian discrete random variable. %(before_notes)s Notes ----- The probability mass function for `dlaplace` is: .. math:: f(k) = \tanh(a/2) \exp(-a |k|) for integers :math:`k` and :math:`a > 0`. `dlaplace` takes :math:`a` as shape parameter. %(after_notes)s %(example)s """ def _pmf(self, k, a): # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) return tanh(a/2.0) * exp(-a * abs(k)) def _cdf(self, x, a): k = floor(x) f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1) f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1) return _lazywhere(k >= 0, (k, a), f=f, f2=f2) def _ppf(self, q, a): const = 1 + exp(a) vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1, -log((1-q) * const) / a)) vals1 = vals - 1 return np.where(self._cdf(vals1, a) >= q, vals1, vals) def _stats(self, a): ea = exp(a) mu2 = 2.*ea/(ea-1.)**2 mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 return 0., mu2, 0., mu4/mu2**2 - 3. def _entropy(self, a): return a / sinh(a) - log(tanh(a/2.0)) def _rvs(self, a, size=None, random_state=None): # The discrete Laplace is equivalent to the two-sided geometric # distribution with PMF: # f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k) # Reference: # https://www.sciencedirect.com/science/ # article/abs/pii/S0378375804003519 # Furthermore, the two-sided geometric distribution is # equivalent to the difference between two iid geometric # distributions. # Reference (page 179): # https://pdfs.semanticscholar.org/61b3/ # b99f466815808fd0d03f5d2791eea8b541a1.pdf # Thus, we can leverage the following: # 1) alpha = e^-a # 2) probability_of_success = 1 - alpha (Bernoulli trial) probOfSuccess = -np.expm1(-np.asarray(a)) x = random_state.geometric(probOfSuccess, size=size) y = random_state.geometric(probOfSuccess, size=size) return x - y dlaplace = dlaplace_gen(a=-np.inf, name='dlaplace', longname='A discrete Laplacian') class skellam_gen(rv_discrete): r"""A Skellam discrete random variable. %(before_notes)s Notes ----- Probability distribution of the difference of two correlated or uncorrelated Poisson random variables. Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with expected values :math:`\lambda_1` and :math:`\lambda_2`. 
Then, :math:`k_1 - k_2` follows a Skellam distribution with parameters :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where :math:`\rho` is the correlation coefficient between :math:`k_1` and :math:`k_2`. If the two Poisson-distributed r.v. are independent then :math:`\rho = 0`. Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive. For details see: https://en.wikipedia.org/wiki/Skellam_distribution `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, mu1, mu2, size=None, random_state=None): n = size return (random_state.poisson(mu1, n) - random_state.poisson(mu2, n)) def _pmf(self, x, mu1, mu2): px = np.where(x < 0, _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) # ncx2.pdf() returns nan's for extremely low probabilities return px def _cdf(self, x, mu1, mu2): x = floor(x) px = np.where(x < 0, _ncx2_cdf(2*mu2, -2*x, 2*mu1), 1 - _ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) return px def _stats(self, mu1, mu2): mean = mu1 - mu2 var = mu1 + mu2 g1 = mean / sqrt((var)**3) g2 = 1 / var return mean, var, g1, g2 skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') class yulesimon_gen(rv_discrete): r"""A Yule-Simon discrete random variable. %(before_notes)s Notes ----- The probability mass function for the `yulesimon` is: .. math:: f(k) = \alpha B(k, \alpha+1) for :math:`k=1,2,3,...`, where :math:`\alpha>0`. Here :math:`B` refers to the `scipy.special.beta` function. The sampling of random variates is based on pg 553, Section 6.3 of [1]_. Our notation maps to the referenced logic via :math:`\alpha=a-1`. For details see the wikipedia entry [2]_. References ---------- .. [1] Devroye, Luc. "Non-uniform Random Variate Generation", (1986) Springer, New York. .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution %(after_notes)s %(example)s """ def _rvs(self, alpha, size=None, random_state=None): E1 = random_state.standard_exponential(size) E2 = random_state.standard_exponential(size) ans = ceil(-E1 / log1p(-exp(-E2 / alpha))) return ans def _pmf(self, x, alpha): return alpha * special.beta(x, alpha + 1) def _argcheck(self, alpha): return (alpha > 0) def _logpmf(self, x, alpha): return log(alpha) + special.betaln(x, alpha + 1) def _cdf(self, x, alpha): return 1 - x * special.beta(x, alpha + 1) def _sf(self, x, alpha): return x * special.beta(x, alpha + 1) def _logsf(self, x, alpha): return log(x) + special.betaln(x, alpha + 1) def _stats(self, alpha): mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1)) mu2 = np.where(alpha > 2, alpha**2 / ((alpha - 2.0) * (alpha - 1)**2), np.inf) mu2 = np.where(alpha <= 1, np.nan, mu2) g1 = np.where(alpha > 3, sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)), np.inf) g1 = np.where(alpha <= 2, np.nan, g1) g2 = np.where(alpha > 4, (alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha * (alpha - 4) * (alpha - 3)), np.inf) g2 = np.where(alpha <= 2, np.nan, g2) return mu, mu2, g1, g2 yulesimon = yulesimon_gen(name='yulesimon', a=1) def _vectorize_rvs_over_shapes(_rvs1): """Decorator that vectorizes _rvs method to work on ndarray shapes""" # _rvs1 must be a _function_ that accepts _scalar_ args as positional # arguments, `size` and `random_state` as keyword arguments. # _rvs1 must return a random variate array with shape `size`. If `size` is # None, _rvs1 must return a scalar. 
# When applied to _rvs1, this decorator broadcasts ndarray args # and loops over them, calling _rvs1 for each set of scalar args. # For usage example, see _nchypergeom_gen def _rvs(*args, size, random_state): _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size) size = np.array(size) _rvs1_size = np.array(_rvs1_size) _rvs1_indices = np.array(_rvs1_indices) if np.all(_rvs1_indices): # all args are scalars return _rvs1(*args, size, random_state) out = np.empty(size) # out.shape can mix dimensions associated with arg_shape and _rvs1_size # Sort them to arg_shape + _rvs1_size for easy indexing of dimensions # corresponding with the different sets of scalar args j0 = np.arange(out.ndim) j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices])) out = np.moveaxis(out, j1, j0) for i in np.ndindex(*size[~_rvs1_indices]): # arg can be squeezed because singleton dimensions will be # associated with _rvs1_size, not arg_shape per _check_shape out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args], _rvs1_size, random_state) return np.moveaxis(out, j0, j1) # move axes back before returning return _rvs class _nchypergeom_gen(rv_discrete): r"""A noncentral hypergeometric discrete random variable. For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen. """ rvs_name = None dist = None def _get_support(self, M, n, N, odds): N, m1, n = M, n, N # follow Wikipedia notation m2 = N - m1 x_min = np.maximum(0, n - m2) x_max = np.minimum(n, m1) return x_min, x_max def _argcheck(self, M, n, N, odds): M, n = np.asarray(M), np.asarray(n), N, odds = np.asarray(N), np.asarray(odds) cond1 = (M.astype(int) == M) & (M >= 0) cond2 = (n.astype(int) == n) & (n >= 0) cond3 = (N.astype(int) == N) & (N >= 0) cond4 = odds > 0 cond5 = N <= M cond6 = n <= M return cond1 & cond2 & cond3 & cond4 & cond5 & cond6 def _rvs(self, M, n, N, odds, size=None, random_state=None): @_vectorize_rvs_over_shapes def _rvs1(M, n, N, odds, size, random_state): length = np.prod(size) urn = _PyStochasticLib3() rv_gen = getattr(urn, self.rvs_name) rvs = rv_gen(N, n, M, odds, length, random_state) rvs = rvs.reshape(size) return rvs return _rvs1(M, n, N, odds, size=size, random_state=random_state) def _pmf(self, x, M, n, N, odds): @np.vectorize def _pmf1(x, M, n, N, odds): urn = self.dist(N, n, M, odds, 1e-12) return urn.probability(x) return _pmf1(x, M, n, N, odds) def _stats(self, M, n, N, odds, moments): @np.vectorize def _moments1(M, n, N, odds): urn = self.dist(N, n, M, odds, 1e-12) return urn.moments() m, v = _moments1(M, n, N, odds) if ("m" in moments or "v" in moments) else None s, k = None, None return m, v, s, k class nchypergeom_fisher_gen(_nchypergeom_gen): r"""A Fisher's noncentral hypergeometric discrete random variable. Fisher's noncentral hypergeometric distribution models drawing objects of two types from a bin. `M` is the total number of objects, `n` is the number of Type I objects, and `odds` is the odds ratio: the odds of selecting a Type I object rather than a Type II object when there is only one object of each type. The random variate represents the number of Type I objects drawn if we take a handful of objects from the bin at once and find out afterwards that we took `N` objects. %(before_notes)s See Also -------- nchypergeom_wallenius, hypergeom, nhypergeom Notes ----- Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond with parameters `N`, `n`, and `M` (respectively) as defined above. The probability mass function is defined as .. 
math:: p(x; M, n, N, \omega) = \frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0}, for :math:`x \in [x_l, x_u]`, :math:`M \in {\mathbb N}`, :math:`n \in [0, M]`, :math:`N \in [0, M]`, :math:`\omega > 0`, where :math:`x_l = \max(0, N - (M - n))`, :math:`x_u = \min(N, n)`, .. math:: P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y, and the binomial coefficients are defined as .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. `nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with permission for it to be distributed under SciPy's license. The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not universally accepted; they are chosen for consistency with `hypergeom`. Note that Fisher's noncentral hypergeometric distribution is distinct from Wallenius' noncentral hypergeometric distribution, which models drawing a pre-determined `N` objects from a bin one by one. When the odds ratio is unity, however, both distributions reduce to the ordinary hypergeometric distribution. %(after_notes)s References ---------- .. [1] Agner Fog, "Biased Urn Theory". https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf .. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia, https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution %(example)s """ rvs_name = "rvs_fisher" dist = _PyFishersNCHypergeometric nchypergeom_fisher = nchypergeom_fisher_gen( name='nchypergeom_fisher', longname="A Fisher's noncentral hypergeometric") class nchypergeom_wallenius_gen(_nchypergeom_gen): r"""A Wallenius' noncentral hypergeometric discrete random variable. Wallenius' noncentral hypergeometric distribution models drawing objects of two types from a bin. `M` is the total number of objects, `n` is the number of Type I objects, and `odds` is the odds ratio: the odds of selecting a Type I object rather than a Type II object when there is only one object of each type. The random variate represents the number of Type I objects drawn if we draw a pre-determined `N` objects from a bin one by one. %(before_notes)s See Also -------- nchypergeom_fisher, hypergeom, nhypergeom Notes ----- Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond with parameters `N`, `n`, and `M` (respectively) as defined above. The probability mass function is defined as .. math:: p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x} \int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt for :math:`x \in [x_l, x_u]`, :math:`M \in {\mathbb N}`, :math:`n \in [0, M]`, :math:`N \in [0, M]`, :math:`\omega > 0`, where :math:`x_l = \max(0, N - (M - n))`, :math:`x_u = \min(N, n)`, .. math:: D = \omega(n - x) + ((M - n)-(N-x)), and the binomial coefficients are defined as .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. `nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with permission for it to be distributed under SciPy's license. The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not universally accepted; they are chosen for consistency with `hypergeom`. Note that Wallenius' noncentral hypergeometric distribution is distinct from Fisher's noncentral hypergeometric distribution, which models take a handful of objects from the bin at once, finding out afterwards that `N` objects were taken. When the odds ratio is unity, however, both distributions reduce to the ordinary hypergeometric distribution. %(after_notes)s References ---------- .. [1] Agner Fog, "Biased Urn Theory". 
https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf .. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia, https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution %(example)s """ rvs_name = "rvs_wallenius" dist = _PyWalleniusNCHypergeometric nchypergeom_wallenius = nchypergeom_wallenius_gen( name='nchypergeom_wallenius', longname="A Wallenius' noncentral hypergeometric") # Collect names of classes and objects in this module. pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) __all__ = _distn_names + _distn_gen_names
bsd-3-clause
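A brief sketch (not part of the record above) that checks, against the public scipy.stats API, two relationships stated in that file's docstrings: the nhypergeom/hypergeom identity, using the same numbers as the docstring example, and the note that planck is a geometric distribution with p = 1 - exp(-lambda) shifted by loc = -1.

```python
import numpy as np
from scipy.stats import hypergeom, nhypergeom, planck, geom

# nhypergeom docstring: NHG(k; M, n, r) = HG(k; M, n, k+r-1) * (M-n-(r-1)) / (M-(k+r-1))
M, n, r, k = 45, 13, 8, 6
lhs = nhypergeom.pmf(k, M, n, r)
rhs = hypergeom.pmf(k, M, n, k + r - 1) * (M - n - (r - 1)) / (M - (k + r - 1))
print(np.isclose(lhs, rhs))  # True

# planck docstring: planck(lambda_) equals geom(p=1-exp(-lambda_)) shifted by loc=-1
lam, ks = 0.5, np.arange(10)
print(np.allclose(planck.pmf(ks, lam), geom.pmf(ks, 1 - np.exp(-lam), loc=-1)))  # True
```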
terentjew-alexey/market-analysis-system
data/create_picture.py
1
1393
import time import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from mas_tools.data import timeseries_to_img lpath = 'E:/Projects/market-analysis-system/data/transformed/' spath = 'E:/Projects/market-analysis-system/data/test/' fn = 'GBPUSD240' window = 50 new_data = np.genfromtxt(lpath+fn+'.csv', delimiter=';') # data = new_data[800:920, 1:5] # plt.plot(data[:, 0]) # plt.plot(data[:, 1]) # plt.plot(data[:, 2]) # plt.plot(data[:, 3]) # plt.legend(['open', 'high', 'low', 'close']) # plt.savefig(spath + fn + '_plt.png') # plt.close() # plt.show() imgs = np.array([]) for idx in range(800, 900): image = timeseries_to_img(new_data[idx:idx+window, 1:5]) # image.save(spath + fn + '_pil_' + str(idx) + '.png', 'PNG') imgs = np.append(imgs, image) imgs = imgs.reshape((100, window*4, window*4, 3)) print(imgs.shape) print(imgs[2, :, :, 1]) print(imgs.dtype) # imgs = imgs.reshape((100, (window*4+1) * (window*4+1) * 3)) # np.savetxt(spath + '_numpy_array.csv', imgs, # delimiter=';', fmt='%.0f') # np.savez_compressed(spath + '_numpy_array.npz', imgs) # print('Saved\nNow read and save as image') # loaded_imgs = np.load(spath + '_numpy_array.npz') # print(loaded_imgs.f.arr_0.shape) # from PIL import Image # pic = np.reshape(imgs[99], (window*4+1, window*4+1, 3)).astype(np.float) # pic = Image.fromarray(pic) # pic.show()
mit
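create_picture.py depends on the project-specific mas_tools.data.timeseries_to_img helper and on local CSV paths, so it does not run on its own. Below is a self-contained sketch of the same sliding-window idea; window_to_img is a hypothetical stand-in for the real renderer, and the synthetic OHLC array is an assumption for illustration only.

```python
import numpy as np

def window_to_img(window):
    """Hypothetical renderer: scale an (N, 4) OHLC window into uint8 'pixels'."""
    lo, hi = window.min(), window.max()
    return np.uint8(255 * (window - lo) / (hi - lo + 1e-12))

rng = np.random.default_rng(0)
ohlc = np.cumsum(rng.normal(size=(1000, 4)), axis=0)  # synthetic OHLC-like series

window = 50
imgs = np.stack([window_to_img(ohlc[i:i + window]) for i in range(800, 900)])
print(imgs.shape)  # (100, 50, 4), one "image" per sliding window
```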
AnasGhrab/scikit-learn
sklearn/mixture/tests/test_dpgmm.py
261
4490
import unittest import sys import numpy as np from sklearn.mixture import DPGMM, VBGMM from sklearn.mixture.dpgmm import log_normalize from sklearn.datasets import make_blobs from sklearn.utils.testing import assert_array_less, assert_equal from sklearn.mixture.tests.test_gmm import GMMTester from sklearn.externals.six.moves import cStringIO as StringIO np.seterr(all='warn') def test_class_weights(): # check that the class weights are updated # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50) dpgmm.fit(X) # get indices of components that are used: indices = np.unique(dpgmm.predict(X)) active = np.zeros(10, dtype=np.bool) active[indices] = True # used components are important assert_array_less(.1, dpgmm.weights_[active]) # others are not assert_array_less(dpgmm.weights_[~active], .05) def test_verbose_boolean(): # checks that the output for the verbose output is the same # for the flag values '1' and 'True' # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm_bool = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=True) dpgmm_int = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: # generate output with the boolean flag dpgmm_bool.fit(X) verbose_output = sys.stdout verbose_output.seek(0) bool_output = verbose_output.readline() # generate output with the int flag dpgmm_int.fit(X) verbose_output = sys.stdout verbose_output.seek(0) int_output = verbose_output.readline() assert_equal(bool_output, int_output) finally: sys.stdout = old_stdout def test_verbose_first_level(): # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: dpgmm.fit(X) finally: sys.stdout = old_stdout def test_verbose_second_level(): # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=2) old_stdout = sys.stdout sys.stdout = StringIO() try: dpgmm.fit(X) finally: sys.stdout = old_stdout def test_log_normalize(): v = np.array([0.1, 0.8, 0.01, 0.09]) a = np.log(2 * v) assert np.allclose(v, log_normalize(a), rtol=0.01) def do_model(self, **kwds): return VBGMM(verbose=False, **kwds) class DPGMMTester(GMMTester): model = DPGMM do_test_eval = False def score(self, g, train_obs): _, z = g.score_samples(train_obs) return g.lower_bound(train_obs, z) class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester): covariance_type = 'spherical' setUp = GMMTester._setUp class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester): covariance_type = 'diag' setUp = GMMTester._setUp class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester): covariance_type = 'tied' setUp = GMMTester._setUp class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester): covariance_type = 'full' setUp = GMMTester._setUp class VBGMMTester(GMMTester): model = do_model do_test_eval = False def score(self, g, train_obs): _, z = g.score_samples(train_obs) return g.lower_bound(train_obs, z) class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester): covariance_type = 'spherical' setUp = GMMTester._setUp class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester): covariance_type = 'diag' setUp = GMMTester._setUp class 
TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester): covariance_type = 'tied' setUp = GMMTester._setUp class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester): covariance_type = 'full' setUp = GMMTester._setUp
bsd-3-clause
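DPGMM and VBGMM, which this test module exercises, were deprecated and later removed from scikit-learn; the closest current analogue is BayesianGaussianMixture with a Dirichlet-process weight prior. A rough sketch of the class-weight check in that API follows; the component count, thresholds and iteration budget are illustrative assumptions, not values taken from the original tests.

```python
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.mixture import BayesianGaussianMixture

X, _ = make_blobs(random_state=1)
bgmm = BayesianGaussianMixture(
    n_components=10,
    weight_concentration_prior_type="dirichlet_process",
    random_state=1, max_iter=200,
).fit(X)

active = np.zeros(10, dtype=bool)
active[np.unique(bgmm.predict(X))] = True
print(bgmm.weights_[active])   # components actually used carry most of the weight
print(bgmm.weights_[~active])  # unused components are driven toward zero
```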
darcyabjones/bioplotlib
bioplotlib/collections.py
1
14622
""" Extension of matplotlib collections. Classes for the efficient drawing of large collections of objects that share most properties, e.g., a large number of line segments or polygons. The classes are not meant to be as flexible as their single element counterparts (e.g., you may not be able to select all line styles) but they are meant to be fast for common use cases (e.g., a large set of solid line segemnts) """ ############################ Import all modules ############################## from __future__ import (absolute_import, division, print_function, unicode_literals) from copy import copy from collections import defaultdict import numpy as np import matplotlib.transforms as transforms from matplotlib.path import Path from matplotlib.patches import PathPatch from matplotlib.patches import Patch from matplotlib.collections import Collection import bioplotlib.feature_shapes from bioplotlib.feature_shapes import Triangle from bioplotlib.feature_shapes import OpenTriangle __contributors = [ "Darcy Jones <darcy.ab.jones@gmail.com>" ] def new_shape(c, **kwargs): """ . """ def callable(*a, **k): kwargs.update(k) return c(*a, **kwargs) return callable ################################## Classes ################################### class Feature(Collection): """ Groups shapes into features so that they stay together in stacked tracks. """ def __init__( self, blocks, shape=new_shape(Triangle), between_shape=None, strand=None, offset=0, by_axis=None, name=None, **kwargs ): if type(shape) in (tuple, list): self._shape = shape else: self._shape = [shape] if type(between_shape) in (tuple, list, type(None)): self._between_shape = between_shape else: self._between_shape = [between_shape] self._blocks = blocks self._strand = strand self._offset = offset self._by_axis = by_axis self._name = name self._paths = None self._patches = None Collection.__init__(self, **kwargs) self._draw_patches() return @property def blocks(self): return self._blocks @blocks.setter def blocks(self, blocks): self._blocks = blocks @property def strand(self): return self._strand @strand.setter def strand(self, strand): self._strand = strand for patch in self.patches: if patch.strand is not None: patch.strand = strand @property def offset(self): return self._offset @offset.setter def offset(self, offset): self._offset = offset if offset != 0: for patch in self.patches: patch.offset += offset @property def by_axis(self): return self._by_axis @by_axis.setter def by_axis(self, by_axis): self._by_axis = by_axis if by_axis is not None: for patch in self.patches: if patch.by_axis is not None: patch.by_axis = by_axis @property def shape(self): return self._shape @shape.setter def shape(self, shape): self._shape = shape self._draw_patches() @property def between_shape(self): return self._between_shape @between_shape.setter def between_shape(self, shape): self._between_shape = shape self._draw_patches() @property def patches(self): return self._patches @patches.setter def patches(self, patches): self._patches = patches self._set_paths() self._set_props() return @property def paths(self): return self._paths @paths.setter def paths(self, paths): self._paths = paths return def get_paths(self): """ alias for paths property """ return self.paths def set_paths(self, paths): """ alias for paths.setter """ self.paths = paths return def _set_paths(self): self.paths = [ p.get_transform().transform_path(p.get_path()) for p in self.patches ] return def _set_props(self): def determine_facecolor(patch): if patch.get_fill(): return 
patch.get_facecolor() return [0, 0, 0, 0] props = defaultdict(list) valid = { "facecolors": (self.set_facecolor, determine_facecolor), "edgecolors": (self.set_edgecolor, Patch.get_edgecolor), "linewidths": (self.set_linewidth, Patch.get_linewidth), "linestyles": (self.set_linestyle, Patch.get_linestyle), "antialiaseds": (self.set_antialiased, Patch.get_antialiased), } for p in self.patches: for key, (set_, get) in valid.items(): props[key].append(get(p)) for key, (set_, get) in valid.items(): set_(props[key]) return def _draw_patches(self): start = 0 end = None patches = list() length_blocks = len(self.blocks) # Draw the between shapes first between_shape_pos = 0 between_blocks = [] if self.between_shape is not None: between_blocks = list(zip( range(0, length_blocks - 1), range(1, length_blocks) )) length_between_blocks = len(between_blocks) # Note that if between shape is none there is nothing to iterate over. for h, (i, j) in enumerate(between_blocks): first = self.blocks[i] second = self.blocks[j] min_dist = min([(0, 0), (0, 1), (1, 0), (1, 1)], key=lambda t: abs(second[t[0]] - first[t[1]]) ) start = first[min_dist[1]] end = second[min_dist[0]] strand_first = None strand_second = None try: strand_first = first[2] strand_second = second[2] except: pass if strand_first == strand_second: strand = strand_first else: strand = None if len(self.between_shape) > 1 and h + 1 == length_between_blocks: between_shape_pos = -1 patches.append(self.between_shape[between_shape_pos]( start=start, end=end, strand=None )) if len(self.between_shape) > 2: between_shape_pos = 1 # Now do the blocks shape_pos = 0 for i, block in enumerate(self.blocks): if len(self.shape) > 1 and i + 1 == length_blocks: shape_pos = -1 start = block[0] end = block[1] strand = None try: strand = block[2] except: pass patches.append(self.shape[shape_pos]( start=start, end=end, strand=strand )) if len(self.shape) > 2: shape_pos = 1 if self.offset != 0: for patch in patches: patch.offset += self.offset self._patches = patches self._set_paths() self._set_props() return class FeatureGroup(Collection): """ Generic collection for genomic tracks. Methods ------- stack Determines how to stack features in a track """ def __init__( self, features, width=1, offset=0, stack=None, by_axis=None, name=None, **kwargs ): """ . """ self._features = features self._width = width self._stack = stack self._by_axis = by_axis self._offset = offset self.name = name self._patches = None self._paths = None Collection.__init__(self, **kwargs) self._draw_patches() return @property def features(self): return self._features @features.setter def features(self, features): """ . 
""" self._features = features self._draw_patches() return @property def offset(self): return self._offset @offset.setter def offset(self, offset): self._offset = offset self._draw_patches() return @property def by_axis(self): return self._by_axis @by_axis.setter def by_axis(self, by_axis): self._by_axis = by_axis if by_axis is not None: for patch in self.patches: if patch.by_axis is not None: patch.by_axis = by_axis return @property def paths(self): return self._paths @paths.setter def paths(self, paths): self._paths = paths return def get_paths(self): """ alias for paths property """ return self.paths def set_paths(self, paths): """ alias for paths.setter """ self.paths = paths return def _set_paths(self): self.paths = [ p.get_transform().transform_path(p.get_path()) for p in self._patches ] return def _set_props(self): def determine_facecolor(patch): if patch.get_fill(): return patch.get_facecolor() return [0, 0, 0, 0] props = defaultdict(list) valid = { "facecolors": (self.set_facecolor, determine_facecolor), "edgecolors": (self.set_edgecolor, Patch.get_edgecolor), "linewidths": (self.set_linewidth, Patch.get_linewidth), "linestyles": (self.set_linestyle, Patch.get_linestyle), "antialiaseds": (self.set_antialiased, Patch.get_antialiased), } for p in self._patches: for key, (set_, get) in valid.items(): props[key].append(get(p)) for key, (set_, get) in valid.items(): set_(props[key]) return def _draw_patches(self): """ . """ patches = list() for feature in self.features: if self._offset != 0: feature.offset = self._offset if isinstance(feature, Patch): patches.append(feature) elif isinstance(feature, Feature): patches.extend(feature.patches) elif isinstance(feature, FeatureGroup): patches.extend(feature.patches) else: pass self._patches = patches return class LinkCollection(object): """ . """ def __init__( self, obj, links=list(), add_to_fig=False, by=None ): """ . """ self.links = list() self.links.extend(links) self.obj = obj self.add_to_fig = add_to_fig self.by = by lobj = obj() self.figure = lobj.figure return def add(self, blocks, by=None, **kwargs): """ Keyword arguments: blocks -- a 2, 3, or 4 dimensional array 2 dimensions: [[ax1_x1, ax1_x2], [ax2_x1, ax1_x2]] # single record 3 dimensions: [ [[ax1_x1, ax1_x2], [ax2_x1, ax2_x2]], [[ax1_x1, ax1_x2], [ax2_x1, ax2_x2]], ... ] 4 dimensions: [ [ [[ax1_x1, ax1_x2], [ax1_y1, ax1_y2]], [[ax2_x1, ax2_x2], [ax2_y1, ax2_y2]], ], [ [[ax1_x1, ax1_x2], [ax1_y1, ax1_y2]], [[ax2_x1, ax2_x2], [ax2_y1, ax2_y2]], ], ... 
] """ by = self.by if by is None else by obj = self.obj if len(blocks) == 0: return list() blocks = np.array(blocks) if len(blocks.shape) == 2: if by == 'x' or self.by is None: pblocks = np.array([ [[ax_xrange, None] for ax_xrange in blocks] ]) elif by == 'y': pblocks = np.array([ [[None, ax_yrange] for ax_yrange in blocks] ]) elif len(blocks.shape) == 3: if by == 'x' or self.by is None: pblocks = np.array([ [[ax1_xrange, None], [ax2_xrange, None]] for ax1_xrange, ax2_xrange in blocks ]) elif by == 'y': pblocks = np.array([ [[None, ax1_yrange], [None, ax2_yrange]] for ax1_yrange, ax2_yrange in blocks ]) else: pblocks = blocks new_links = list() for block in pblocks: ((ax1_xrange, ax1_yrange), (ax2_xrange, ax2_yrange)) = block params = dict() #copy(kwargs) params['by'] = by params['ax1_xrange'] = ax1_xrange params['ax1_yrange'] = ax1_yrange params['ax2_xrange'] = ax2_xrange params['ax2_yrange'] = ax2_yrange params = {k:v for k, v in params.items() if v is not None} lobj = obj() lobj.__dict__.update(params) lobj.properties.update(kwargs) self.links.append(lobj) new_links.append(lobj) return new_links def __call__(self): return self.draw() def draw(self): """ . """ patches = list() for link in self.links: if not link.in_limits(): continue path = link.draw() link_patch = PathPatch( path, transform=self.figure.transFigure, **link.properties ) rax = link.ax1 qax = link.ax2 patches.append(link_patch) if self.add_to_fig: self.figure.patches.extend(patches) return patches ################################# Functions ##################################
bsd-3-clause
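Feature and FeatureGroup above are a layer over matplotlib's collection machinery; the core pattern they build on can be shown with a stock PatchCollection. This is a generic matplotlib illustration, not bioplotlib's own API.

```python
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection

blocks = [(0, 10), (15, 25), (40, 55)]  # (start, end) pairs, like Feature.blocks
patches = [Rectangle((start, 0), end - start, 1) for start, end in blocks]

fig, ax = plt.subplots(figsize=(6, 1.5))
ax.add_collection(PatchCollection(patches, facecolor="steelblue", edgecolor="black"))
ax.set_xlim(0, 60)
ax.set_ylim(-0.5, 1.5)
plt.show()
```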
dogwood008/DeepFX
histdata_converter.py
1
2444
# coding: utf-8 # In[ ]: # Convert the 1-minute bar data downloaded from histdata.com into bars of an arbitrary timeframe # http://www.histdata.com/download-free-forex-historical-data/?/ascii/1-minute-bar-quotes/usdjpy/2017/10 # In[ ]: import pandas as pd import numpy as np from hist_data import HistData, BitcoinHistData # In[ ]: def get_new_index(old_dataframe, freq='5min'): start = old_dataframe[0:1].index[0] end = old_dataframe[-1:].index[0] new_index = pd.date_range(start, end, None, freq) return new_index def create_dataframe(dataarray): new_df = pd.DataFrame.from_records(dataarray, index=['Date'], columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume']) return new_df def create_new_dataarray(hist_data, new_index, i): old_dataframe = hist_data.data() start = new_index[i].to_pydatetime() end = new_index[i+1].to_pydatetime() slice = old_dataframe.loc[start:end][:-1] if len(slice) == 0: return open = slice['Open'].iloc[0] high = max(slice['High']) low = min(slice['Low']) close = slice['Close'].iloc[-1] if type(hist_data) == HistData: volume = slice.sum()['Volume'] elif type(hist_data) == BitcoinHistData: volume = slice.sum()['Volume_(BTC)'] return np.array([start, open, high, low, close, volume]) def create_new_dataframe(hist_data, freq='5min'): old_dataframe = hist_data.data() new_index = get_new_index(old_dataframe, freq) datalist = [create_new_dataarray(hist_data, new_index, i) for i in range(len(new_index) - 1)] none_removed_array = np.array([x for x in datalist if x is not None]) new_df = create_dataframe(none_removed_array) return new_df # In[ ]: if False: read_filepath = 'historical_data/DAT_ASCII_USDJPY_M1_201710.csv' write_filepath = 'historical_data/DAT_ASCII_USDJPY_M1_201710_h1.csv' hd = HistData(read_filepath) new_df = create_new_dataframe(hd, freq='1h') new_df.to_csv(write_filepath, sep=';', header=['Open', 'High', 'Low', 'Close', 'Volume']) # In[ ]: if True: read_filepath = 'historical_data/coincheckJPY_1-min_data_2014-10-31_to_2017-10-20.csv' write_filepath = 'historical_data/coincheckJPY_1-min_data_2014-10-31_to_2017-10-20_h1.csv' hd = BitcoinHistData(read_filepath) new_df = create_new_dataframe(hd, freq='1h') new_df.to_csv(write_filepath, sep=';', header=['Open', 'High', 'Low', 'Close', 'Volume'])
mit
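The per-window OHLCV aggregation in histdata_converter.py can also be expressed with pandas' built-in resampling. A sketch follows; it assumes a DataFrame with a DatetimeIndex and the Open/High/Low/Close/Volume column names that the script writes out, and the tiny demo data is synthetic.

```python
import numpy as np
import pandas as pd

def resample_ohlcv(df, freq='1h'):
    """Aggregate fine-grained bars into coarser OHLCV bars."""
    agg = {'Open': 'first', 'High': 'max', 'Low': 'min',
           'Close': 'last', 'Volume': 'sum'}
    return df.resample(freq).agg(agg).dropna(how='all')

# tiny synthetic demo: 180 one-minute bars -> 3 one-hour bars
idx = pd.date_range('2017-10-01', periods=180, freq='1min')
price = pd.Series(np.random.default_rng(0).normal(size=180).cumsum() + 100, index=idx)
minute_bars = pd.DataFrame({'Open': price, 'High': price + 0.1,
                            'Low': price - 0.1, 'Close': price, 'Volume': 1.0})
print(resample_ohlcv(minute_bars, '1h'))
```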
sys-bio/tellurium
examples/notebooks-py/tellurium_stochastic.py
2
2082
# coding: utf-8 # Back to the main [Index](../index.ipynb) # #### Stochastic simulation # # Stochastic simulations can be run by changing the current integrator type to 'gillespie' or by using the `r.gillespie` function. # In[1]: #!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!! from __future__ import print_function import tellurium as te te.setDefaultPlottingEngine('matplotlib') get_ipython().magic(u'matplotlib inline') import numpy as np r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 40') r.integrator = 'gillespie' r.integrator.seed = 1234 results = [] for k in range(1, 50): r.reset() s = r.simulate(0, 40) results.append(s) r.plot(s, show=False, alpha=0.7) te.show() # #### Seed # # Setting the identical seed for all repeats results in identical traces in each simulation. # In[2]: results = [] for k in range(1, 20): r.reset() r.setSeed(123456) s = r.simulate(0, 40) results.append(s) r.plot(s, show=False, loc=None, color='black', alpha=0.7) te.show() # #### Combining Simulations # # You can combine two timecourse simulations and change e.g. parameter values in between each simulation. The `gillespie` method simulates up to the given end time `10`, after which you can make arbitrary changes to the model, then simulate again. # # When using the `te.plot` function, you can pass the parameter `names`, which controls the names that will be used in the figure legend, and `tags`, which ensures that traces with the same tag will be drawn with the same color. # In[3]: import tellurium as te import numpy as np r = te.loada('S1 -> S2; k1*S1; k1 = 0.02; S1 = 100') r.setSeed(1234) for k in range(1, 20): r.resetToOrigin() res1 = r.gillespie(0, 10) # change in parameter after the first half of the simulation r.k1 = r.k1*20 res2 = r.gillespie (10, 20) sim = np.vstack([res1, res2]) te.plot(sim[:,0], sim[:,1:], alpha=0.7, names=['S1', 'S2'], tags=['S1', 'S2'], show=False) te.show()
apache-2.0
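A short follow-on sketch that averages repeated Gillespie runs, built from the same tellurium calls the notebook uses (te.loada, the 'gillespie' integrator, per-run seeds). Forcing a common output grid by disabling variable step size is an assumption about the integrator settings, not something shown in the record.

```python
import numpy as np
import tellurium as te

r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 40')
r.integrator = 'gillespie'
r.integrator.variable_step_size = False  # assumed setting; yields a fixed time grid

runs = []
for seed in range(20):
    r.reset()
    r.integrator.seed = seed
    runs.append(np.array(r.simulate(0, 40, 101)))  # 101 output points per run

mean_traj = np.mean(runs, axis=0)  # columns: time, S1, S2
print(mean_traj.shape)             # (101, 3)
```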
saulberardo/MagikEDA
test/univarTest.py
1
1687
""" Test Case for module univar.py """ import unittest import matplotlib.pyplot as plt import pandas as pd import numpy as np from magikeda import univar class UnivarTestCase(unittest.TestCase): def test_plot_bar_chart(self): # Test series with categorical data d1 = pd.Series(pd.Categorical(['c', 'a', 'c', 'a', 'c', 'b'], ['c', 'b', 'a'])) univar.plot_bar_chart(d1) # Test series with string data d2 = pd.Series(['b', 'b', 'b', 'a', 'b', 'c']) univar.plot_bar_chart(d2) # Test both series at the same time univar.plot_bar_chart([d1, d2]) def test_dataframe_profile(self): # Plot a simple data frame with two categorical variables and one numerical d1 = pd.Series(pd.Categorical(['c', 'a', 'c', 'a', 'c', 'b'], ['c', 'b', 'a'])) d2 = pd.Series(['b', 'b', 'b', 'a', 'b', 'c']) d3 = pd.Series(np.random.rand(6)) univar.plot_dataframe_profile( pd.DataFrame({'Var 1': d1, 'Var 2': d2, 'Var 3': d3}), default_ylabel='Time (%)', ylabels={'Var 1': 'Label of Var 1'}, default_xlabel='State', xlabels={'Var 2': 'X lable of Var 2'} ) def test_add_extra_xaxis(self): # Create a new figure f, _ = plt.subplots() # Plota a curve plt.plot([1,2,3,2,5]) # Add a new xaxis univar.add_extra_xaxis(f, [1,4],['a','b']) def setUp(self): # Disable auto-plotting plt.ioff() def tearDown(self): # Close all figures created (probably it is not necessary) plt.close('all') if __name__ == '__main__': unittest.main()
gpl-2.0
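The setUp/tearDown pattern in univarTest.py (switch off interactive drawing, close every figure after each test) is reusable on its own; a minimal self-contained version is sketched below. The Agg backend line is an addition for headless runs and is not part of the original test file.

```python
import unittest
import matplotlib
matplotlib.use('Agg')  # headless backend; an assumption for CI, not in the original
import matplotlib.pyplot as plt

class PlottingTestCase(unittest.TestCase):
    def setUp(self):
        plt.ioff()        # disable interactive auto-drawing

    def tearDown(self):
        plt.close('all')  # drop any figures the test created

    def test_simple_plot(self):
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3, 2, 5])
        self.assertEqual(len(fig.axes), 1)

if __name__ == '__main__':
    unittest.main()
```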
Lawrence-Liu/scikit-learn
sklearn/preprocessing/tests/test_label.py
156
17626
import numpy as np from scipy.sparse import issparse from scipy.sparse import coo_matrix from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.utils.multiclass import type_of_target from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.preprocessing.label import LabelBinarizer from sklearn.preprocessing.label import MultiLabelBinarizer from sklearn.preprocessing.label import LabelEncoder from sklearn.preprocessing.label import label_binarize from sklearn.preprocessing.label import _inverse_binarize_thresholding from sklearn.preprocessing.label import _inverse_binarize_multiclass from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_label_binarizer(): lb = LabelBinarizer() # one-class case defaults to negative label inp = ["pos", "pos", "pos", "pos"] expected = np.array([[0, 0, 0, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) # two-class case inp = ["neg", "pos", "pos", "neg"] expected = np.array([[0, 1, 1, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["neg", "pos"]) assert_array_equal(expected, got) to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) assert_array_equal(lb.inverse_transform(to_invert), inp) # multi-class case inp = ["spam", "ham", "eggs", "ham", "0"] expected = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]) got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam']) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) def test_label_binarizer_unseen_labels(): lb = LabelBinarizer() expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) got = lb.fit_transform(['b', 'd', 'e']) assert_array_equal(expected, got) expected = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]) got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f']) assert_array_equal(expected, got) def test_label_binarizer_set_label_encoding(): lb = LabelBinarizer(neg_label=-2, pos_label=0) # two-class case with pos_label=0 inp = np.array([0, 1, 1, 0]) expected = np.array([[-2, 0, 0, -2]]).T got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) lb = LabelBinarizer(neg_label=-2, pos_label=2) # multi-class case inp = np.array([3, 2, 1, 2, 0]) expected = np.array([[-2, -2, -2, +2], [-2, -2, +2, -2], [-2, +2, -2, -2], [-2, -2, +2, -2], [+2, -2, -2, -2]]) got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) @ignore_warnings def test_label_binarizer_errors(): # Check that invalid arguments yield ValueError one_class = np.array([0, 0, 0, 0]) lb = LabelBinarizer().fit(one_class) multi_label = [(2, 3), (0,), (0, 2)] assert_raises(ValueError, lb.transform, multi_label) lb = LabelBinarizer() assert_raises(ValueError, lb.transform, []) assert_raises(ValueError, lb.inverse_transform, []) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2) assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2, sparse_output=True) # Fail on y_type 
assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2], threshold=0) # Sequence of seq type should raise ValueError y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs) # Fail on the number of classes assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2, 3], threshold=0) # Fail on the dimension of 'binary' assert_raises(ValueError, _inverse_binarize_thresholding, y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary", classes=[1, 2, 3], threshold=0) # Fail on multioutput data assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]])) assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]), [1, 2, 3]) def test_label_encoder(): # Test LabelEncoder's transform and inverse_transform methods le = LabelEncoder() le.fit([1, 1, 4, 5, -1, 0]) assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1]) assert_raises(ValueError, le.transform, [0, 6]) def test_label_encoder_fit_transform(): # Test fit_transform le = LabelEncoder() ret = le.fit_transform([1, 1, 4, 5, -1, 0]) assert_array_equal(ret, [2, 2, 3, 4, 0, 1]) le = LabelEncoder() ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"]) assert_array_equal(ret, [1, 1, 2, 0]) def test_label_encoder_errors(): # Check that invalid arguments yield ValueError le = LabelEncoder() assert_raises(ValueError, le.transform, []) assert_raises(ValueError, le.inverse_transform, []) # Fail on unseen labels le = LabelEncoder() le.fit([1, 2, 3, 1, -1]) assert_raises(ValueError, le.inverse_transform, [-1]) def test_sparse_output_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for sparse_output in [True, False]: for inp in inputs: # With fit_tranform mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit_transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit(inp()).transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) assert_raises(ValueError, mlb.inverse_transform, csr_matrix(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]]))) def test_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for inp in inputs: # With fit_tranform mlb = MultiLabelBinarizer() got = mlb.fit_transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer() got = 
mlb.fit(inp()).transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) def test_multilabel_binarizer_empty_sample(): mlb = MultiLabelBinarizer() y = [[1, 2], [1], []] Y = np.array([[1, 1], [1, 0], [0, 0]]) assert_array_equal(mlb.fit_transform(y), Y) def test_multilabel_binarizer_unknown_class(): mlb = MultiLabelBinarizer() y = [[1, 2]] assert_raises(KeyError, mlb.fit(y).transform, [[0]]) mlb = MultiLabelBinarizer(classes=[1, 2]) assert_raises(KeyError, mlb.fit_transform, [[0]]) def test_multilabel_binarizer_given_classes(): inp = [(2, 3), (1,), (1, 2)] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # fit().transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # ensure works with extra class mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat))) assert_array_equal(mlb.classes_, [4, 1, 3, 2]) # ensure fit is no-op as iterable is not consumed inp = iter(inp) mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) def test_multilabel_binarizer_same_length_sequence(): # Ensure sequences of the same length are not interpreted as a 2-d array inp = [[1], [0], [2]] indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) def test_multilabel_binarizer_non_integer_labels(): tuple_classes = np.empty(3, dtype=object) tuple_classes[:] = [(1,), (2,), (3,)] inputs = [ ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']), ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']), ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) for inp, classes in inputs: # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) mlb = MultiLabelBinarizer() assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})]) def test_multilabel_binarizer_non_unique(): inp = [(1, 1, 1, 0)] indicator_mat = np.array([[1, 1]]) mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) def test_multilabel_binarizer_inverse_validation(): inp = [(1, 1, 1, 0)] mlb = MultiLabelBinarizer() mlb.fit_transform(inp) # Not binary assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]])) # The following binary cases are fine, however mlb.inverse_transform(np.array([[0, 0]])) mlb.inverse_transform(np.array([[1, 1]])) mlb.inverse_transform(np.array([[1, 0]])) # Wrong shape assert_raises(ValueError, mlb.inverse_transform, np.array([[1]])) 
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]])) def test_label_binarize_with_class_order(): out = label_binarize([1, 6], classes=[1, 2, 4, 6]) expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) assert_array_equal(out, expected) # Modified class order out = label_binarize([1, 6], classes=[1, 6, 4, 2]) expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) assert_array_equal(out, expected) out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(out, expected) def check_binarized_results(y, classes, pos_label, neg_label, expected): for sparse_output in [True, False]: if ((pos_label == 0 or neg_label != 0) and sparse_output): assert_raises(ValueError, label_binarize, y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) continue # check label_binarize binarized = label_binarize(y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) # check inverse y_type = type_of_target(y) if y_type == "multiclass": inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding(binarized, output_type=y_type, classes=classes, threshold=((neg_label + pos_label) / 2.)) assert_array_equal(toarray(inversed), toarray(y)) # Check label binarizer lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert_equal(issparse(inverse_output), issparse(y)) def test_label_binarize_binary(): y = [0, 1, 0] classes = [0, 1] pos_label = 2 neg_label = -1 expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected # Binary case where sparse_output = True will not result in a ValueError y = [0, 1, 0] classes = [0, 1] pos_label = 3 neg_label = 0 expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) yield check_binarized_results, y, classes, pos_label, neg_label, expected assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_label_binarize_multilabel(): y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]]) classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = pos_label * y_ind y_sparse = [sparse_matrix(y_ind) for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix]] for y in [y_ind] + y_sparse: yield (check_binarized_results, y, classes, pos_label, neg_label, expected) assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_invalid_input_label_binarize(): assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2], pos_label=0, neg_label=1) def test_inverse_binarize_multiclass(): got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3)) assert_array_equal(got, np.array([1, 1, 0]))
bsd-3-clause
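The test module above exercises MultiLabelBinarizer and label_binarize; as a quick orientation, here is a minimal, hedged sketch of the round-trip behaviour those tests assert. It is standalone (not part of the test file) and assumes a reasonably recent scikit-learn.

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer, label_binarize

# Indicator-matrix round trip: one column per class, classes_ gives the column order.
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform([(2, 3), (1,), (1, 2)])
print(mlb.classes_)                # [1 2 3]
print(Y)                           # rows: [0 1 1], [1 0 0], [1 1 0]
print(mlb.inverse_transform(Y))    # [(2, 3), (1,), (1, 2)]

# label_binarize keeps the explicit class order, as the class-order tests check.
print(label_binarize([1, 6], classes=[1, 6, 4, 2]))   # [[1 0 0 0], [0 1 0 0]]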
shikhardb/scikit-learn
examples/linear_model/plot_sgd_weighted_samples.py
344
1458
""" ===================== SGD: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight = 100 * np.abs(np.random.randn(20)) # and assign a bigger weight to the last 10 samples sample_weight[:10] *= 10 # plot the weighted data points xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) plt.figure() plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9, cmap=plt.cm.bone) ## fit the unweighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid']) ## fit the weighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y, sample_weight=sample_weight) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed']) plt.legend([no_weights.collections[0], samples_weights.collections[0]], ["no weights", "with weights"], loc="lower left") plt.xticks(()) plt.yticks(()) plt.show()
bsd-3-clause
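One caveat with the example above: SGDClassifier(n_iter=...) is the older scikit-learn spelling; in current releases the parameter is max_iter. A minimal sketch of the same weighted fit under that assumption:

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(0)
X = np.r_[rng.randn(10, 2) + [1, 1], rng.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(rng.randn(20))
sample_weight[:10] *= 10

# max_iter replaces n_iter in modern scikit-learn; fit() still accepts sample_weight.
clf = linear_model.SGDClassifier(alpha=0.01, max_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
print(clf.decision_function(X[:3]))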
jakereimer/pipeline
python/pipeline/legacy/aodtrk.py
6
18511
import datajoint as dj import pandas as pd from . import aodpre import warnings from IPython import embed import glob import numpy as np import dateutil.parser from . import utils import cv2 import os,shutil try: from pupil_tracking.pupil_tracker_aod import PupilTracker except ImportError: warnings.warn("Failed to import pupil_tacking library. You won't be able to populate trk.EyeFrame") schema = dj.schema('pipeline_aod_pupiltracking', locals()) @schema class TrackInfo(dj.Imported): definition = """ # machine independent path of eye videos ->aodpre.Scan --- base_video_path: varchar(100) # base path of the video """ def _make_tuples(self, key): print("key = ", key) # embed() path = (aodpre.Scan() & key).fetch1['hdf5_file'] path.replace('\\', '/') # words = path.split('\\') # if len(words) == 1: words = words[0].split('/') i = words.index('Mouse') ymd = words[i + 3].split('_')[0] hms = words[i + 3].split('_')[1].replace("-", ":") time_hdf5 = dateutil.parser.parse("{ymd} {hms}".format(ymd=ymd, hms=hms)) # time_str = words[i+3].split('_')[1].split('-') # time_hdf5 = int(time_str[0])*10000 + int(time_str[1])*100 + int(time_str[2]) folders = glob.glob(r"/m/Mouse/{f1}/20*".format(f1=words[i + 1])) time_coll = [] time_diff = [] for name in folders: ymd = name.split('/')[4].split('_')[0] hms = name.split('/')[4].split('_')[1].replace("-", ":") # t = name.split('/')[-1].split('_')[1].split('-') # time = int(t[0])*10000 + int(t[1])*100 + int(t[2]) time = dateutil.parser.parse("{ymd} {hms}".format(ymd=ymd, hms=hms)) time_coll.append(time) diff = abs((time_hdf5 - time).total_seconds()) time_diff.append(diff) time_diff = np.asarray(time_diff) fo = folders[np.argmin(abs(time_diff))] avi_path = glob.glob(r"{fo}/*.avi".format(fo=fo)) assert len(avi_path) == 1, "Found 0 or more than 1 videos: {videos}".format(videos=str(avi_path)) key['base_video_path'] = avi_path[0] self.insert1(key) def get_frames(self, key): # path = (aodpre.Scan() & key).fetch1['hdf5_file'] video_file = (self & key).fetch1['base_video_path'] # embed() import cv2 cap = cv2.VideoCapture(video_file) fr_count = 0 while cap.isOpened(): fr_count += 1 ret, frame = cap.read() if fr_count == 1000: return frame @schema class Roi(dj.Manual): definition = """ # table that stores the correct ROI of the Eye in the video ->TrackInfo --- x_roi_min : int # x coordinate of roi y_roi_min : int # y coordinate of roi x_roi_max : int # x coordinate of roi y_roi_max : int # y coordinate of roi """ def dump_video(self): print("Entered dump") vid_coll = self.fetch.as_dict() for video in vid_coll: video_path = (TrackInfo() & video).fetch1['base_video_path'] if not (EyeFrame() & video): print("EyeFrame for (mouse_id,scan_idx)= (", video['mouse_id'], video['scan_idx'], ") not found. 
Please populate EyeFrame before dumping video") else: print("Dumping video for parameters (mouse_id,scan_idx) = (", video['mouse_id'], video['scan_idx'], ")") try: shutil.rmtree("temp_images") except: pass # print("Debug2") os.makedirs("temp_images") cap = cv2.VideoCapture(video_path) length_video = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) fr_count = 0 # print("Debug 1") while cap.isOpened(): ret, frame = cap.read() fr_count += 1 # print("Debug 3") if fr_count % 1000 == 0: print("Processing frame = ", fr_count, "/", length_video) # break if fr_count % 6 == 0: gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if fr_count >= (length_video-10): print("Video: ", video_path, " is over") break data = (EyeFrame.Detection() & video & dict(frame=fr_count)).fetch.as_dict() if data: data = data[0] ellipse = ((int(data['pupil_x']),int(data['pupil_y'])),(int(data['pupil_r_minor']),int(data['pupil_r_major'])), int(data['pupil_angle'])) _ = cv2.ellipse(gray, ellipse, (0, 0, 255), 2) name = "temp_images/img%06d.png" % (fr_count,) cv2.imwrite(name, gray) print("Dumped images for parameters (mouse_id,scan_idx) = (", video['mouse_id'], video['scan_idx'], ")") print("Stitching images into a video") file_name = "video_%s_%s.mp4" % (video['mouse_id'], video['scan_idx']) try: os.remove(file_name) except: pass command = "ffmpeg -f image2 -pattern_type glob -i 'temp_images/*.png' "+file_name # command = "ffmpeg -framerate 5 -i temp_images\%06d.png -c:v libx264 -r 5 -pix_fmt yuv420p "+file_name os.system(command) # embed() try: shutil.rmtree("temp_images") except: pass @schema class ParamEyeFrame(dj.Lookup): definition = """ # table that stores the paths for the params for pupil_tracker pupil_tracker_param_id : int # id for param collection --- convex_weight_high = Null : float # parameter for weighting higher pixel intensity value to decide threshold. 
condition = if (maxr < radius1 - p * (pow(pow((center1[0] - full_patch_size / 2), 2) + pow((center1[1] - full_patch_size / 2), 2), po)) and (center1[1] > ll * full_patch_size) and (center1[1] < rl * full_patch_size) and (center1[0] > ll * full_patch_size) and (center1[0] < rl * full_patch_size) and (radius1 > mir) and (radius1 < mar) and len(contours1[j]) >= 5): convex_weight_low = Null : float # parameter for weighting lower pixel intensity for threshold thres_perc_high = Null : float # percentile parameter to pick most bright pixel value thres_perc_low = Null : float # percentile parameter to pick least bright pixel value pupil_left_limit = Null : float # parameter in percentage to restrict pupil centre in roi pupil_right_limit = Null : float # parameter in percentage to restrict pupil centre in roi min_radius = Null : float # parameter to restrict pupil radius while selecting pupil from multiple contours max_radius = Null : float # parameter to restrict pupil radius while selecting pupil from multiple contours centre_dislocation_penalty : float # parameter for penalty as to force selection of contour which is in the centre as pupil distance_sq_pow : float # parameter for selecting method of calculating distance for penalty """ contents = [ {'pupil_tracker_param_id': 0, 'convex_weight_high': 0.5, 'convex_weight_low': 0.5, 'thres_perc_high': 99, 'distance_sq_pow': 1, 'thres_perc_low': 1, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180, 'centre_dislocation_penalty': 0.001}, {'pupil_tracker_param_id': 1, 'convex_weight_high': 0.75, 'convex_weight_low': 0.25, 'thres_perc_high': 97, 'distance_sq_pow': 0.5, 'thres_perc_low': 3, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180, 'centre_dislocation_penalty': 0.05} ] @schema class EyeFrame(dj.Computed): definition = """ # eye tracking info for each frame of a movie -> Roi -> ParamEyeFrame frame : int # frame number in movie --- eye_frame_ts=CURRENT_TIMESTAMP : timestamp # automatic """ @property def populated_from(self): return Roi() def _make_tuples(self, key): # embed() param = (ParamEyeFrame() & 'pupil_tracker_param_id=0').fetch.as_dict()[0] key['pupil_tracker_param_id'] = param['pupil_tracker_param_id'] video_path = (TrackInfo() & key).fetch1['base_video_path'] eye_roi = (Roi() & key).fetch1['x_roi_min', 'y_roi_min', 'x_roi_max', 'y_roi_max'] param['centre_dislocation_penalty'] = 0.001 param['distance_sq_pow'] = 1 tr = PupilTracker(param) trace = tr.track_without_svm(video_path, eye_roi) # CODE to insert data after tracking print("Tracking complete... Now inserting data to datajoint") efd = EyeFrame.Detection() # embed() for index, data in trace.iterrows(): key['frame'] = index self.insert1(key) if pd.notnull(data['pupil_x']): values = data.to_dict() values.update(key) # embed() efd.insert1(values) class Detection(dj.Part): definition = """ # eye frames with detected eye ->EyeFrame --- pupil_x : float # pupil x position pupil_y : float # pupil y position pupil_r_minor : float # pupil radius minor axis pupil_r_major : float # pupil radius major axis pupil_angle : float # angle of major axis vs. horizontal axis in radians pupil_x_std : float # pupil x position std pupil_y_std : float # pupil y position std pupil_r_minor_std : float # pupil radius minor axis std pupil_r_major_std : float # pupil radius major axis std pupil_angle_std : float # angle of major axis vs. 
horizontal axis in radians intensity_std : float # standard deviation of the ROI pixel values """ @schema class SelectionProtocol(dj.Lookup): definition = """ # groups of filtering steps to reject bad frames filter_protocol_id : int # id of the filtering protocol --- protocol_name : char(50) # descriptive name of the protocol """ contents = [ {'filter_protocol_id': 0, 'protocol_name': 'frame_intensity'}, {'filter_protocol_id': 1, 'protocol_name': 'int_and_ran_pupil_x_50_2'}, {'filter_protocol_id': 2, 'protocol_name': 'int_and_ran_pupil_x_75_2'}, {'filter_protocol_id': 3, 'protocol_name': 'int_and_ran_pupil_x_25_2'}, {'filter_protocol_id': 4, 'protocol_name': 'int_and_ran_pupil_pos'}, {'filter_protocol_id': 5, 'protocol_name': 'int_and_ran_pupil_pos_spikes_removed'}, {'filter_protocol_id': 6, 'protocol_name': 'int_and_ran_pupil_pos_spike_filter2'} ] def apply(self, frames, key): print("Applying filter with protocol id :", key['filter_protocol_id']) for step in (ProtocolStep() & key).fetch.order_by('priority').as_dict(): # embed() print("....for protocol id:", key['filter_protocol_id'], "applying filter with filter_id = ", step['filter_id']) frames = FrameSelector().apply(frames, step, param=step['filter_param']) return frames @schema class FrameSelector(dj.Lookup): definition = """ # single filters to reject frames filter_id : tinyint # id of the filter --- filter_name : char(50) # descriptive name of the filter """ contents = [ {'filter_id': 0, 'filter_name': 'intensity_filter'}, {'filter_id': 1, 'filter_name': 'ran_pupil_x_th'}, {'filter_id': 2, 'filter_name': 'ran_pupil_pos'}, {'filter_id': 3, 'filter_name': 'spike_filter'}, {'filter_id': 4, 'filter_name': 'spike_filter2'} ] def apply(self, frames, key, param): """ Apply takes a restriction of EyeFrame.Detection() and returns an even more restricted set of frames :param frames: restriction of EyeFrame.Detection() :param key: key that singles out a single filter :param param: parameters to the filter :return: an even more restricted set of frames """ which = (self & key).fetch1['filter_name'] if which == 'intensity_filter': i = frames.fetch['intensity_std'] th = np.percentile(i, param[0]) / param[1] return frames & 'intensity_std>{threshold}'.format(threshold=th) if which == 'ran_pupil_x_th': i = frames.fetch['pupil_x_std'] th = np.percentile(i, param[0]) return frames & 'pupil_x_std<{threshold}*{param}'.format(threshold=th, param=param[1]) if which == 'ran_pupil_pos': i = frames.fetch['pupil_x_std'] j = frames.fetch['pupil_y_std'] pos = i*i + j*j th = np.percentile(pos, param[0]) return frames & '(pupil_x_std*pupil_x_std + pupil_y_std*pupil_y_std)<{threshold}*{param}'.format(threshold=th, param=param[1]) if which == 'spike_filter': ra = frames.fetch.order_by('frame')['pupil_r_minor'] fr = frames.fetch.order_by('frame')['frame'] slope_coll = [] for i in range(1,ra.size): slope_coll.append((ra[i] - ra[i-1])/ (fr[i] - fr[i-1])) slope_coll1 = abs(np.asarray(slope_coll)) frames_rej = [dict(frame=k) for k in fr[np.where(slope_coll1 > param)]] return frames - frames_rej if which == 'spike_filter2': ra = frames.fetch.order_by('frame')['pupil_r_minor'] fr = frames.fetch.order_by('frame')['frame'] fr_rej=[] for i in range(2, ra.size-2): avg = (ra[i-2] + ra[i-1] + ra[i+1] + ra[i+2]) / 4 if abs(ra[i] - avg) > param: fr_rej.append(fr[i]) frames_rej = [dict(frame=k) for k in fr_rej] return frames - frames_rej @schema class ProtocolStep(dj.Lookup): definition = """ # single filter in a protocol to accept frames -> SelectionProtocol -> FrameSelector 
priority : int # priority of the filter step, the low the higher the priority --- filter_param=null : longblob # parameters that are passed to the filter """ # define the protocols. Each protocol has one id, but can have several filters contents = [ # parameter needs to be an array # protocol 0 contains only one filter and is based on intensity {'filter_protocol_id': 0, 'filter_id': 0, 'priority': 50, 'filter_param': np.array([50,2])}, # protocol 1 = intensity filter + ransac(50,2) {'filter_protocol_id': 1, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 1, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([50,2])}, # protocol 2 = intensity filter + ransac(75,2) {'filter_protocol_id': 2, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 2, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([75,2])}, # protocol 3 = intensity filter + ransac(25,2) {'filter_protocol_id': 3, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 3, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([25,2])}, # protocol 4 = intensity filter + ransac x2+y2 {'filter_protocol_id': 4, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 4, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])}, # protocol 5 = intensity filter + ransac x2+y2 + spike filter {'filter_protocol_id': 5, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 5, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])}, {'filter_protocol_id': 5, 'filter_id': 3, 'priority': 50, 'filter_param': np.array(50)}, # protocol 6 = intensity filter + ransac x2+y2 + spike filter2 {'filter_protocol_id': 6, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])}, {'filter_protocol_id': 6, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])}, {'filter_protocol_id': 6, 'filter_id': 4, 'priority': 50, 'filter_param': np.array(35)} ] @schema class SelectedFrame(dj.Computed): definition = """ # This schema only contains detected frames that meet a particular quality criterion -> EyeFrame.Detection -> SelectionProtocol --- """ @property def populated_from(self): return TrackInfo() * SelectionProtocol() & EyeFrame() def _make_tuples(self, key): print("Key = ", key) # embed() frames = EyeFrame.Detection() & key print('\tLength before filtering: {l}'.format(l=len(frames))) # & key can be removed from the line below frames = (SelectionProtocol() & key).apply(frames, key) print('\tLength after filtering: {l}'.format(l=len(frames))) for frame_key in frames.project().fetch.as_dict: key.update(frame_key) self.insert1(key)
lgpl-3.0
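The FrameSelector logic in the pipeline above is easiest to see outside DataJoint; here is a standalone NumPy sketch of the spike_filter2 rule (reject a frame whose pupil radius deviates from the mean of its two neighbours on either side by more than a threshold). Array and function names are illustrative, not part of the pipeline.

import numpy as np

def spike_filter2(radii, frame_numbers, threshold):
    """Mirror of the 'spike_filter2' branch above: reject frame i when its
    pupil radius differs from the average of radii[i-2], radii[i-1],
    radii[i+1], radii[i+2] by more than `threshold`."""
    rejected = []
    for i in range(2, len(radii) - 2):
        neighbour_avg = (radii[i - 2] + radii[i - 1] + radii[i + 1] + radii[i + 2]) / 4.0
        if abs(radii[i] - neighbour_avg) > threshold:
            rejected.append(frame_numbers[i])
    return rejected

# e.g. protocol 6 applies this with threshold=35 after the intensity and RANSAC filters
radii = np.array([20, 21, 20, 90, 21, 20, 22], dtype=float)
print(spike_filter2(radii, np.arange(len(radii)), 35))   # -> [3]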
yavalvas/yav_com
build/matplotlib/examples/api/custom_projection_example.py
9
18246
from __future__ import unicode_literals import matplotlib from matplotlib.axes import Axes from matplotlib.patches import Circle from matplotlib.path import Path from matplotlib.ticker import NullLocator, Formatter, FixedLocator from matplotlib.transforms import Affine2D, BboxTransformTo, Transform from matplotlib.projections import register_projection import matplotlib.spines as mspines import matplotlib.axis as maxis import numpy as np # This example projection class is rather long, but it is designed to # illustrate many features, not all of which will be used every time. # It is also common to factor out a lot of these methods into common # code used by a number of projections with similar characteristics # (see geo.py). class HammerAxes(Axes): """ A custom class for the Aitoff-Hammer projection, an equal-area map projection. http://en.wikipedia.org/wiki/Hammer_projection """ # The projection must specify a name. This will be used be the # user to select the projection, i.e. ``subplot(111, # projection='custom_hammer')``. name = 'custom_hammer' def __init__(self, *args, **kwargs): Axes.__init__(self, *args, **kwargs) self.set_aspect(0.5, adjustable='box', anchor='C') self.cla() def _init_axis(self): self.xaxis = maxis.XAxis(self) self.yaxis = maxis.YAxis(self) # Do not register xaxis or yaxis with spines -- as done in # Axes._init_axis() -- until HammerAxes.xaxis.cla() works. # self.spines['hammer'].register_axis(self.yaxis) self._update_transScale() def cla(self): """ Override to set up some reasonable defaults. """ # Don't forget to call the base class Axes.cla(self) # Set up a default grid spacing self.set_longitude_grid(30) self.set_latitude_grid(15) self.set_longitude_grid_ends(75) # Turn off minor ticking altogether self.xaxis.set_minor_locator(NullLocator()) self.yaxis.set_minor_locator(NullLocator()) # Do not display ticks -- we only want gridlines and text self.xaxis.set_ticks_position('none') self.yaxis.set_ticks_position('none') # The limits on this projection are fixed -- they are not to # be changed by the user. This makes the math in the # transformation itself easier, and since this is a toy # example, the easier, the better. Axes.set_xlim(self, -np.pi, np.pi) Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0) def _set_lim_and_transforms(self): """ This is called once when the plot is created to set up all the transforms for the data, text and grids. """ # There are three important coordinate spaces going on here: # # 1. Data space: The space of the data itself # # 2. Axes space: The unit rectangle (0, 0) to (1, 1) # covering the entire plot area. # # 3. Display space: The coordinates of the resulting image, # often in pixels or dpi/inch. # This function makes heavy use of the Transform classes in # ``lib/matplotlib/transforms.py.`` For more information, see # the inline documentation there. # The goal of the first two transformations is to get from the # data space (in this case longitude and latitude) to axes # space. It is separated into a non-affine and affine part so # that the non-affine part does not have to be recomputed when # a simple affine change to the figure has been made (such as # resizing the window or changing the dpi). # 1) The core transformation from data space into # rectilinear space defined in the HammerTransform class. self.transProjection = self.HammerTransform() # 2) The above has an output range that is not in the unit # rectangle, so scale and translate it so it fits correctly # within the axes. 
The peculiar calculations of xscale and # yscale are specific to a Aitoff-Hammer projection, so don't # worry about them too much. xscale = 2.0 * np.sqrt(2.0) * np.sin(0.5 * np.pi) yscale = np.sqrt(2.0) * np.sin(0.5 * np.pi) self.transAffine = Affine2D() \ .scale(0.5 / xscale, 0.5 / yscale) \ .translate(0.5, 0.5) # 3) This is the transformation from axes space to display # space. self.transAxes = BboxTransformTo(self.bbox) # Now put these 3 transforms together -- from data all the way # to display coordinates. Using the '+' operator, these # transforms will be applied "in order". The transforms are # automatically simplified, if possible, by the underlying # transformation framework. self.transData = \ self.transProjection + \ self.transAffine + \ self.transAxes # The main data transformation is set up. Now deal with # gridlines and tick labels. # Longitude gridlines and ticklabels. The input to these # transforms are in display space in x and axes space in y. # Therefore, the input values will be in range (-xmin, 0), # (xmax, 1). The goal of these transforms is to go from that # space to display space. The tick labels will be offset 4 # pixels from the equator. self._xaxis_pretransform = \ Affine2D() \ .scale(1.0, np.pi) \ .translate(0.0, -np.pi) self._xaxis_transform = \ self._xaxis_pretransform + \ self.transData self._xaxis_text1_transform = \ Affine2D().scale(1.0, 0.0) + \ self.transData + \ Affine2D().translate(0.0, 4.0) self._xaxis_text2_transform = \ Affine2D().scale(1.0, 0.0) + \ self.transData + \ Affine2D().translate(0.0, -4.0) # Now set up the transforms for the latitude ticks. The input to # these transforms are in axes space in x and display space in # y. Therefore, the input values will be in range (0, -ymin), # (1, ymax). The goal of these transforms is to go from that # space to display space. The tick labels will be offset 4 # pixels from the edge of the axes ellipse. yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0) yaxis_space = Affine2D().scale(1.0, 1.1) self._yaxis_transform = \ yaxis_stretch + \ self.transData yaxis_text_base = \ yaxis_stretch + \ self.transProjection + \ (yaxis_space + \ self.transAffine + \ self.transAxes) self._yaxis_text1_transform = \ yaxis_text_base + \ Affine2D().translate(-8.0, 0.0) self._yaxis_text2_transform = \ yaxis_text_base + \ Affine2D().translate(8.0, 0.0) def get_xaxis_transform(self,which='grid'): """ Override this method to provide a transformation for the x-axis grid and ticks. """ assert which in ['tick1','tick2','grid'] return self._xaxis_transform def get_xaxis_text1_transform(self, pixelPad): """ Override this method to provide a transformation for the x-axis tick labels. Returns a tuple of the form (transform, valign, halign) """ return self._xaxis_text1_transform, 'bottom', 'center' def get_xaxis_text2_transform(self, pixelPad): """ Override this method to provide a transformation for the secondary x-axis tick labels. Returns a tuple of the form (transform, valign, halign) """ return self._xaxis_text2_transform, 'top', 'center' def get_yaxis_transform(self,which='grid'): """ Override this method to provide a transformation for the y-axis grid and ticks. """ assert which in ['tick1','tick2','grid'] return self._yaxis_transform def get_yaxis_text1_transform(self, pixelPad): """ Override this method to provide a transformation for the y-axis tick labels. 
Returns a tuple of the form (transform, valign, halign) """ return self._yaxis_text1_transform, 'center', 'right' def get_yaxis_text2_transform(self, pixelPad): """ Override this method to provide a transformation for the secondary y-axis tick labels. Returns a tuple of the form (transform, valign, halign) """ return self._yaxis_text2_transform, 'center', 'left' def _gen_axes_patch(self): """ Override this method to define the shape that is used for the background of the plot. It should be a subclass of Patch. In this case, it is a Circle (that may be warped by the axes transform into an ellipse). Any data and gridlines will be clipped to this shape. """ return Circle((0.5, 0.5), 0.5) def _gen_axes_spines(self): return {'custom_hammer':mspines.Spine.circular_spine(self, (0.5, 0.5), 0.5)} # Prevent the user from applying scales to one or both of the # axes. In this particular case, scaling the axes wouldn't make # sense, so we don't allow it. def set_xscale(self, *args, **kwargs): if args[0] != 'linear': raise NotImplementedError Axes.set_xscale(self, *args, **kwargs) def set_yscale(self, *args, **kwargs): if args[0] != 'linear': raise NotImplementedError Axes.set_yscale(self, *args, **kwargs) # Prevent the user from changing the axes limits. In our case, we # want to display the whole sphere all the time, so we override # set_xlim and set_ylim to ignore any input. This also applies to # interactive panning and zooming in the GUI interfaces. def set_xlim(self, *args, **kwargs): Axes.set_xlim(self, -np.pi, np.pi) Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0) set_ylim = set_xlim def format_coord(self, lon, lat): """ Override this method to change how the values are displayed in the status bar. In this case, we want them to be displayed in degrees N/S/E/W. """ lon = lon * (180.0 / np.pi) lat = lat * (180.0 / np.pi) if lat >= 0.0: ns = 'N' else: ns = 'S' if lon >= 0.0: ew = 'E' else: ew = 'W' # \u00b0 : degree symbol return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew) class DegreeFormatter(Formatter): """ This is a custom formatter that converts the native unit of radians into (truncated) degrees and adds a degree symbol. """ def __init__(self, round_to=1.0): self._round_to = round_to def __call__(self, x, pos=None): degrees = (x / np.pi) * 180.0 degrees = round(degrees / self._round_to) * self._round_to # \u00b0 : degree symbol return "%d\u00b0" % degrees def set_longitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. This is an example method that is specific to this projection class -- it provides a more convenient interface to set the ticking than set_xticks would. """ # Set up a FixedLocator at each of the points, evenly spaced # by degrees. number = (360.0 / degrees) + 1 self.xaxis.set_major_locator( plt.FixedLocator( np.linspace(-np.pi, np.pi, number, True)[1:-1])) # Set the formatter to display the tick labels in degrees, # rather than radians. self.xaxis.set_major_formatter(self.DegreeFormatter(degrees)) def set_latitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. This is an example method that is specific to this projection class -- it provides a more convenient interface than set_yticks would. """ # Set up a FixedLocator at each of the points, evenly spaced # by degrees. number = (180.0 / degrees) + 1 self.yaxis.set_major_locator( FixedLocator( np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1])) # Set the formatter to display the tick labels in degrees, # rather than radians. 
self.yaxis.set_major_formatter(self.DegreeFormatter(degrees)) def set_longitude_grid_ends(self, degrees): """ Set the latitude(s) at which to stop drawing the longitude grids. Often, in geographic projections, you wouldn't want to draw longitude gridlines near the poles. This allows the user to specify the degree at which to stop drawing longitude grids. This is an example method that is specific to this projection class -- it provides an interface to something that has no analogy in the base Axes class. """ longitude_cap = degrees * (np.pi / 180.0) # Change the xaxis gridlines transform so that it draws from # -degrees to degrees, rather than -pi to pi. self._xaxis_pretransform \ .clear() \ .scale(1.0, longitude_cap * 2.0) \ .translate(0.0, -longitude_cap) def get_data_ratio(self): """ Return the aspect ratio of the data itself. This method should be overridden by any Axes that have a fixed data ratio. """ return 1.0 # Interactive panning and zooming is not supported with this projection, # so we override all of the following methods to disable it. def can_zoom(self): """ Return True if this axes support the zoom box """ return False def start_pan(self, x, y, button): pass def end_pan(self): pass def drag_pan(self, button, key, x, y): pass # Now, the transforms themselves. class HammerTransform(Transform): """ The base Hammer transform. """ input_dims = 2 output_dims = 2 is_separable = False def transform_non_affine(self, ll): """ Override the transform_non_affine method to implement the custom transform. The input and output are Nx2 numpy arrays. """ longitude = ll[:, 0:1] latitude = ll[:, 1:2] # Pre-compute some values half_long = longitude / 2.0 cos_latitude = np.cos(latitude) sqrt2 = np.sqrt(2.0) alpha = 1.0 + cos_latitude * np.cos(half_long) x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha y = (sqrt2 * np.sin(latitude)) / alpha return np.concatenate((x, y), 1) # This is where things get interesting. With this projection, # straight lines in data space become curves in display space. # This is done by interpolating new values between the input # values of the data. Since ``transform`` must not return a # differently-sized array, any transform that requires # changing the length of the data array must happen within # ``transform_path``. def transform_path_non_affine(self, path): ipath = path.interpolated(path._interpolation_steps) return Path(self.transform(ipath.vertices), ipath.codes) transform_path_non_affine.__doc__ = \ Transform.transform_path_non_affine.__doc__ if matplotlib.__version__ < '1.2': # Note: For compatibility with matplotlib v1.1 and older, you'll # need to explicitly implement a ``transform`` method as well. # Otherwise a ``NotImplementedError`` will be raised. This isn't # necessary for v1.2 and newer, however. transform = transform_non_affine # Similarly, we need to explicitly override ``transform_path`` if # compatibility with older matplotlib versions is needed. With v1.2 # and newer, only overriding the ``transform_path_non_affine`` # method is sufficient. 
transform_path = transform_path_non_affine transform_path.__doc__ = Transform.transform_path.__doc__ def inverted(self): return HammerAxes.InvertedHammerTransform() inverted.__doc__ = Transform.inverted.__doc__ class InvertedHammerTransform(Transform): input_dims = 2 output_dims = 2 is_separable = False def transform_non_affine(self, xy): x = xy[:, 0:1] y = xy[:, 1:2] quarter_x = 0.25 * x half_y = 0.5 * y z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y) longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0))) latitude = np.arcsin(y*z) return np.concatenate((longitude, latitude), 1) transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ # As before, we need to implement the "transform" method for # compatibility with matplotlib v1.1 and older. if matplotlib.__version__ < '1.2': transform = transform_non_affine def inverted(self): # The inverse of the inverse is the original transform... ;) return HammerAxes.HammerTransform() inverted.__doc__ = Transform.inverted.__doc__ # Now register the projection with matplotlib so the user can select # it. register_projection(HammerAxes) if __name__ == '__main__': import matplotlib.pyplot as plt # Now make a simple example using the custom projection. plt.subplot(111, projection="custom_hammer") p = plt.plot([-1, 1, 1], [-1, -1, 1], "o-") plt.grid(True) plt.show()
mit
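The core of HammerTransform above is the forward Aitoff-Hammer mapping; pulled out of the matplotlib Transform machinery it reduces to the following standalone sketch (longitude/latitude in radians in, projected x/y out).

import numpy as np

def hammer_forward(longitude, latitude):
    """Forward Aitoff-Hammer projection, as in HammerTransform.transform_non_affine."""
    half_long = longitude / 2.0
    cos_lat = np.cos(latitude)
    sqrt2 = np.sqrt(2.0)
    alpha = 1.0 + cos_lat * np.cos(half_long)
    x = (2.0 * sqrt2) * (cos_lat * np.sin(half_long)) / alpha
    y = (sqrt2 * np.sin(latitude)) / alpha
    return x, y

# the origin maps to the origin; the equator ends map to x = +/- 2*sqrt(2)
print(hammer_forward(0.0, 0.0))      # (0.0, 0.0)
print(hammer_forward(np.pi, 0.0))    # (~2.828, 0.0)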
SanketDG/networkx
examples/graph/napoleon_russian_campaign.py
44
3216
#!/usr/bin/env python """ Minard's data from Napoleon's 1812-1813 Russian Campaign. http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt """ __author__ = """Aric Hagberg (hagberg@lanl.gov)""" # Copyright (C) 2006 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import string import networkx as nx def minard_graph(): data1="""\ 24.0,54.9,340000,A,1 24.5,55.0,340000,A,1 25.5,54.5,340000,A,1 26.0,54.7,320000,A,1 27.0,54.8,300000,A,1 28.0,54.9,280000,A,1 28.5,55.0,240000,A,1 29.0,55.1,210000,A,1 30.0,55.2,180000,A,1 30.3,55.3,175000,A,1 32.0,54.8,145000,A,1 33.2,54.9,140000,A,1 34.4,55.5,127100,A,1 35.5,55.4,100000,A,1 36.0,55.5,100000,A,1 37.6,55.8,100000,A,1 37.7,55.7,100000,R,1 37.5,55.7,98000,R,1 37.0,55.0,97000,R,1 36.8,55.0,96000,R,1 35.4,55.3,87000,R,1 34.3,55.2,55000,R,1 33.3,54.8,37000,R,1 32.0,54.6,24000,R,1 30.4,54.4,20000,R,1 29.2,54.3,20000,R,1 28.5,54.2,20000,R,1 28.3,54.3,20000,R,1 27.5,54.5,20000,R,1 26.8,54.3,12000,R,1 26.4,54.4,14000,R,1 25.0,54.4,8000,R,1 24.4,54.4,4000,R,1 24.2,54.4,4000,R,1 24.1,54.4,4000,R,1""" data2="""\ 24.0,55.1,60000,A,2 24.5,55.2,60000,A,2 25.5,54.7,60000,A,2 26.6,55.7,40000,A,2 27.4,55.6,33000,A,2 28.7,55.5,33000,R,2 29.2,54.2,30000,R,2 28.5,54.1,30000,R,2 28.3,54.2,28000,R,2""" data3="""\ 24.0,55.2,22000,A,3 24.5,55.3,22000,A,3 24.6,55.8,6000,A,3 24.6,55.8,6000,R,3 24.2,54.4,6000,R,3 24.1,54.4,6000,R,3""" cities="""\ 24.0,55.0,Kowno 25.3,54.7,Wilna 26.4,54.4,Smorgoni 26.8,54.3,Moiodexno 27.7,55.2,Gloubokoe 27.6,53.9,Minsk 28.5,54.3,Studienska 28.7,55.5,Polotzk 29.2,54.4,Bobr 30.2,55.3,Witebsk 30.4,54.5,Orscha 30.4,53.9,Mohilow 32.0,54.8,Smolensk 33.2,54.9,Dorogobouge 34.3,55.2,Wixma 34.4,55.5,Chjat 36.0,55.5,Mojaisk 37.6,55.8,Moscou 36.6,55.3,Tarantino 36.5,55.0,Malo-Jarosewii""" c={} for line in cities.split('\n'): x,y,name=line.split(',') c[name]=(float(x),float(y)) g=[] for data in [data1,data2,data3]: G=nx.Graph() i=0 G.pos={} # location G.pop={} # size last=None for line in data.split('\n'): x,y,p,r,n=line.split(',') G.pos[i]=(float(x),float(y)) G.pop[i]=int(p) if last is None: last=i else: G.add_edge(i,last,{r:int(n)}) last=i i=i+1 g.append(G) return g,c if __name__ == "__main__": (g,city)=minard_graph() try: import matplotlib.pyplot as plt plt.figure(1,figsize=(11,5)) plt.clf() colors=['b','g','r'] for G in g: c=colors.pop(0) node_size=[int(G.pop[n]/300.0) for n in G] nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5) nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5) nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k') for c in city: x,y=city[c] plt.text(x,y+0.1,c) plt.savefig("napoleon_russian_campaign.png") except ImportError: pass
bsd-3-clause
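The Minard graph builder above calls G.add_edge(i, last, {r: int(n)}), which relies on the positional attribute dict accepted by networkx 1.x; under networkx 2.x edge attributes have to be passed as keyword arguments instead. A small sketch of the 2.x-compatible form (hedged, since the example itself targets the older API):

import networkx as nx

G = nx.Graph()
G.pos, G.pop = {}, {}

# networkx 2.x: unpack the attribute dict into keyword arguments
r, n = "A", 340000
G.add_edge(0, 1, **{r: int(n)})
print(list(G.edges(data=True)))   # [(0, 1, {'A': 340000})]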
facebookincubator/prophet
python/prophet/forecaster.py
2
64372
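The next content field is Prophet's forecaster module. Before the (long) source itself, a minimal usage sketch of the public API it defines, hedged on the assumptions that the package is installed under the modern name `prophet`, that the input DataFrame has the 'ds'/'y' columns the docstrings below describe, and that make_future_dataframe and the yhat columns come from parts of the module beyond this excerpt. The series used here is a toy example.

import numpy as np
import pandas as pd
from prophet import Prophet

# toy history: Prophet expects a 'ds' date column and a 'y' value column
history = pd.DataFrame({
    "ds": pd.date_range("2020-01-01", periods=730, freq="D"),
    "y": np.sin(np.arange(730) / 30.0),   # hypothetical series
})

m = Prophet(seasonality_mode="additive", changepoint_prior_scale=0.05)
m.add_seasonality(name="monthly", period=30.5, fourier_order=5)
m.fit(history)

future = m.make_future_dataframe(periods=90)
forecast = m.predict(future)
print(forecast[["ds", "yhat", "yhat_lower", "yhat_upper"]].tail())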
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import logging from collections import OrderedDict, defaultdict from copy import deepcopy from datetime import timedelta, datetime import numpy as np import pandas as pd from prophet.make_holidays import get_holiday_names, make_holidays_df from prophet.models import StanBackendEnum from prophet.plot import (plot, plot_components) logger = logging.getLogger('prophet') logger.setLevel(logging.INFO) class Prophet(object): """Prophet forecaster. Parameters ---------- growth: String 'linear' or 'logistic' to specify a linear or logistic trend. changepoints: List of dates at which to include potential changepoints. If not specified, potential changepoints are selected automatically. n_changepoints: Number of potential changepoints to include. Not used if input `changepoints` is supplied. If `changepoints` is not supplied, then n_changepoints potential changepoints are selected uniformly from the first `changepoint_range` proportion of the history. changepoint_range: Proportion of history in which trend changepoints will be estimated. Defaults to 0.8 for the first 80%. Not used if `changepoints` is specified. yearly_seasonality: Fit yearly seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. weekly_seasonality: Fit weekly seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. daily_seasonality: Fit daily seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. holidays: pd.DataFrame with columns holiday (string) and ds (date type) and optionally columns lower_window and upper_window which specify a range of days around the date to be included as holidays. lower_window=-2 will include 2 days prior to the date as holidays. Also optionally can have a column prior_scale specifying the prior scale for that holiday. seasonality_mode: 'additive' (default) or 'multiplicative'. seasonality_prior_scale: Parameter modulating the strength of the seasonality model. Larger values allow the model to fit larger seasonal fluctuations, smaller values dampen the seasonality. Can be specified for individual seasonalities using add_seasonality. holidays_prior_scale: Parameter modulating the strength of the holiday components model, unless overridden in the holidays input. changepoint_prior_scale: Parameter modulating the flexibility of the automatic changepoint selection. Large values will allow many changepoints, small values will allow few changepoints. mcmc_samples: Integer, if greater than 0, will do full Bayesian inference with the specified number of MCMC samples. If 0, will do MAP estimation. interval_width: Float, width of the uncertainty intervals provided for the forecast. If mcmc_samples=0, this will be only the uncertainty in the trend using the MAP estimate of the extrapolated generative model. If mcmc.samples>0, this will be integrated over all model parameters, which will include uncertainty in seasonality. uncertainty_samples: Number of simulated draws used to estimate uncertainty intervals. Settings this value to 0 or False will disable uncertainty estimation and speed up the calculation. 
stan_backend: str as defined in StanBackendEnum default: None - will try to iterate over all available backends and find the working one """ def __init__( self, growth='linear', changepoints=None, n_changepoints=25, changepoint_range=0.8, yearly_seasonality='auto', weekly_seasonality='auto', daily_seasonality='auto', holidays=None, seasonality_mode='additive', seasonality_prior_scale=10.0, holidays_prior_scale=10.0, changepoint_prior_scale=0.05, mcmc_samples=0, interval_width=0.80, uncertainty_samples=1000, stan_backend=None ): self.growth = growth self.changepoints = changepoints if self.changepoints is not None: self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name='ds') self.n_changepoints = len(self.changepoints) self.specified_changepoints = True else: self.n_changepoints = n_changepoints self.specified_changepoints = False self.changepoint_range = changepoint_range self.yearly_seasonality = yearly_seasonality self.weekly_seasonality = weekly_seasonality self.daily_seasonality = daily_seasonality self.holidays = holidays self.seasonality_mode = seasonality_mode self.seasonality_prior_scale = float(seasonality_prior_scale) self.changepoint_prior_scale = float(changepoint_prior_scale) self.holidays_prior_scale = float(holidays_prior_scale) self.mcmc_samples = mcmc_samples self.interval_width = interval_width self.uncertainty_samples = uncertainty_samples # Set during fitting or by other methods self.start = None self.y_scale = None self.logistic_floor = False self.t_scale = None self.changepoints_t = None self.seasonalities = OrderedDict({}) self.extra_regressors = OrderedDict({}) self.country_holidays = None self.stan_fit = None self.params = {} self.history = None self.history_dates = None self.train_component_cols = None self.component_modes = None self.train_holiday_names = None self.fit_kwargs = {} self.validate_inputs() self._load_stan_backend(stan_backend) def _load_stan_backend(self, stan_backend): if stan_backend is None: for i in StanBackendEnum: try: logger.debug("Trying to load backend: %s", i.name) return self._load_stan_backend(i.name) except Exception as e: logger.debug("Unable to load backend %s (%s), trying the next one", i.name, e) else: self.stan_backend = StanBackendEnum.get_backend_class(stan_backend)() logger.debug("Loaded stan backend: %s", self.stan_backend.get_type()) def validate_inputs(self): """Validates the inputs to Prophet.""" if self.growth not in ('linear', 'logistic', 'flat'): raise ValueError( 'Parameter "growth" should be "linear", "logistic" or "flat".') if ((self.changepoint_range < 0) or (self.changepoint_range > 1)): raise ValueError('Parameter "changepoint_range" must be in [0, 1]') if self.holidays is not None: if not ( isinstance(self.holidays, pd.DataFrame) and 'ds' in self.holidays # noqa W503 and 'holiday' in self.holidays # noqa W503 ): raise ValueError('holidays must be a DataFrame with "ds" and ' '"holiday" columns.') self.holidays['ds'] = pd.to_datetime(self.holidays['ds']) if ( self.holidays['ds'].isnull().any() or self.holidays['holiday'].isnull().any() ): raise ValueError('Found a NaN in holidays dataframe.') has_lower = 'lower_window' in self.holidays has_upper = 'upper_window' in self.holidays if has_lower + has_upper == 1: raise ValueError('Holidays must have both lower_window and ' + 'upper_window, or neither') if has_lower: if self.holidays['lower_window'].max() > 0: raise ValueError('Holiday lower_window should be <= 0') if self.holidays['upper_window'].min() < 0: raise ValueError('Holiday upper_window should be 
>= 0') for h in self.holidays['holiday'].unique(): self.validate_column_name(h, check_holidays=False) if self.seasonality_mode not in ['additive', 'multiplicative']: raise ValueError( 'seasonality_mode must be "additive" or "multiplicative"' ) def validate_column_name(self, name, check_holidays=True, check_seasonalities=True, check_regressors=True): """Validates the name of a seasonality, holiday, or regressor. Parameters ---------- name: string check_holidays: bool check if name already used for holiday check_seasonalities: bool check if name already used for seasonality check_regressors: bool check if name already used for regressor """ if '_delim_' in name: raise ValueError('Name cannot contain "_delim_"') reserved_names = [ 'trend', 'additive_terms', 'daily', 'weekly', 'yearly', 'holidays', 'zeros', 'extra_regressors_additive', 'yhat', 'extra_regressors_multiplicative', 'multiplicative_terms', ] rn_l = [n + '_lower' for n in reserved_names] rn_u = [n + '_upper' for n in reserved_names] reserved_names.extend(rn_l) reserved_names.extend(rn_u) reserved_names.extend([ 'ds', 'y', 'cap', 'floor', 'y_scaled', 'cap_scaled']) if name in reserved_names: raise ValueError( 'Name {name!r} is reserved.'.format(name=name) ) if (check_holidays and self.holidays is not None and name in self.holidays['holiday'].unique()): raise ValueError( 'Name {name!r} already used for a holiday.'.format(name=name) ) if (check_holidays and self.country_holidays is not None and name in get_holiday_names(self.country_holidays)): raise ValueError( 'Name {name!r} is a holiday name in {country_holidays}.' .format(name=name, country_holidays=self.country_holidays) ) if check_seasonalities and name in self.seasonalities: raise ValueError( 'Name {name!r} already used for a seasonality.' .format(name=name) ) if check_regressors and name in self.extra_regressors: raise ValueError( 'Name {name!r} already used for an added regressor.' .format(name=name) ) def setup_dataframe(self, df, initialize_scales=False): """Prepare dataframe for fitting or predicting. Adds a time index and scales y. Creates auxiliary columns 't', 't_ix', 'y_scaled', and 'cap_scaled'. These columns are used during both fitting and predicting. Parameters ---------- df: pd.DataFrame with columns ds, y, and cap if logistic growth. Any specified additional regressors must also be present. initialize_scales: Boolean set scaling factors in self from df. Returns ------- pd.DataFrame prepared for fitting or predicting. """ if 'y' in df: # 'y' will be in training data df['y'] = pd.to_numeric(df['y']) if np.isinf(df['y'].values).any(): raise ValueError('Found infinity in column y.') if df['ds'].dtype == np.int64: df['ds'] = df['ds'].astype(str) df['ds'] = pd.to_datetime(df['ds']) if df['ds'].dt.tz is not None: raise ValueError( 'Column ds has timezone specified, which is not supported. ' 'Remove timezone.' 
) if df['ds'].isnull().any(): raise ValueError('Found NaN in column ds.') for name in self.extra_regressors: if name not in df: raise ValueError( 'Regressor {name!r} missing from dataframe' .format(name=name) ) df[name] = pd.to_numeric(df[name]) if df[name].isnull().any(): raise ValueError( 'Found NaN in column {name!r}'.format(name=name) ) for props in self.seasonalities.values(): condition_name = props['condition_name'] if condition_name is not None: if condition_name not in df: raise ValueError( 'Condition {condition_name!r} missing from dataframe' .format(condition_name=condition_name) ) if not df[condition_name].isin([True, False]).all(): raise ValueError( 'Found non-boolean in column {condition_name!r}' .format(condition_name=condition_name) ) df[condition_name] = df[condition_name].astype('bool') if df.index.name == 'ds': df.index.name = None df = df.sort_values('ds') df = df.reset_index(drop=True) self.initialize_scales(initialize_scales, df) if self.logistic_floor: if 'floor' not in df: raise ValueError('Expected column "floor".') else: df['floor'] = 0 if self.growth == 'logistic': if 'cap' not in df: raise ValueError( 'Capacities must be supplied for logistic growth in ' 'column "cap"' ) if (df['cap'] <= df['floor']).any(): raise ValueError( 'cap must be greater than floor (which defaults to 0).' ) df['cap_scaled'] = (df['cap'] - df['floor']) / self.y_scale df['t'] = (df['ds'] - self.start) / self.t_scale if 'y' in df: df['y_scaled'] = (df['y'] - df['floor']) / self.y_scale for name, props in self.extra_regressors.items(): df[name] = ((df[name] - props['mu']) / props['std']) return df def initialize_scales(self, initialize_scales, df): """Initialize model scales. Sets model scaling factors using df. Parameters ---------- initialize_scales: Boolean set the scales or not. df: pd.DataFrame for setting scales. """ if not initialize_scales: return if self.growth == 'logistic' and 'floor' in df: self.logistic_floor = True floor = df['floor'] else: floor = 0. self.y_scale = float((df['y'] - floor).abs().max()) if self.y_scale == 0: self.y_scale = 1.0 self.start = df['ds'].min() self.t_scale = df['ds'].max() - self.start for name, props in self.extra_regressors.items(): standardize = props['standardize'] n_vals = len(df[name].unique()) if n_vals < 2: standardize = False if standardize == 'auto': if set(df[name].unique()) == {1, 0}: standardize = False # Don't standardize binary variables. else: standardize = True if standardize: mu = df[name].mean() std = df[name].std() self.extra_regressors[name]['mu'] = mu self.extra_regressors[name]['std'] = std def set_changepoints(self): """Set changepoints Sets m$changepoints to the dates of changepoints. Either: 1) The changepoints were passed in explicitly. A) They are empty. B) They are not empty, and need validation. 2) We are generating a grid of them. 3) The user prefers no changepoints be used. """ if self.changepoints is not None: if len(self.changepoints) == 0: pass else: too_low = min(self.changepoints) < self.history['ds'].min() too_high = max(self.changepoints) > self.history['ds'].max() if too_low or too_high: raise ValueError( 'Changepoints must fall within training data.') else: # Place potential changepoints evenly through first # `changepoint_range` proportion of the history hist_size = int(np.floor(self.history.shape[0] * self.changepoint_range)) if self.n_changepoints + 1 > hist_size: self.n_changepoints = hist_size - 1 logger.info( 'n_changepoints greater than number of observations. ' 'Using {n_changepoints}.' 
.format(n_changepoints=self.n_changepoints) ) if self.n_changepoints > 0: cp_indexes = ( np.linspace(0, hist_size - 1, self.n_changepoints + 1) .round() .astype(int) ) self.changepoints = ( self.history.iloc[cp_indexes]['ds'].tail(-1) ) else: # set empty changepoints self.changepoints = pd.Series(pd.to_datetime([]), name='ds') if len(self.changepoints) > 0: self.changepoints_t = np.sort(np.array( (self.changepoints - self.start) / self.t_scale)) else: self.changepoints_t = np.array([0]) # dummy changepoint @staticmethod def fourier_series(dates, period, series_order): """Provides Fourier series components with the specified frequency and order. Parameters ---------- dates: pd.Series containing timestamps. period: Number of days of the period. series_order: Number of components. Returns ------- Matrix with seasonality features. """ # convert to days since epoch t = np.array( (dates - datetime(1970, 1, 1)) .dt.total_seconds() .astype(float) ) / (3600 * 24.) return np.column_stack([ fun((2.0 * (i + 1) * np.pi * t / period)) for i in range(series_order) for fun in (np.sin, np.cos) ]) @classmethod def make_seasonality_features(cls, dates, period, series_order, prefix): """Data frame with seasonality features. Parameters ---------- cls: Prophet class. dates: pd.Series containing timestamps. period: Number of days of the period. series_order: Number of components. prefix: Column name prefix. Returns ------- pd.DataFrame with seasonality features. """ features = cls.fourier_series(dates, period, series_order) columns = [ '{}_delim_{}'.format(prefix, i + 1) for i in range(features.shape[1]) ] return pd.DataFrame(features, columns=columns) def construct_holiday_dataframe(self, dates): """Construct a dataframe of holiday dates. Will combine self.holidays with the built-in country holidays corresponding to input dates, if self.country_holidays is set. Parameters ---------- dates: pd.Series containing timestamps used for computing seasonality. Returns ------- dataframe of holiday dates, in holiday dataframe format used in initialization. """ all_holidays = pd.DataFrame() if self.holidays is not None: all_holidays = self.holidays.copy() if self.country_holidays is not None: year_list = list({x.year for x in dates}) country_holidays_df = make_holidays_df( year_list=year_list, country=self.country_holidays ) all_holidays = pd.concat((all_holidays, country_holidays_df), sort=False) all_holidays.reset_index(drop=True, inplace=True) # Drop future holidays not previously seen in training data if self.train_holiday_names is not None: # Remove holiday names didn't show up in fit index_to_drop = all_holidays.index[ np.logical_not( all_holidays.holiday.isin(self.train_holiday_names) ) ] all_holidays = all_holidays.drop(index_to_drop) # Add holiday names in fit but not in predict with ds as NA holidays_to_add = pd.DataFrame({ 'holiday': self.train_holiday_names[ np.logical_not(self.train_holiday_names .isin(all_holidays.holiday)) ] }) all_holidays = pd.concat((all_holidays, holidays_to_add), sort=False) all_holidays.reset_index(drop=True, inplace=True) return all_holidays def make_holiday_features(self, dates, holidays): """Construct a dataframe of holiday features. Parameters ---------- dates: pd.Series containing timestamps used for computing seasonality. holidays: pd.Dataframe containing holidays, as returned by construct_holiday_dataframe. Returns ------- holiday_features: pd.DataFrame with a column for each holiday. prior_scale_list: List of prior scales for each holiday column. 
holiday_names: List of names of holidays """ # Holds columns of our future matrix. expanded_holidays = defaultdict(lambda: np.zeros(dates.shape[0])) prior_scales = {} # Makes an index so we can perform `get_loc` below. # Strip to just dates. row_index = pd.DatetimeIndex(dates.apply(lambda x: x.date())) for _ix, row in holidays.iterrows(): dt = row.ds.date() try: lw = int(row.get('lower_window', 0)) uw = int(row.get('upper_window', 0)) except ValueError: lw = 0 uw = 0 ps = float(row.get('prior_scale', self.holidays_prior_scale)) if np.isnan(ps): ps = float(self.holidays_prior_scale) if row.holiday in prior_scales and prior_scales[row.holiday] != ps: raise ValueError( 'Holiday {holiday!r} does not have consistent prior ' 'scale specification.'.format(holiday=row.holiday) ) if ps <= 0: raise ValueError('Prior scale must be > 0') prior_scales[row.holiday] = ps for offset in range(lw, uw + 1): occurrence = pd.to_datetime(dt + timedelta(days=offset)) try: loc = row_index.get_loc(occurrence) except KeyError: loc = None key = '{}_delim_{}{}'.format( row.holiday, '+' if offset >= 0 else '-', abs(offset) ) if loc is not None: expanded_holidays[key][loc] = 1. else: expanded_holidays[key] # Access key to generate value holiday_features = pd.DataFrame(expanded_holidays) # Make sure column order is consistent holiday_features = holiday_features[sorted(holiday_features.columns .tolist())] prior_scale_list = [ prior_scales[h.split('_delim_')[0]] for h in holiday_features.columns ] holiday_names = list(prior_scales.keys()) # Store holiday names used in fit if self.train_holiday_names is None: self.train_holiday_names = pd.Series(holiday_names) return holiday_features, prior_scale_list, holiday_names def add_regressor(self, name, prior_scale=None, standardize='auto', mode=None): """Add an additional regressor to be used for fitting and predicting. The dataframe passed to `fit` and `predict` will have a column with the specified name to be used as a regressor. When standardize='auto', the regressor will be standardized unless it is binary. The regression coefficient is given a prior with the specified scale parameter. Decreasing the prior scale will add additional regularization. If no prior scale is provided, self.holidays_prior_scale will be used. Mode can be specified as either 'additive' or 'multiplicative'. If not specified, self.seasonality_mode will be used. 'additive' means the effect of the regressor will be added to the trend, 'multiplicative' means it will multiply the trend. Parameters ---------- name: string name of the regressor. prior_scale: optional float scale for the normal prior. If not provided, self.holidays_prior_scale will be used. standardize: optional, specify whether this regressor will be standardized prior to fitting. Can be 'auto' (standardize if not binary), True, or False. mode: optional, 'additive' or 'multiplicative'. Defaults to self.seasonality_mode. Returns ------- The prophet object. 
""" if self.history is not None: raise Exception( "Regressors must be added prior to model fitting.") self.validate_column_name(name, check_regressors=False) if prior_scale is None: prior_scale = float(self.holidays_prior_scale) if mode is None: mode = self.seasonality_mode if prior_scale <= 0: raise ValueError('Prior scale must be > 0') if mode not in ['additive', 'multiplicative']: raise ValueError("mode must be 'additive' or 'multiplicative'") self.extra_regressors[name] = { 'prior_scale': prior_scale, 'standardize': standardize, 'mu': 0., 'std': 1., 'mode': mode, } return self def add_seasonality(self, name, period, fourier_order, prior_scale=None, mode=None, condition_name=None): """Add a seasonal component with specified period, number of Fourier components, and prior scale. Increasing the number of Fourier components allows the seasonality to change more quickly (at risk of overfitting). Default values for yearly and weekly seasonalities are 10 and 3 respectively. Increasing prior scale will allow this seasonality component more flexibility, decreasing will dampen it. If not provided, will use the seasonality_prior_scale provided on Prophet initialization (defaults to 10). Mode can be specified as either 'additive' or 'multiplicative'. If not specified, self.seasonality_mode will be used (defaults to additive). Additive means the seasonality will be added to the trend, multiplicative means it will multiply the trend. If condition_name is provided, the dataframe passed to `fit` and `predict` should have a column with the specified condition_name containing booleans which decides when to apply seasonality. Parameters ---------- name: string name of the seasonality component. period: float number of days in one period. fourier_order: int number of Fourier components to use. prior_scale: optional float prior scale for this component. mode: optional 'additive' or 'multiplicative' condition_name: string name of the seasonality condition. Returns ------- The prophet object. """ if self.history is not None: raise Exception( 'Seasonality must be added prior to model fitting.') if name not in ['daily', 'weekly', 'yearly']: # Allow overwriting built-in seasonalities self.validate_column_name(name, check_seasonalities=False) if prior_scale is None: ps = self.seasonality_prior_scale else: ps = float(prior_scale) if ps <= 0: raise ValueError('Prior scale must be > 0') if fourier_order <= 0: raise ValueError('Fourier Order must be > 0') if mode is None: mode = self.seasonality_mode if mode not in ['additive', 'multiplicative']: raise ValueError('mode must be "additive" or "multiplicative"') if condition_name is not None: self.validate_column_name(condition_name) self.seasonalities[name] = { 'period': period, 'fourier_order': fourier_order, 'prior_scale': ps, 'mode': mode, 'condition_name': condition_name, } return self def add_country_holidays(self, country_name): """Add in built-in holidays for the specified country. These holidays will be included in addition to any specified on model initialization. Holidays will be calculated for arbitrary date ranges in the history and future. See the online documentation for the list of countries with built-in holidays. Built-in country holidays can only be set for a single country. Parameters ---------- country_name: Name of the country, like 'UnitedStates' or 'US' Returns ------- The prophet object. """ if self.history is not None: raise Exception( "Country holidays must be added prior to model fitting." ) # Validate names. 
for name in get_holiday_names(country_name): # Allow merging with existing holidays self.validate_column_name(name, check_holidays=False) # Set the holidays. if self.country_holidays is not None: logger.warning( 'Changing country holidays from {country_holidays!r} to ' '{country_name!r}.' .format( country_holidays=self.country_holidays, country_name=country_name, ) ) self.country_holidays = country_name return self def make_all_seasonality_features(self, df): """Dataframe with seasonality features. Includes seasonality features, holiday features, and added regressors. Parameters ---------- df: pd.DataFrame with dates for computing seasonality features and any added regressors. Returns ------- pd.DataFrame with regression features. list of prior scales for each column of the features dataframe. Dataframe with indicators for which regression components correspond to which columns. Dictionary with keys 'additive' and 'multiplicative' listing the component names for each mode of seasonality. """ seasonal_features = [] prior_scales = [] modes = {'additive': [], 'multiplicative': []} # Seasonality features for name, props in self.seasonalities.items(): features = self.make_seasonality_features( df['ds'], props['period'], props['fourier_order'], name, ) if props['condition_name'] is not None: features[~df[props['condition_name']]] = 0 seasonal_features.append(features) prior_scales.extend( [props['prior_scale']] * features.shape[1]) modes[props['mode']].append(name) # Holiday features holidays = self.construct_holiday_dataframe(df['ds']) if len(holidays) > 0: features, holiday_priors, holiday_names = ( self.make_holiday_features(df['ds'], holidays) ) seasonal_features.append(features) prior_scales.extend(holiday_priors) modes[self.seasonality_mode].extend(holiday_names) # Additional regressors for name, props in self.extra_regressors.items(): seasonal_features.append(pd.DataFrame(df[name])) prior_scales.append(props['prior_scale']) modes[props['mode']].append(name) # Dummy to prevent empty X if len(seasonal_features) == 0: seasonal_features.append( pd.DataFrame({'zeros': np.zeros(df.shape[0])})) prior_scales.append(1.) seasonal_features = pd.concat(seasonal_features, axis=1) component_cols, modes = self.regressor_column_matrix( seasonal_features, modes ) return seasonal_features, prior_scales, component_cols, modes def regressor_column_matrix(self, seasonal_features, modes): """Dataframe indicating which columns of the feature matrix correspond to which seasonality/regressor components. Includes combination components, like 'additive_terms'. These combination components will be added to the 'modes' input. Parameters ---------- seasonal_features: Constructed seasonal features dataframe modes: Dictionary with keys 'additive' and 'multiplicative' listing the component names for each mode of seasonality. Returns ------- component_cols: A binary indicator dataframe with columns seasonal components and rows columns in seasonal_features. Entry is 1 if that columns is used in that component. modes: Updated input with combination components. 
""" components = pd.DataFrame({ 'col': np.arange(seasonal_features.shape[1]), 'component': [ x.split('_delim_')[0] for x in seasonal_features.columns ], }) # Add total for holidays if self.train_holiday_names is not None: components = self.add_group_component( components, 'holidays', self.train_holiday_names.unique()) # Add totals additive and multiplicative components, and regressors for mode in ['additive', 'multiplicative']: components = self.add_group_component( components, mode + '_terms', modes[mode] ) regressors_by_mode = [ r for r, props in self.extra_regressors.items() if props['mode'] == mode ] components = self.add_group_component( components, 'extra_regressors_' + mode, regressors_by_mode) # Add combination components to modes modes[mode].append(mode + '_terms') modes[mode].append('extra_regressors_' + mode) # After all of the additive/multiplicative groups have been added, modes[self.seasonality_mode].append('holidays') # Convert to a binary matrix component_cols = pd.crosstab( components['col'], components['component'], ).sort_index(level='col') # Add columns for additive and multiplicative terms, if missing for name in ['additive_terms', 'multiplicative_terms']: if name not in component_cols: component_cols[name] = 0 # Remove the placeholder component_cols.drop('zeros', axis=1, inplace=True, errors='ignore') # Validation if (max(component_cols['additive_terms'] + component_cols['multiplicative_terms']) > 1): raise Exception('A bug occurred in seasonal components.') # Compare to the training, if set. if self.train_component_cols is not None: component_cols = component_cols[self.train_component_cols.columns] if not component_cols.equals(self.train_component_cols): raise Exception('A bug occurred in constructing regressors.') return component_cols, modes def add_group_component(self, components, name, group): """Adds a component with given name that contains all of the components in group. Parameters ---------- components: Dataframe with components. name: Name of new group component. group: List of components that form the group. Returns ------- Dataframe with components. """ new_comp = components[components['component'].isin(set(group))].copy() group_cols = new_comp['col'].unique() if len(group_cols) > 0: new_comp = pd.DataFrame({'col': group_cols, 'component': name}) components = components.append(new_comp) return components def parse_seasonality_args(self, name, arg, auto_disable, default_order): """Get number of fourier components for built-in seasonalities. Parameters ---------- name: string name of the seasonality component. arg: 'auto', True, False, or number of fourier components as provided. auto_disable: bool if seasonality should be disabled when 'auto'. default_order: int default fourier order Returns ------- Number of fourier components, or 0 for disabled. """ if arg == 'auto': fourier_order = 0 if name in self.seasonalities: logger.info( 'Found custom seasonality named {name!r}, disabling ' 'built-in {name!r} seasonality.'.format(name=name) ) elif auto_disable: logger.info( 'Disabling {name} seasonality. Run prophet with ' '{name}_seasonality=True to override this.' .format(name=name) ) else: fourier_order = default_order elif arg is True: fourier_order = default_order elif arg is False: fourier_order = 0 else: fourier_order = int(arg) return fourier_order def set_auto_seasonalities(self): """Set seasonalities that were left on auto. Turns on yearly seasonality if there is >=2 years of history. 
Turns on weekly seasonality if there is >=2 weeks of history, and the spacing between dates in the history is <7 days. Turns on daily seasonality if there is >=2 days of history, and the spacing between dates in the history is <1 day. """ first = self.history['ds'].min() last = self.history['ds'].max() dt = self.history['ds'].diff() min_dt = dt.iloc[dt.values.nonzero()[0]].min() # Yearly seasonality yearly_disable = last - first < pd.Timedelta(days=730) fourier_order = self.parse_seasonality_args( 'yearly', self.yearly_seasonality, yearly_disable, 10) if fourier_order > 0: self.seasonalities['yearly'] = { 'period': 365.25, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } # Weekly seasonality weekly_disable = ((last - first < pd.Timedelta(weeks=2)) or (min_dt >= pd.Timedelta(weeks=1))) fourier_order = self.parse_seasonality_args( 'weekly', self.weekly_seasonality, weekly_disable, 3) if fourier_order > 0: self.seasonalities['weekly'] = { 'period': 7, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } # Daily seasonality daily_disable = ((last - first < pd.Timedelta(days=2)) or (min_dt >= pd.Timedelta(days=1))) fourier_order = self.parse_seasonality_args( 'daily', self.daily_seasonality, daily_disable, 4) if fourier_order > 0: self.seasonalities['daily'] = { 'period': 1, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } @staticmethod def linear_growth_init(df): """Initialize linear growth. Provides a strong initialization for linear growth by calculating the growth and offset parameters that pass the function through the first and last points in the time series. Parameters ---------- df: pd.DataFrame with columns ds (date), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the linear growth function. """ i0, i1 = df['ds'].idxmin(), df['ds'].idxmax() T = df['t'].iloc[i1] - df['t'].iloc[i0] k = (df['y_scaled'].iloc[i1] - df['y_scaled'].iloc[i0]) / T m = df['y_scaled'].iloc[i0] - k * df['t'].iloc[i0] return (k, m) @staticmethod def logistic_growth_init(df): """Initialize logistic growth. Provides a strong initialization for logistic growth by calculating the growth and offset parameters that pass the function through the first and last points in the time series. Parameters ---------- df: pd.DataFrame with columns ds (date), cap_scaled (scaled capacity), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the logistic growth function. """ i0, i1 = df['ds'].idxmin(), df['ds'].idxmax() T = df['t'].iloc[i1] - df['t'].iloc[i0] # Force valid values, in case y > cap or y < 0 C0 = df['cap_scaled'].iloc[i0] C1 = df['cap_scaled'].iloc[i1] y0 = max(0.01 * C0, min(0.99 * C0, df['y_scaled'].iloc[i0])) y1 = max(0.01 * C1, min(0.99 * C1, df['y_scaled'].iloc[i1])) r0 = C0 / y0 r1 = C1 / y1 if abs(r0 - r1) <= 0.01: r0 = 1.05 * r0 L0 = np.log(r0 - 1) L1 = np.log(r1 - 1) # Initialize the offset m = L0 * T / (L0 - L1) # And the rate k = (L0 - L1) / T return (k, m) @staticmethod def flat_growth_init(df): """Initialize flat growth. Provides a strong initialization for flat growth. Sets the growth to 0 and offset parameter as mean of history y_scaled values. 
Parameters ---------- df: pd.DataFrame with columns ds (date), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the linear growth function. """ k = 0 m = df['y_scaled'].mean() return k, m def fit(self, df, **kwargs): """Fit the Prophet model. This sets self.params to contain the fitted model parameters. It is a dictionary parameter names as keys and the following items: k (Mx1 array): M posterior samples of the initial slope. m (Mx1 array): The initial intercept. delta (MxN array): The slope change at each of N changepoints. beta (MxK matrix): Coefficients for K seasonality features. sigma_obs (Mx1 array): Noise level. Note that M=1 if MAP estimation. Parameters ---------- df: pd.DataFrame containing the history. Must have columns ds (date type) and y, the time series. If self.growth is 'logistic', then df must also have a column cap that specifies the capacity at each ds. kwargs: Additional arguments passed to the optimizing or sampling functions in Stan. Returns ------- The fitted Prophet object. """ if self.history is not None: raise Exception('Prophet object can only be fit once. ' 'Instantiate a new object.') if ('ds' not in df) or ('y' not in df): raise ValueError( 'Dataframe must have columns "ds" and "y" with the dates and ' 'values respectively.' ) history = df[df['y'].notnull()].copy() if history.shape[0] < 2: raise ValueError('Dataframe has less than 2 non-NaN rows.') self.history_dates = pd.to_datetime(pd.Series(df['ds'].unique(), name='ds')).sort_values() history = self.setup_dataframe(history, initialize_scales=True) self.history = history self.set_auto_seasonalities() seasonal_features, prior_scales, component_cols, modes = ( self.make_all_seasonality_features(history)) self.train_component_cols = component_cols self.component_modes = modes self.fit_kwargs = deepcopy(kwargs) self.set_changepoints() trend_indicator = {'linear': 0, 'logistic': 1, 'flat': 2} dat = { 'T': history.shape[0], 'K': seasonal_features.shape[1], 'S': len(self.changepoints_t), 'y': history['y_scaled'], 't': history['t'], 't_change': self.changepoints_t, 'X': seasonal_features, 'sigmas': prior_scales, 'tau': self.changepoint_prior_scale, 'trend_indicator': trend_indicator[self.growth], 's_a': component_cols['additive_terms'], 's_m': component_cols['multiplicative_terms'], } if self.growth == 'linear': dat['cap'] = np.zeros(self.history.shape[0]) kinit = self.linear_growth_init(history) elif self.growth == 'flat': dat['cap'] = np.zeros(self.history.shape[0]) kinit = self.flat_growth_init(history) else: dat['cap'] = history['cap_scaled'] kinit = self.logistic_growth_init(history) stan_init = { 'k': kinit[0], 'm': kinit[1], 'delta': np.zeros(len(self.changepoints_t)), 'beta': np.zeros(seasonal_features.shape[1]), 'sigma_obs': 1, } if history['y'].min() == history['y'].max() and \ (self.growth == 'linear' or self.growth == 'flat'): self.params = stan_init self.params['sigma_obs'] = 1e-9 for par in self.params: self.params[par] = np.array([self.params[par]]) elif self.mcmc_samples > 0: self.params = self.stan_backend.sampling(stan_init, dat, self.mcmc_samples, **kwargs) else: self.params = self.stan_backend.fit(stan_init, dat, **kwargs) self.stan_fit = self.stan_backend.stan_fit # If no changepoints were requested, replace delta with 0s if len(self.changepoints) == 0: # Fold delta into the base rate k self.params['k'] = (self.params['k'] + self.params['delta'].reshape(-1)) self.params['delta'] = (np.zeros(self.params['delta'].shape) 
.reshape((-1, 1))) return self def predict(self, df=None): """Predict using the prophet model. Parameters ---------- df: pd.DataFrame with dates for predictions (column ds), and capacity (column cap) if logistic growth. If not provided, predictions are made on the history. Returns ------- A pd.DataFrame with the forecast components. """ if self.history is None: raise Exception('Model has not been fit.') if df is None: df = self.history.copy() else: if df.shape[0] == 0: raise ValueError('Dataframe has no rows.') df = self.setup_dataframe(df.copy()) df['trend'] = self.predict_trend(df) seasonal_components = self.predict_seasonal_components(df) if self.uncertainty_samples: intervals = self.predict_uncertainty(df) else: intervals = None # Drop columns except ds, cap, floor, and trend cols = ['ds', 'trend'] if 'cap' in df: cols.append('cap') if self.logistic_floor: cols.append('floor') # Add in forecast components df2 = pd.concat((df[cols], intervals, seasonal_components), axis=1) df2['yhat'] = ( df2['trend'] * (1 + df2['multiplicative_terms']) + df2['additive_terms'] ) return df2 @staticmethod def piecewise_linear(t, deltas, k, m, changepoint_ts): """Evaluate the piecewise linear function. Parameters ---------- t: np.array of times on which the function is evaluated. deltas: np.array of rate changes at each changepoint. k: Float initial rate. m: Float initial offset. changepoint_ts: np.array of changepoint times. Returns ------- Vector y(t). """ # Intercept changes gammas = -changepoint_ts * deltas # Get cumulative slope and intercept at each t k_t = k * np.ones_like(t) m_t = m * np.ones_like(t) for s, t_s in enumerate(changepoint_ts): indx = t >= t_s k_t[indx] += deltas[s] m_t[indx] += gammas[s] return k_t * t + m_t @staticmethod def piecewise_logistic(t, cap, deltas, k, m, changepoint_ts): """Evaluate the piecewise logistic function. Parameters ---------- t: np.array of times on which the function is evaluated. cap: np.array of capacities at each t. deltas: np.array of rate changes at each changepoint. k: Float initial rate. m: Float initial offset. changepoint_ts: np.array of changepoint times. Returns ------- Vector y(t). """ # Compute offset changes k_cum = np.concatenate((np.atleast_1d(k), np.cumsum(deltas) + k)) gammas = np.zeros(len(changepoint_ts)) for i, t_s in enumerate(changepoint_ts): gammas[i] = ( (t_s - m - np.sum(gammas)) * (1 - k_cum[i] / k_cum[i + 1]) # noqa W503 ) # Get cumulative rate and offset at each t k_t = k * np.ones_like(t) m_t = m * np.ones_like(t) for s, t_s in enumerate(changepoint_ts): indx = t >= t_s k_t[indx] += deltas[s] m_t[indx] += gammas[s] return cap / (1 + np.exp(-k_t * (t - m_t))) @staticmethod def flat_trend(t, m): """Evaluate the flat trend function. Parameters ---------- t: np.array of times on which the function is evaluated. m: Float initial offset. Returns ------- Vector y(t). """ m_t = m * np.ones_like(t) return m_t def predict_trend(self, df): """Predict trend using the prophet model. Parameters ---------- df: Prediction dataframe. Returns ------- Vector with trend on prediction dates. 
""" k = np.nanmean(self.params['k']) m = np.nanmean(self.params['m']) deltas = np.nanmean(self.params['delta'], axis=0) t = np.array(df['t']) if self.growth == 'linear': trend = self.piecewise_linear(t, deltas, k, m, self.changepoints_t) elif self.growth == 'logistic': cap = df['cap_scaled'] trend = self.piecewise_logistic( t, cap, deltas, k, m, self.changepoints_t) elif self.growth == 'flat': # constant trend trend = self.flat_trend(t, m) return trend * self.y_scale + df['floor'] def predict_seasonal_components(self, df): """Predict seasonality components, holidays, and added regressors. Parameters ---------- df: Prediction dataframe. Returns ------- Dataframe with seasonal components. """ seasonal_features, _, component_cols, _ = ( self.make_all_seasonality_features(df) ) if self.uncertainty_samples: lower_p = 100 * (1.0 - self.interval_width) / 2 upper_p = 100 * (1.0 + self.interval_width) / 2 X = seasonal_features.values data = {} for component in component_cols.columns: beta_c = self.params['beta'] * component_cols[component].values comp = np.matmul(X, beta_c.transpose()) if component in self.component_modes['additive']: comp *= self.y_scale data[component] = np.nanmean(comp, axis=1) if self.uncertainty_samples: data[component + '_lower'] = self.percentile( comp, lower_p, axis=1, ) data[component + '_upper'] = self.percentile( comp, upper_p, axis=1, ) return pd.DataFrame(data) def sample_posterior_predictive(self, df): """Prophet posterior predictive samples. Parameters ---------- df: Prediction dataframe. Returns ------- Dictionary with posterior predictive samples for the forecast yhat and for the trend component. """ n_iterations = self.params['k'].shape[0] samp_per_iter = max(1, int(np.ceil( self.uncertainty_samples / float(n_iterations) ))) # Generate seasonality features once so we can re-use them. seasonal_features, _, component_cols, _ = ( self.make_all_seasonality_features(df) ) sim_values = {'yhat': [], 'trend': []} for i in range(n_iterations): for _j in range(samp_per_iter): sim = self.sample_model( df=df, seasonal_features=seasonal_features, iteration=i, s_a=component_cols['additive_terms'], s_m=component_cols['multiplicative_terms'], ) for key in sim_values: sim_values[key].append(sim[key]) for k, v in sim_values.items(): sim_values[k] = np.column_stack(v) return sim_values def predictive_samples(self, df): """Sample from the posterior predictive distribution. Returns samples for the main estimate yhat, and for the trend component. The shape of each output will be (nforecast x nsamples), where nforecast is the number of points being forecasted (the number of rows in the input dataframe) and nsamples is the number of posterior samples drawn. This is the argument `uncertainty_samples` in the Prophet constructor, which defaults to 1000. Parameters ---------- df: Dataframe with dates for predictions (column ds), and capacity (column cap) if logistic growth. Returns ------- Dictionary with keys "trend" and "yhat" containing posterior predictive samples for that component. """ df = self.setup_dataframe(df.copy()) sim_values = self.sample_posterior_predictive(df) return sim_values def predict_uncertainty(self, df): """Prediction intervals for yhat and trend. Parameters ---------- df: Prediction dataframe. Returns ------- Dataframe with uncertainty intervals. 
""" sim_values = self.sample_posterior_predictive(df) lower_p = 100 * (1.0 - self.interval_width) / 2 upper_p = 100 * (1.0 + self.interval_width) / 2 series = {} for key in ['yhat', 'trend']: series['{}_lower'.format(key)] = self.percentile( sim_values[key], lower_p, axis=1) series['{}_upper'.format(key)] = self.percentile( sim_values[key], upper_p, axis=1) return pd.DataFrame(series) def sample_model(self, df, seasonal_features, iteration, s_a, s_m): """Simulate observations from the extrapolated generative model. Parameters ---------- df: Prediction dataframe. seasonal_features: pd.DataFrame of seasonal features. iteration: Int sampling iteration to use parameters from. s_a: Indicator vector for additive components s_m: Indicator vector for multiplicative components Returns ------- Dataframe with trend and yhat, each like df['t']. """ trend = self.sample_predictive_trend(df, iteration) beta = self.params['beta'][iteration] Xb_a = np.matmul(seasonal_features.values, beta * s_a.values) * self.y_scale Xb_m = np.matmul(seasonal_features.values, beta * s_m.values) sigma = self.params['sigma_obs'][iteration] noise = np.random.normal(0, sigma, df.shape[0]) * self.y_scale return pd.DataFrame({ 'yhat': trend * (1 + Xb_m) + Xb_a + noise, 'trend': trend }) def sample_predictive_trend(self, df, iteration): """Simulate the trend using the extrapolated generative model. Parameters ---------- df: Prediction dataframe. iteration: Int sampling iteration to use parameters from. Returns ------- np.array of simulated trend over df['t']. """ k = self.params['k'][iteration] m = self.params['m'][iteration] deltas = self.params['delta'][iteration] t = np.array(df['t']) T = t.max() # New changepoints from a Poisson process with rate S on [1, T] if T > 1: S = len(self.changepoints_t) n_changes = np.random.poisson(S * (T - 1)) else: n_changes = 0 if n_changes > 0: changepoint_ts_new = 1 + np.random.rand(n_changes) * (T - 1) changepoint_ts_new.sort() else: changepoint_ts_new = [] # Get the empirical scale of the deltas, plus epsilon to avoid NaNs. lambda_ = np.mean(np.abs(deltas)) + 1e-8 # Sample deltas deltas_new = np.random.laplace(0, lambda_, n_changes) # Prepend the times and deltas from the history changepoint_ts = np.concatenate((self.changepoints_t, changepoint_ts_new)) deltas = np.concatenate((deltas, deltas_new)) if self.growth == 'linear': trend = self.piecewise_linear(t, deltas, k, m, changepoint_ts) elif self.growth == 'logistic': cap = df['cap_scaled'] trend = self.piecewise_logistic(t, cap, deltas, k, m, changepoint_ts) elif self.growth == 'flat': trend = self.flat_trend(t, m) return trend * self.y_scale + df['floor'] def percentile(self, a, *args, **kwargs): """ We rely on np.nanpercentile in the rare instances where there are a small number of bad samples with MCMC that contain NaNs. However, since np.nanpercentile is far slower than np.percentile, we only fall back to it if the array contains NaNs. See https://github.com/facebook/prophet/issues/1310 for more details. """ fn = np.nanpercentile if np.isnan(a).any() else np.percentile return fn(a, *args, **kwargs) def make_future_dataframe(self, periods, freq='D', include_history=True): """Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. include_history: Boolean to include the historical dates in the data frame for predictions. 
Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods. """ if self.history_dates is None: raise Exception('Model has not been fit.') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates}) def plot(self, fcst, ax=None, uncertainty=True, plot_cap=True, xlabel='ds', ylabel='y', figsize=(10, 6), include_legend=False): """Plot the Prophet forecast. Parameters ---------- fcst: pd.DataFrame output of self.predict. ax: Optional matplotlib axes on which to plot. uncertainty: Optional boolean to plot uncertainty intervals. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. xlabel: Optional label name on X-axis ylabel: Optional label name on Y-axis figsize: Optional tuple width, height in inches. include_legend: Optional boolean to add legend to the plot. Returns ------- A matplotlib figure. """ return plot( m=self, fcst=fcst, ax=ax, uncertainty=uncertainty, plot_cap=plot_cap, xlabel=xlabel, ylabel=ylabel, figsize=figsize, include_legend=include_legend ) def plot_components(self, fcst, uncertainty=True, plot_cap=True, weekly_start=0, yearly_start=0, figsize=None): """Plot the Prophet forecast components. Will plot whichever are available of: trend, holidays, weekly seasonality, and yearly seasonality. Parameters ---------- fcst: pd.DataFrame output of self.predict. uncertainty: Optional boolean to plot uncertainty intervals. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. weekly_start: Optional int specifying the start day of the weekly seasonality plot. 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on. yearly_start: Optional int specifying the start day of the yearly seasonality plot. 0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on. figsize: Optional tuple width, height in inches. Returns ------- A matplotlib figure. """ return plot_components( m=self, fcst=fcst, uncertainty=uncertainty, plot_cap=plot_cap, weekly_start=weekly_start, yearly_start=yearly_start, figsize=figsize )
bsd-3-clause
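The Prophet code above defines the public model-building API (add_seasonality, add_regressor, add_country_holidays, fit, make_future_dataframe, predict and the plotting helpers). A minimal usage sketch tying those calls together follows; the CSV path and the 'promo' regressor column are hypothetical placeholders, and the import path depends on the installed release (prophet for 1.x, fbprophet for older versions).

import pandas as pd
from prophet import Prophet   # older releases: from fbprophet import Prophet

df = pd.read_csv('example_history.csv')               # hypothetical file with 'ds' and 'y' columns
df['ds'] = pd.to_datetime(df['ds'])
df['promo'] = (df['ds'].dt.month == 12).astype(int)   # hypothetical extra regressor

m = Prophet(seasonality_mode='additive')
m.add_country_holidays(country_name='US')                        # built-in holidays
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)  # custom seasonality
m.add_regressor('promo', prior_scale=5.0, mode='additive')       # extra regressor
m.fit(df)                                                        # MAP fit (mcmc_samples=0)

future = m.make_future_dataframe(periods=90, freq='D')
future['promo'] = (future['ds'].dt.month == 12).astype(int)      # regressors must also cover the future dates
fcst = m.predict(future)

fig = m.plot(fcst, include_legend=True)
fig_components = m.plot_components(fcst)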
jreback/pandas
pandas/tests/indexes/multi/test_sorting.py
1
8730
import random import numpy as np import pytest from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex import pandas._testing as tm from pandas.core.indexes.frozen import FrozenList def test_sortlevel(idx): tuples = list(idx) random.shuffle(tuples) index = MultiIndex.from_tuples(tuples) sorted_idx, _ = index.sortlevel(0) expected = MultiIndex.from_tuples(sorted(tuples)) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(0, ascending=False) assert sorted_idx.equals(expected[::-1]) sorted_idx, _ = index.sortlevel(1) by1 = sorted(tuples, key=lambda x: (x[1], x[0])) expected = MultiIndex.from_tuples(by1) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(1, ascending=False) assert sorted_idx.equals(expected[::-1]) def test_sortlevel_not_sort_remaining(): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC")) sorted_idx, _ = mi.sortlevel("A", sort_remaining=False) assert sorted_idx.equals(mi) def test_sortlevel_deterministic(): tuples = [ ("bar", "one"), ("foo", "two"), ("qux", "two"), ("foo", "one"), ("baz", "two"), ("qux", "one"), ] index = MultiIndex.from_tuples(tuples) sorted_idx, _ = index.sortlevel(0) expected = MultiIndex.from_tuples(sorted(tuples)) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(0, ascending=False) assert sorted_idx.equals(expected[::-1]) sorted_idx, _ = index.sortlevel(1) by1 = sorted(tuples, key=lambda x: (x[1], x[0])) expected = MultiIndex.from_tuples(by1) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(1, ascending=False) assert sorted_idx.equals(expected[::-1]) def test_numpy_argsort(idx): result = np.argsort(idx) expected = idx.argsort() tm.assert_numpy_array_equal(result, expected) # these are the only two types that perform # pandas compatibility input validation - the # rest already perform separate (or no) such # validation via their 'values' attribute as # defined in pandas.core.indexes/base.py - they # cannot be changed at the moment due to # backwards compatibility concerns if isinstance(type(idx), (CategoricalIndex, RangeIndex)): msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(idx, axis=1) msg = "the 'kind' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(idx, kind="mergesort") msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(idx, order=("a", "b")) def test_unsortedindex(): # GH 11897 mi = MultiIndex.from_tuples( [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], names=["one", "two"], ) df = DataFrame([[i, 10 * i] for i in range(6)], index=mi, columns=["one", "two"]) # GH 16734: not sorted, but no real slicing result = df.loc(axis=0)["z", "a"] expected = df.iloc[0] tm.assert_series_equal(result, expected) msg = ( "MultiIndex slicing requires the index to be lexsorted: " r"slicing on levels \[1\], lexsort depth 0" ) with pytest.raises(UnsortedIndexError, match=msg): df.loc(axis=0)["z", slice("a")] df.sort_index(inplace=True) assert len(df.loc(axis=0)["z", :]) == 2 with pytest.raises(KeyError, match="'q'"): df.loc(axis=0)["q", :] def test_unsortedindex_doc_examples(): # https://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex dfm = DataFrame( {"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4)} ) dfm = dfm.set_index(["jim", "joe"]) with tm.assert_produces_warning(PerformanceWarning): 
dfm.loc[(1, "z")] msg = r"Key length \(2\) was greater than MultiIndex lexsort depth \(1\)" with pytest.raises(UnsortedIndexError, match=msg): dfm.loc[(0, "y"):(1, "z")] assert not dfm.index.is_lexsorted() assert dfm.index.lexsort_depth == 1 # sort it dfm = dfm.sort_index() dfm.loc[(1, "z")] dfm.loc[(0, "y"):(1, "z")] assert dfm.index.is_lexsorted() assert dfm.index.lexsort_depth == 2 def test_reconstruct_sort(): # starts off lexsorted & monotonic mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) assert mi.is_lexsorted() assert mi.is_monotonic recons = mi._sort_levels_monotonic() assert recons.is_lexsorted() assert recons.is_monotonic assert mi is recons assert mi.equals(recons) assert Index(mi.values).equals(Index(recons.values)) # cannot convert to lexsorted mi = MultiIndex.from_tuples( [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], names=["one", "two"], ) assert not mi.is_lexsorted() assert not mi.is_monotonic recons = mi._sort_levels_monotonic() assert not recons.is_lexsorted() assert not recons.is_monotonic assert mi.equals(recons) assert Index(mi.values).equals(Index(recons.values)) # cannot convert to lexsorted mi = MultiIndex( levels=[["b", "d", "a"], [1, 2, 3]], codes=[[0, 1, 0, 2], [2, 0, 0, 1]], names=["col1", "col2"], ) assert not mi.is_lexsorted() assert not mi.is_monotonic recons = mi._sort_levels_monotonic() assert not recons.is_lexsorted() assert not recons.is_monotonic assert mi.equals(recons) assert Index(mi.values).equals(Index(recons.values)) def test_reconstruct_remove_unused(): # xref to GH 2770 df = DataFrame( [["deleteMe", 1, 9], ["keepMe", 2, 9], ["keepMeToo", 3, 9]], columns=["first", "second", "third"], ) df2 = df.set_index(["first", "second"], drop=False) df2 = df2[df2["first"] != "deleteMe"] # removed levels are there expected = MultiIndex( levels=[["deleteMe", "keepMe", "keepMeToo"], [1, 2, 3]], codes=[[1, 2], [1, 2]], names=["first", "second"], ) result = df2.index tm.assert_index_equal(result, expected) expected = MultiIndex( levels=[["keepMe", "keepMeToo"], [2, 3]], codes=[[0, 1], [0, 1]], names=["first", "second"], ) result = df2.index.remove_unused_levels() tm.assert_index_equal(result, expected) # idempotent result2 = result.remove_unused_levels() tm.assert_index_equal(result2, expected) assert result2.is_(result) @pytest.mark.parametrize( "first_type,second_type", [("int64", "int64"), ("datetime64[D]", "str")] ) def test_remove_unused_levels_large(first_type, second_type): # GH16556 # because tests should be deterministic (and this test in particular # checks that levels are removed, which is not the case for every # random input): rng = np.random.RandomState(4) # seed is arbitrary value that works size = 1 << 16 df = DataFrame( { "first": rng.randint(0, 1 << 13, size).astype(first_type), "second": rng.randint(0, 1 << 10, size).astype(second_type), "third": rng.rand(size), } ) df = df.groupby(["first", "second"]).sum() df = df[df.third < 0.1] result = df.index.remove_unused_levels() assert len(result.levels[0]) < len(df.index.levels[0]) assert len(result.levels[1]) < len(df.index.levels[1]) assert result.equals(df.index) expected = df.reset_index().set_index(["first", "second"]).index tm.assert_index_equal(result, expected) @pytest.mark.parametrize("level0", [["a", "d", "b"], ["a", "d", "b", "unused"]]) @pytest.mark.parametrize( "level1", [["w", "x", "y", "z"], ["w", "x", "y", "z", "unused"]] ) def test_remove_unused_nan(level0, level1): # GH 18417 mi = MultiIndex(levels=[level0, level1], codes=[[0, 2, 
-1, 1, -1], [0, 1, 2, 3, 2]]) result = mi.remove_unused_levels() tm.assert_index_equal(result, mi) for level in 0, 1: assert "unused" not in result.levels[level] def test_argsort(idx): result = idx.argsort() expected = idx.values.argsort() tm.assert_numpy_array_equal(result, expected) def test_remove_unused_levels_with_nan(): # GH 37510 idx = Index([(1, np.nan), (3, 4)]).rename(["id1", "id2"]) idx = idx.set_levels(["a", np.nan], level="id1") idx = idx.remove_unused_levels() result = idx.levels expected = FrozenList([["a", np.nan], [4]]) assert str(result) == str(expected)
bsd-3-clause
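The tests above exercise MultiIndex.sortlevel and remove_unused_levels. A small standalone illustration of the same behaviour, with made-up tuples, is sketched below.

import pandas as pd

mi = pd.MultiIndex.from_tuples(
    [("z", 2), ("x", 1), ("y", 2), ("x", 2)], names=["letter", "number"]
)

# Sort by the first level (and, by default, the remaining levels too).
sorted_idx, indexer = mi.sortlevel(0)
print(sorted_idx.tolist())     # [('x', 1), ('x', 2), ('y', 2), ('z', 2)]

# Sort by a named level, descending, without re-sorting the other level.
by_number, _ = mi.sortlevel("number", ascending=False, sort_remaining=False)

# Selecting a subset keeps the original levels; remove_unused_levels drops them.
subset = mi[mi.get_level_values("letter") != "z"]
print(subset.levels[0].tolist())                         # ['x', 'y', 'z'] - 'z' still present
print(subset.remove_unused_levels().levels[0].tolist())  # ['x', 'y']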
kiyoto/statsmodels
statsmodels/stats/tests/test_panel_robustcov.py
34
2750
# -*- coding: utf-8 -*- """Test for panel robust covariance estimators after pooled ols this follows the example from xtscc paper/help Created on Tue May 22 20:27:57 2012 Author: Josef Perktold """ from statsmodels.compat.python import range, lmap import numpy as np from numpy.testing import assert_almost_equal from statsmodels.regression.linear_model import OLS from statsmodels.tools.tools import add_constant import statsmodels.stats.sandwich_covariance as sw def test_panel_robust_cov(): import pandas as pa import statsmodels.datasets.grunfeld as gr from .results.results_panelrobust import results as res_stata dtapa = gr.data.load_pandas() #Stata example/data seems to miss last firm dtapa_endog = dtapa.endog[:200] dtapa_exog = dtapa.exog[:200] res = OLS(dtapa_endog, add_constant(dtapa_exog[['value', 'capital']], prepend=False)).fit() #time indicator in range(max Ti) time = np.asarray(dtapa_exog[['year']]) time -= time.min() time = np.squeeze(time).astype(int) #sw.cov_nw_panel requires bounds instead of index tidx = [(i*20, 20*(i+1)) for i in range(10)] #firm index in range(n_firms) firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'), return_inverse=True) #panel newey west standard errors cov = sw.cov_nw_panel(res, 0, tidx, use_correction='hac') #dropping numpy 1.4 soon #np.testing.assert_allclose(cov, res_stata.cov_pnw0_stata, rtol=1e-6) assert_almost_equal(cov, res_stata.cov_pnw0_stata, decimal=4) cov = sw.cov_nw_panel(res, 1, tidx, use_correction='hac') #np.testing.assert_allclose(cov, res_stata.cov_pnw1_stata, rtol=1e-6) assert_almost_equal(cov, res_stata.cov_pnw1_stata, decimal=4) cov = sw.cov_nw_panel(res, 4, tidx) #check default #np.testing.assert_allclose(cov, res_stata.cov_pnw4_stata, rtol=1e-6) assert_almost_equal(cov, res_stata.cov_pnw4_stata, decimal=4) #cluster robust standard errors cov_clu = sw.cov_cluster(res, firm_id) assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4) #cluster robust standard errors, non-int groups cov_clu = sw.cov_cluster(res, lmap(str, firm_id)) assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4) #Driscoll and Kraay panel robust standard errors rcov = sw.cov_nw_groupsum(res, 0, time, use_correction=0) assert_almost_equal(rcov, res_stata.cov_dk0_stata, decimal=4) rcov = sw.cov_nw_groupsum(res, 1, time, use_correction=0) assert_almost_equal(rcov, res_stata.cov_dk1_stata, decimal=4) rcov = sw.cov_nw_groupsum(res, 4, time) #check default assert_almost_equal(rcov, res_stata.cov_dk4_stata, decimal=4)
bsd-3-clause
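The test above checks the panel Newey-West, cluster-robust and Driscoll-Kraay covariance helpers against Stata output for the Grunfeld data. The sketch below calls the same helpers with the same call signatures, but on a synthetic balanced panel (5 firms x 20 periods) so it is self-contained; the data-generating process is arbitrary.

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
import statsmodels.stats.sandwich_covariance as sw

rng = np.random.RandomState(0)
n_firms, n_periods = 5, 20
x = rng.randn(n_firms * n_periods, 2)
firm_id = np.repeat(np.arange(n_firms), n_periods)      # firm index per observation
time = np.tile(np.arange(n_periods), n_firms)           # time index per observation
firm_effect = rng.randn(n_firms)[firm_id]
y = x.dot([1.0, -0.5]) + firm_effect + rng.randn(n_firms * n_periods)

res = OLS(y, add_constant(x)).fit()

# (start, stop) bounds of each firm's block, as cov_nw_panel expects
tidx = [(i * n_periods, (i + 1) * n_periods) for i in range(n_firms)]

cov_pnw = sw.cov_nw_panel(res, 4, tidx, use_correction='hac')   # panel Newey-West, 4 lags
cov_clu = sw.cov_cluster(res, firm_id)                          # clustered by firm
cov_dk = sw.cov_nw_groupsum(res, 4, time, use_correction=0)     # Driscoll-Kraay
print(np.sqrt(np.diag(cov_clu)))                                # cluster-robust standard errors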
henrykironde/scikit-learn
sklearn/feature_selection/variance_threshold.py
238
2594
# Author: Lars Buitinck <L.J.Buitinck@uva.nl> # License: 3-clause BSD import numpy as np from ..base import BaseEstimator from .base import SelectorMixin from ..utils import check_array from ..utils.sparsefuncs import mean_variance_axis from ..utils.validation import check_is_fitted class VarianceThreshold(BaseEstimator, SelectorMixin): """Feature selector that removes all low-variance features. This feature selection algorithm looks only at the features (X), not the desired outputs (y), and can thus be used for unsupervised learning. Read more in the :ref:`User Guide <variance_threshold>`. Parameters ---------- threshold : float, optional Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. Attributes ---------- variances_ : array, shape (n_features,) Variances of individual features. Examples -------- The following dataset has integer features, two of which are the same in every sample. These are removed with the default setting for threshold:: >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] >>> selector = VarianceThreshold() >>> selector.fit_transform(X) array([[2, 0], [1, 4], [1, 1]]) """ def __init__(self, threshold=0.): self.threshold = threshold def fit(self, X, y=None): """Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Sample vectors from which to compute variances. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self """ X = check_array(X, ('csr', 'csc'), dtype=np.float64) if hasattr(X, "toarray"): # sparse matrix _, self.variances_ = mean_variance_axis(X, axis=0) else: self.variances_ = np.var(X, axis=0) if np.all(self.variances_ <= self.threshold): msg = "No feature in X meets the variance threshold {0:.5f}" if X.shape[0] == 1: msg += " (X contains only one sample)" raise ValueError(msg.format(self.threshold)) return self def _get_support_mask(self): check_is_fitted(self, 'variances_') return self.variances_ > self.threshold
bsd-3-clause
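A short usage sketch for the selector defined above, with arbitrary toy data and an arbitrary threshold; the printed results follow from the variances of the three columns (the first is constant, the third varies only slightly).

import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0.0, 2.0, 0.10],
              [0.0, 1.0, 0.20],
              [0.0, 1.5, 0.15],
              [0.0, 1.2, 0.18]])

selector = VarianceThreshold(threshold=0.01)
X_reduced = selector.fit_transform(X)

print(selector.variances_)      # per-feature training variances
print(selector.get_support())   # [False  True False]: constant and near-constant columns dropped
print(X_reduced.shape)          # (4, 1)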
lavizhao/Tyrion
learner.py
1
4231
#coding: utf-8 ''' This is the main learning file ''' from data import load_label,load_data,load_data_total from scipy.sparse import csr_matrix import numpy as np from sklearn.naive_bayes import GaussianNB as NB from sklearn import linear_model from sklearn import svm from sklearn.ensemble import RandomForestClassifier as RF from sklearn.ensemble import ExtraTreesClassifier as ET from sklearn.ensemble import GradientBoostingClassifier as GBDT from sklearn.metrics import f1_score from sklearn.metrics import precision_score as pscore from sklearn.metrics import recall_score as rscore from data import get_conf import sys #work on the dev set first def learn(): clf = linear_model.SGDClassifier(penalty="l2",l1_ratio=0,alpha=0.001,class_weight={1:0.3,0:0.7},n_jobs=3) rd = 100 * 1000 iter_num = 4 for i in range(iter_num): print "round",i train = load_data("train",rd) train_label = load_label("train") train_label = np.array(train_label) count = 0 for ptrain in train: print "partial",count plabel = train_label[:rd] train_label = train_label[rd:] if sum(plabel) > 0.2 * len(plabel): print "number of positive examples",sum(plabel) assert len(ptrain) == len(plabel) clf.partial_fit(ptrain,plabel,classes=[0,1]) else : break count += 1 print 100 * "=" print "train_label",len(train_label) return clf #try LR and RF def learn_total(): clf = RF(n_estimators=200,max_features="auto",max_depth=8,min_samples_split=10,min_samples_leaf=2,n_jobs=3,oob_score=True,random_state=728) #max_depth = 8 works best #clf = GBDT(n_estimators=100,max_features="auto",max_depth=8,min_samples_split=10,min_samples_leaf=2,verbose=3) rd = 500 * 1000 train = load_data_total("train",rd) train_label = load_label("train") train_label = train_label[:len(train)] train_label = np.array(train_label) print "train_label",len(train_label),"train",len(train) print "number of train features",len(train[0]) print "learn" clf.fit(train,train_label) return clf def predict(clf): pred_test = "true" rd = 400000 temp_dev = load_data("dev",rd) dev = [] for pdev in temp_dev : dev.extend(pdev) dev_label = load_label("dev") print "dev sample size",len(dev),len(dev_label) print "number of dev features",len(dev[0]) result = clf.predict(dev) print "predicted positives on dev",sum(result) f1_s = f1_score(dev_label, result, average='binary') * 100.0 p_s = pscore(dev_label, result, average = 'binary') * 100.0 r_s = rscore(dev_label, result, average = 'binary') * 100.0 print "F1 score", f1_s print "precision",p_s print "recall",r_s print "hand-computed F1", 2 * p_s * r_s/(p_s + r_s) if pred_test == "false": sys.exit(1) cf = get_conf() f = open(cf["pred_dir"]) f.readline() test_data = open(cf["pred129"]) final = set() t = open(cf["final"],"w") t.write("user_id,item_id\n") count = 0 rd = 200000 ui_list = [] for tran in test_data: tran = tran.split(',') user,item = tran[0],tran[1] ui_list.append("%s,%s\n"%(user,item)) feature_list = [] for line in f: sp = line.split(',') sp = [int(i) for i in sp] feature_list.append(sp) count += 1 if count % rd == 0: res = clf.predict(feature_list) for i in range(len(res)): if res[i] == 1: final.add(ui_list[i]) feature_list = [] ui_list = ui_list[rd:] print count res = clf.predict(feature_list) for i in range(len(res)): if res[i] == 1: final.add(ui_list[i]) print "remaining lengths",len(res),len(ui_list) print "number of test predictions",len(final) for i in final: t.write(i) if __name__ == '__main__': clf = learn_total() #clf = learn() predict(clf)
mit
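A self-contained sketch of the chunked partial_fit pattern used by learn() above, with synthetic data standing in for the project's load_data/load_label helpers (which are not shown here); the feature dimension and chunk sizes are arbitrary.

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score

rng = np.random.RandomState(42)
clf = SGDClassifier(penalty="l2", alpha=0.001, class_weight={1: 0.3, 0: 0.7})

chunk_size, n_chunks = 10000, 8
for _ in range(n_chunks):
    X_chunk = rng.randn(chunk_size, 20)                      # stand-in for one load_data() chunk
    y_chunk = (X_chunk[:, 0] + 0.5 * rng.randn(chunk_size) > 1.0).astype(int)
    clf.partial_fit(X_chunk, y_chunk, classes=[0, 1])        # incremental update, chunk by chunk

X_dev = rng.randn(5000, 20)                                  # stand-in for the dev split
y_dev = (X_dev[:, 0] > 1.0).astype(int)
print("dev F1: %.3f" % f1_score(y_dev, clf.predict(X_dev)))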
akiradeveloper/blktrace
btt/btt_plot.py
8
13237
#! /usr/bin/env python # # btt_plot.py: Generate matplotlib plots for BTT generate data files # # (C) Copyright 2009 Hewlett-Packard Development Company, L.P. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """ btt_plot.py: Generate matplotlib plots for BTT generated data files Files handled: AQD - Average Queue Depth Running average of queue depths BNOS - Block numbers accessed Markers for each block Q2D - Queue to Issue latencies Running averages D2C - Issue to Complete latencies Running averages Q2C - Queue to Complete latencies Running averages Usage: btt_plot_aqd.py equivalent to: btt_plot.py -t aqd <type>=aqd btt_plot_bnos.py equivalent to: btt_plot.py -t bnos <type>=bnos btt_plot_q2d.py equivalent to: btt_plot.py -t q2d <type>=q2d btt_plot_d2c.py equivalent to: btt_plot.py -t d2c <type>=d2c btt_plot_q2c.py equivalent to: btt_plot.py -t q2c <type>=q2c Arguments: [ -A | --generate-all ] Default: False [ -L | --no-legend ] Default: Legend table produced [ -o <file> | --output=<file> ] Default: <type>.png [ -T <string> | --title=<string> ] Default: Based upon <type> [ -v | --verbose ] Default: False <data-files...> The -A (--generate-all) argument is different: when this is specified, an attempt is made to generate default plots for all 5 types (aqd, bnos, q2d, d2c and q2c). It will find files with the appropriate suffix for each type ('aqd.dat' for example). If such files are found, a plot for that type will be made. The output file name will be the default for each type. The -L (--no-legend) option will be obeyed for all plots, but the -o (--output) and -T (--title) options will be ignored. """ __author__ = 'Alan D. Brunelle <alan.brunelle@hp.com>' #------------------------------------------------------------------------------ import matplotlib matplotlib.use('Agg') import getopt, glob, os, sys import matplotlib.pyplot as plt plot_size = [10.9, 8.4] # inches... add_legend = True generate_all = False output_file = None title_str = None type = None verbose = False types = [ 'aqd', 'q2d', 'd2c', 'q2c', 'live', 'bnos' ] progs = [ 'btt_plot_%s.py' % t for t in types ] get_base = lambda file: file[file.find('_')+1:file.rfind('_')] #------------------------------------------------------------------------------ def fatal(msg): """Generate fatal error message and exit""" print >>sys.stderr, 'FATAL: %s' % msg sys.exit(1) #------------------------------------------------------------------------------ def gen_legends(ax, legends): leg = ax.legend(legends, 'best', shadow=True) frame = leg.get_frame() frame.set_facecolor('0.80') for t in leg.get_texts(): t.set_fontsize('xx-small') #---------------------------------------------------------------------- def get_data(files): """Retrieve data from files provided. 
Returns a database containing: 'min_x', 'max_x' - Minimum and maximum X values found 'min_y', 'max_y' - Minimum and maximum Y values found 'x', 'y' - X & Y value arrays 'ax', 'ay' - Running average over X & Y -- if > 10 values provided... """ #-------------------------------------------------------------- def check(mn, mx, v): """Returns new min, max, and float value for those passed in""" v = float(v) if mn == None or v < mn: mn = v if mx == None or v > mx: mx = v return mn, mx, v #-------------------------------------------------------------- def avg(xs, ys): """Computes running average for Xs and Ys""" #------------------------------------------------------ def _avg(vals): """Computes average for array of values passed""" total = 0.0 for val in vals: total += val return total / len(vals) #------------------------------------------------------ if len(xs) < 1000: return xs, ys axs = [xs[0]] ays = [ys[0]] _xs = [xs[0]] _ys = [ys[0]] x_range = (xs[-1] - xs[0]) / 100 for idx in range(1, len(ys)): if (xs[idx] - _xs[0]) > x_range: axs.append(_avg(_xs)) ays.append(_avg(_ys)) del _xs, _ys _xs = [xs[idx]] _ys = [ys[idx]] else: _xs.append(xs[idx]) _ys.append(ys[idx]) if len(_xs) > 1: axs.append(_avg(_xs)) ays.append(_avg(_ys)) return axs, ays #-------------------------------------------------------------- global verbose db = {} min_x = max_x = min_y = max_y = None for file in files: if not os.path.exists(file): fatal('%s not found' % file) elif verbose: print 'Processing %s' % file xs = [] ys = [] for line in open(file, 'r'): f = line.rstrip().split(None) if line.find('#') == 0 or len(f) < 2: continue (min_x, max_x, x) = check(min_x, max_x, f[0]) (min_y, max_y, y) = check(min_y, max_y, f[1]) xs.append(x) ys.append(y) db[file] = {'x':xs, 'y':ys} if len(xs) > 10: db[file]['ax'], db[file]['ay'] = avg(xs, ys) else: db[file]['ax'] = db[file]['ay'] = None db['min_x'] = min_x db['max_x'] = max_x db['min_y'] = min_y db['max_y'] = max_y return db #---------------------------------------------------------------------- def parse_args(args): """Parse command line arguments. Returns list of (data) files that need to be processed -- /unless/ the -A (--generate-all) option is passed, in which case superfluous data files are ignored... 
""" global add_legend, output_file, title_str, type, verbose global generate_all prog = args[0][args[0].rfind('/')+1:] if prog == 'btt_plot.py': pass elif not prog in progs: fatal('%s not a valid command name' % prog) else: type = prog[prog.rfind('_')+1:prog.rfind('.py')] s_opts = 'ALo:t:T:v' l_opts = [ 'generate-all', 'type', 'no-legend', 'output', 'title', 'verbose' ] try: (opts, args) = getopt.getopt(args[1:], s_opts, l_opts) except getopt.error, msg: print >>sys.stderr, msg fatal(__doc__) for (o, a) in opts: if o in ('-A', '--generate-all'): generate_all = True elif o in ('-L', '--no-legend'): add_legend = False elif o in ('-o', '--output'): output_file = a elif o in ('-t', '--type'): if not a in types: fatal('Type %s not supported' % a) type = a elif o in ('-T', '--title'): title_str = a elif o in ('-v', '--verbose'): verbose = True if type == None and not generate_all: fatal('Need type of data files to process - (-t <type>)') return args #------------------------------------------------------------------------------ def gen_title(fig, type, title_str): """Sets the title for the figure based upon the type /or/ user title""" if title_str != None: pass elif type == 'aqd': title_str = 'Average Queue Depth' elif type == 'bnos': title_str = 'Block Numbers Accessed' elif type == 'q2d': title_str = 'Queue (Q) To Issue (D) Average Latencies' elif type == 'd2c': title_str = 'Issue (D) To Complete (C) Average Latencies' elif type == 'q2c': title_str = 'Queue (Q) To Complete (C) Average Latencies' title = fig.text(.5, .95, title_str, horizontalalignment='center') title.set_fontsize('large') #------------------------------------------------------------------------------ def gen_labels(db, ax, type): """Generate X & Y 'axis'""" #---------------------------------------------------------------------- def gen_ylabel(ax, type): """Set the Y axis label based upon the type""" if type == 'aqd': str = 'Number of Requests Queued' elif type == 'bnos': str = 'Block Number' else: str = 'Seconds' ax.set_ylabel(str) #---------------------------------------------------------------------- xdelta = 0.1 * (db['max_x'] - db['min_x']) ydelta = 0.1 * (db['max_y'] - db['min_y']) ax.set_xlim(db['min_x'] - xdelta, db['max_x'] + xdelta) ax.set_ylim(db['min_y'] - ydelta, db['max_y'] + ydelta) ax.set_xlabel('Runtime (seconds)') ax.grid(True) gen_ylabel(ax, type) #------------------------------------------------------------------------------ def generate_output(type, db): """Generate the output plot based upon the type and database""" #---------------------------------------------------------------------- def color(idx, style): """Returns a color/symbol type based upon the index passed.""" colors = [ 'b', 'g', 'r', 'c', 'm', 'y', 'k' ] l_styles = [ '-', ':', '--', '-.' 
] m_styles = [ 'o', '+', '.', ',', 's', 'v', 'x', '<', '>' ] color = colors[idx % len(colors)] if style == 'line': style = l_styles[(idx / len(l_styles)) % len(l_styles)] elif style == 'marker': style = m_styles[(idx / len(m_styles)) % len(m_styles)] return '%s%s' % (color, style) #---------------------------------------------------------------------- global add_legend, output_file, title_str, verbose if output_file != None: ofile = output_file else: ofile = '%s.png' % type if verbose: print 'Generating plot into %s' % ofile fig = plt.figure(figsize=plot_size) ax = fig.add_subplot(111) gen_title(fig, type, title_str) gen_labels(db, ax, type) idx = 0 if add_legend: legends = [] else: legends = None keys = [] for file in db.iterkeys(): if not file in ['min_x', 'max_x', 'min_y', 'max_y']: keys.append(file) keys.sort() for file in keys: dat = db[file] if type == 'bnos': ax.plot(dat['x'], dat['y'], color(idx, 'marker'), markersize=1) elif dat['ax'] == None: continue # Don't add legend else: ax.plot(dat['ax'], dat['ay'], color(idx, 'line'), linewidth=1.0) if add_legend: legends.append(get_base(file)) idx += 1 if add_legend and len(legends) > 0: gen_legends(ax, legends) plt.savefig(ofile) #------------------------------------------------------------------------------ def get_files(type): """Returns the list of files for the -A option based upon type""" if type == 'bnos': files = [] for fn in glob.glob('*c.dat'): for t in [ 'q2q', 'd2d', 'q2c', 'd2c' ]: if fn.find(t) >= 0: break else: files.append(fn) else: files = glob.glob('*%s.dat' % type) return files #------------------------------------------------------------------------------ def do_bnos(files): for file in files: base = get_base(file) title_str = 'Block Numbers Accessed: %s' % base output_file = 'bnos_%s.png' % base generate_output(t, get_data([file])) #------------------------------------------------------------------------------ def do_live(files): global plot_size #---------------------------------------------------------------------- def get_live_data(fn): xs = [] ys = [] for line in open(fn, 'r'): f = line.rstrip().split() if f[0] != '#' and len(f) == 2: xs.append(float(f[0])) ys.append(float(f[1])) return xs, ys #---------------------------------------------------------------------- def live_sort(a, b): if a[0] == 'sys' and b[0] == 'sys': return 0 elif a[0] == 'sys' or a[2][0] < b[2][0]: return -1 elif b[0] == 'sys' or a[2][0] > b[2][0]: return 1 else: return 0 #---------------------------------------------------------------------- def turn_off_ticks(ax): for tick in ax.xaxis.get_major_ticks(): tick.tick1On = tick.tick2On = False for tick in ax.yaxis.get_major_ticks(): tick.tick1On = tick.tick2On = False for tick in ax.xaxis.get_minor_ticks(): tick.tick1On = tick.tick2On = False for tick in ax.yaxis.get_minor_ticks(): tick.tick1On = tick.tick2On = False #---------------------------------------------------------------------- fig = plt.figure(figsize=plot_size) ax = fig.add_subplot(111) db = [] for fn in files: if not os.path.exists(fn): continue (xs, ys) = get_live_data(fn) db.append([fn[:fn.find('_live.dat')], xs, ys]) db.sort(live_sort) for rec in db: ax.plot(rec[1], rec[2]) gen_title(fig, 'live', 'Active I/O Per Device') ax.set_xlabel('Runtime (seconds)') ax.set_ylabel('Device') ax.grid(False) ax.set_xlim(-0.1, db[0][1][-1]+1) ax.set_yticks([idx for idx in range(0, len(db))]) ax.yaxis.set_ticklabels([rec[0] for rec in db]) turn_off_ticks(ax) plt.savefig('live.png') plt.savefig('live.eps') 
#------------------------------------------------------------------------------ if __name__ == '__main__': files = parse_args(sys.argv) if generate_all: output_file = title_str = type = None for t in types: files = get_files(t) if len(files) == 0: continue elif t == 'bnos': do_bnos(files) elif t == 'live': do_live(files) else: generate_output(t, get_data(files)) continue elif len(files) < 1: fatal('Need data files to process') else: generate_output(type, get_data(files)) sys.exit(0)
gpl-2.0
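get_data()/avg() above smooth each data file by averaging (x, y) points over windows of roughly 1% of the x-range before plotting. The sketch below reproduces that idea on toy data with a simple fixed-bucket average; it is only an illustration of the running-average step, not the script's exact algorithm.

import numpy as np
import matplotlib
matplotlib.use('Agg')            # headless backend, as in btt_plot.py
import matplotlib.pyplot as plt

x = np.linspace(0, 100, 5000)                        # toy "runtime" axis
y = np.sin(x / 5.0) + np.random.normal(0, 0.3, x.size)

n_buckets = 100                                      # ~1% of the x-range per bucket
edges = np.linspace(x.min(), x.max(), n_buckets + 1)
bucket = np.clip(np.digitize(x, edges) - 1, 0, n_buckets - 1)
ax_avg = np.array([x[bucket == b].mean() for b in range(n_buckets)])
ay_avg = np.array([y[bucket == b].mean() for b in range(n_buckets)])

fig, ax = plt.subplots(figsize=(10.9, 8.4))
ax.plot(x, y, ',', alpha=0.3)                        # raw samples
ax.plot(ax_avg, ay_avg, 'r-', linewidth=1.0)         # running average
ax.set_xlabel('Runtime (seconds)')
ax.grid(True)
plt.savefig('running_avg.png')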
bikong2/scikit-learn
examples/covariance/plot_robust_vs_empirical_covariance.py
248
6359
r""" ======================================= Robust vs Empirical covariance estimate ======================================= The usual covariance maximum likelihood estimate is very sensitive to the presence of outliers in the data set. In such a case, it would be better to use a robust estimator of covariance to guarantee that the estimation is resistant to "erroneous" observations in the data set. Minimum Covariance Determinant Estimator ---------------------------------------- The Minimum Covariance Determinant estimator is a robust, high-breakdown point (i.e. it can be used to estimate the covariance matrix of highly contaminated datasets, up to :math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of covariance. The idea is to find :math:`\frac{n_\text{samples} + n_\text{features}+1}{2}` observations whose empirical covariance has the smallest determinant, yielding a "pure" subset of observations from which to compute standards estimates of location and covariance. After a correction step aiming at compensating the fact that the estimates were learned from only a portion of the initial data, we end up with robust estimates of the data set location and covariance. The Minimum Covariance Determinant estimator (MCD) has been introduced by P.J.Rousseuw in [1]_. Evaluation ---------- In this example, we compare the estimation errors that are made when using various types of location and covariance estimates on contaminated Gaussian distributed data sets: - The mean and the empirical covariance of the full dataset, which break down as soon as there are outliers in the data set - The robust MCD, that has a low error provided :math:`n_\text{samples} > 5n_\text{features}` - The mean and the empirical covariance of the observations that are known to be good ones. This can be considered as a "perfect" MCD estimation, so one can trust our implementation by comparing to this case. References ---------- .. [1] P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984. .. [2] Johanna Hardin, David M Rocke. Journal of Computational and Graphical Statistics. December 1, 2005, 14(4): 928-946. .. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust estimation in signal processing: A tutorial-style treatment of fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn.covariance import EmpiricalCovariance, MinCovDet # example settings n_samples = 80 n_features = 5 repeat = 10 range_n_outliers = np.concatenate( (np.linspace(0, n_samples / 8, 5), np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])) # definition of arrays to store results err_loc_mcd = np.zeros((range_n_outliers.size, repeat)) err_cov_mcd = np.zeros((range_n_outliers.size, repeat)) err_loc_emp_full = np.zeros((range_n_outliers.size, repeat)) err_cov_emp_full = np.zeros((range_n_outliers.size, repeat)) err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat)) err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat)) # computation for i, n_outliers in enumerate(range_n_outliers): for j in range(repeat): rng = np.random.RandomState(i * j) # generate data X = rng.randn(n_samples, n_features) # add some outliers outliers_index = rng.permutation(n_samples)[:n_outliers] outliers_offset = 10. 
* \ (np.random.randint(2, size=(n_outliers, n_features)) - 0.5) X[outliers_index] += outliers_offset inliers_mask = np.ones(n_samples).astype(bool) inliers_mask[outliers_index] = False # fit a Minimum Covariance Determinant (MCD) robust estimator to data mcd = MinCovDet().fit(X) # compare raw robust estimates with the true location and covariance err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2) err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features)) # compare estimators learned from the full data set with true # parameters err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2) err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm( np.eye(n_features)) # compare with an empirical covariance learned from a pure data set # (i.e. "perfect" mcd) pure_X = X[inliers_mask] pure_location = pure_X.mean(0) pure_emp_cov = EmpiricalCovariance().fit(pure_X) err_loc_emp_pure[i, j] = np.sum(pure_location ** 2) err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features)) # Display results font_prop = matplotlib.font_manager.FontProperties(size=11) plt.subplot(2, 1, 1) plt.errorbar(range_n_outliers, err_loc_mcd.mean(1), yerr=err_loc_mcd.std(1) / np.sqrt(repeat), label="Robust location", color='m') plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1), yerr=err_loc_emp_full.std(1) / np.sqrt(repeat), label="Full data set mean", color='green') plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1), yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat), label="Pure data set mean", color='black') plt.title("Influence of outliers on the location estimation") plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)") plt.legend(loc="upper left", prop=font_prop) plt.subplot(2, 1, 2) x_size = range_n_outliers.size plt.errorbar(range_n_outliers, err_cov_mcd.mean(1), yerr=err_cov_mcd.std(1), label="Robust covariance (mcd)", color='m') plt.errorbar(range_n_outliers[:(x_size / 5 + 1)], err_cov_emp_full.mean(1)[:(x_size / 5 + 1)], yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)], label="Full data set empirical covariance", color='green') plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)], err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green', ls='--') plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1), yerr=err_cov_emp_pure.std(1), label="Pure data set empirical covariance", color='black') plt.title("Influence of outliers on the covariance estimation") plt.xlabel("Amount of contamination (%)") plt.ylabel("RMSE") plt.legend(loc="upper center", prop=font_prop) plt.show()
bsd-3-clause
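# Illustrative sketch (not part of the corpus record above): a minimal, hedged
# comparison of MinCovDet against EmpiricalCovariance on a small contaminated
# sample, using only the estimator names that appear in the preceding file.
# The data shape, seed, and outlier offset below are assumptions for illustration.
import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
X = rng.randn(100, 5)          # inliers drawn from N(0, I)
X[:10] += 10.0                 # a handful of gross outliers
emp = EmpiricalCovariance().fit(X)
mcd = MinCovDet(random_state=0).fit(X)
# error_norm measures the discrepancy from the true covariance (identity here);
# the robust MCD estimate should be much closer than the empirical one.
print(emp.error_norm(np.eye(5)), mcd.error_norm(np.eye(5)))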
hrjn/scikit-learn
examples/decomposition/plot_pca_iris.py
49
1511
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
PCA example with Iris Data-set
=========================================================

Principal Component Analysis applied to the Iris dataset.

See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from sklearn import decomposition
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral,
           edgecolor='k')

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

plt.show()
bsd-3-clause
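# Illustrative sketch (not part of the corpus record above): a minimal, hedged
# usage example of the PCA call shown in the preceding file, printing the
# explained-variance ratio instead of producing the 3D plot.
from sklearn import datasets, decomposition

iris = datasets.load_iris()
pca = decomposition.PCA(n_components=3).fit(iris.data)
X_reduced = pca.transform(iris.data)    # shape (150, 3)
print(pca.explained_variance_ratio_)    # fraction of variance kept per component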
rohanp11/IITIGNSSR
src/vtecvtime.py
1
7938
# Imports import os,copy,csv import numpy as np import pandas as pd import matplotlib.pyplot as plt from math import radians,sin from datetime import date, timedelta as td # Function to cheak if leap year def checkleap(year): return ((year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0))) # Date of the year Conversion def convert_date(day,month,year): if checkleap(year)==True: days = [31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335] if month == 1: return day else: return day+days[month-2] else: days = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] if month == 1: return day else: return day+days[month-2] #Function to check presence of Nan def checknonan(df): for i in df: if np.isnan(i): return False return True #Function to count number of Nan def countnan(df): count = 0 for i in df: if np.isnan(i): count = count+1 return count #Function to convet time of week in seconds to hours def gettime(times): hours = 0.0 minutes = 0.0 t = -1 tm = [] for each in times: if t!=each: minutes = minutes+1 if minutes>60: hours = hours+1 minutes = minutes%60 t = each tm1 = float(hours+(float(minutes/60))) tm.append(tm1) return tm #Function to check validity of dates def validdt(start_date,start_month,start_year,end_date,end_month,end_year,date_from,date_upto): if start_year>end_year or (start_year<date_from.year or end_year>date_upto.year) : return False elif start_year==end_year and (start_year==date_from.year and end_year==date_upto.year) and (start_month>end_month or start_month<date_from.month or end_month>date_upto.month): return False elif start_year==end_year and (start_year==date_from.year and end_year==date_upto.year) and start_month==end_month and (start_month==date_from.month and end_month==date_upto.month) and (start_date>end_date or start_date<date_from.day or end_date>date_upto.day): return False return True #Function to obtain range of dates def daterange(start_date, end_date): for n in range(int ((end_date - start_date).days)): yield start_date + td(n) #Function to calculate VTEC def conv_vtec(tec,ele): vtec = [] for i,j in zip(tec,ele): vtec.append(float(i)*float(sin(radians(float(j))))) return vtec #Function to convert folder name into human readable format date def conv_readdate(dt): dt_year = 2000+int(dt/1000) dt_doy = dt%1000 t = date.fromordinal(date(dt_year, 1, 1).toordinal() + dt_doy - 1) return t def main(): #Check latest date of the data available os.chdir('/home/deathstroke/projects/IITI_GNSSR/data/') sub = [x for x in os.listdir('.') if os.path.isdir(x)] dt = max(sub) date_upto = conv_readdate(int(dt)) os.chdir('/home/deathstroke/projects/IITI_GNSSR/iiti_gnssr/') #Check oldest date of the data available os.chdir('/home/deathstroke/projects/IITI_GNSSR/data/') sub = [x for x in os.listdir('.') if os.path.isdir(x)] dt = min(sub) date_from = conv_readdate(int(dt)) os.chdir('/home/deathstroke/projects/IITI_GNSSR/iiti_gnssr/') print ("\nData available from %s to %s\n" %(date_from,date_upto)) alpha=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X'] #Taking valid start and end dates as input from user validity = False while(validity!=True): start_date = int(input("Enter Start Date(dd):")) start_month = int(input("Enter Start Month(mm):")) start_year = int(input("Enter Start Year(yyyy):")) print ("\n") end_date = int(input("Enter End Date(dd):")) end_month = int(input("Enter End Month(mm):")) end_year = int(input("Enter End Year(yyyy):")) print ("\n") validity = 
validdt(start_date,start_month,start_year,end_date,end_month,end_year,date_from,date_upto) if validity == False: print ("\nPlease enter valid start and end dates\n") #Conversion into datetime format d1 = date(start_year,start_month,start_date) d2 = date(end_year,end_month,end_date+1) d3 = date(end_year,end_month,end_date) #Reading and storing data from different files frames = [] for single_date in daterange(d1,d2): curr_date = str(convert_date(int(single_date.day),int(single_date.month),int(single_date.year))) curr_folder = str(str(int(single_date.year)%2000)+str(curr_date)) for letter in alpha: try: filename = str('IITI'+curr_date+letter+'.'+'16_.ismr') with open('/home/deathstroke/projects/IITI_GNSSR/data/%s/%s' %(curr_folder,filename)) as f: df = pd.read_csv(f,usecols=[1,2,5,22],names=['time','svid','elevation','TEC']) frames.append(df) except (IOError): df1 = copy.deepcopy(frames[len(frames)-1]) df1['time']=df['time']+3600 tec = ['nan' for each in df1['time']] df1['TEC'] = tec frames.append(df1) result =pd.concat(frames) result['t-hrs'] = gettime(result['time']) dfm = result.groupby('svid') svid = set() for elements in result['svid']: svid.add(elements) svid1 = sorted(svid) cnt = 0 while(cnt!=1): print ( '''Choose the satellite constellation whose data is required:- 1. GPS 2. GLONASS ''' ) constl = int(input(">> ")) if constl==1: for each in svid1: if each>37: svid.remove(each) svid2 = sorted(svid) n = 37 constl = 'gps' cnt=1 elif constl==2: for each in svid1: if each<38 or each>61: svid.remove(each) svid2 = sorted(svid) constl = 'glonass' n = 24 cnt=1 else: print ("\nPlease enter a valid input") #Calculating average data points for plotting sumtime = 0 count = 0 for each in svid2: dftemp = dfm.get_group(each) timedf = np.array(dftemp['time']) # tecdf = np.array(dftemp['TEC'],dtype=float) vtecdf = np.array(conv_vtec(dftemp['TEC'],dftemp['elevation']),dtype=float) sumtime = sumtime+(timedf.size-countnan(vtecdf)) count = count+1 avg = sumtime/count val = avg #Counting the number of plots count = 0 for each in svid2: dftemp = dfm.get_group(each) timedf = np.array(dftemp['t-hrs']) vtecdf = np.array(conv_vtec(dftemp['TEC'],dftemp['elevation']),dtype=float) if timedf.size-countnan(vtecdf)>val: count = count +1 #Plotting each satellite with datapoints greater than average clr = iter(plt.cm.rainbow(np.linspace(0,1,count))) handles = [] for each in svid2: dftemp = dfm.get_group(each) timedf = np.array(dftemp['t-hrs']) # tecdf = np.array(dftemp['TEC'],dtype=float) vtecdf = np.array(conv_vtec(dftemp['TEC'],dftemp['elevation']),dtype=float) if timedf.size-countnan(vtecdf)>val: cl = next(clr) plt.plot(timedf,vtecdf,label='%d' %each,c=cl) handles.append(str(each)) import csv with open('/media/deathstroke/Work/Study/Internship/IIT-I/iri_18865lst21may.txt') as f: irifile = csv.reader(f,delimiter=',') iritime = [] irivtec = [] for each in irifile: iritime.append(each[0]) irivtec.append(each[1]) plt.plot(iritime,irivtec,c='black',label='IRI data') # Ensure that the axis ticks only show up on the bottom and left of the plot. # Ticks on the right and top of the plot are generally unnecessary chartjunk. ax = plt.subplot(111) ax.spines["top"].set_visible(False) # ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) # ax.spines["left"].set_visible(False) # Ensure that the axis ticks only show up on the bottom and left of the plot. # Ticks on the right and top of the plot are generally unnecessary chartjunk. 
ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.xlabel('Time in hours(0 is 5:30 AM IST on %s)' %d1) plt.ylabel(r'Value of Vertical-TEC(in TECU x $\frac{10^{16}}{m^2}$)',fontsize=16) plt.title('VTEC calculated from TEC data collected from %s constellation for %s to %s \nShowing satellites with %d+ datapoints' %(constl.upper(),d1,d3,val)) plt.legend(bbox_to_anchor=(1, 1), loc='upper left',prop={'size':12}, borderaxespad=0.,frameon=False) plt.show() if __name__=="__main__": main()
mit
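# Illustrative sketch (not part of the corpus record above): the slant-to-vertical
# TEC mapping used by conv_vtec() in the preceding file, shown standalone on
# hypothetical sample values (the TEC and elevation numbers below are made up).
from math import radians, sin

def conv_vtec(tec, ele):
    # VTEC ~= STEC * sin(elevation), the same single-layer approximation as above
    return [float(t) * sin(radians(float(e))) for t, e in zip(tec, ele)]

print(conv_vtec([12.5, 20.0], [45.0, 80.0]))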
lenovor/scikit-learn
sklearn/preprocessing/label.py
35
28877
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Joel Nothman <joel.nothman@gmail.com> # Hamzeh Alsalhi <ha258@cornell.edu> # License: BSD 3 clause from collections import defaultdict import itertools import array import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..utils.fixes import np_version from ..utils.fixes import sparse_min_max from ..utils.fixes import astype from ..utils.fixes import in1d from ..utils import deprecated, column_or_1d from ..utils.validation import check_array from ..utils.validation import _num_samples from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..externals import six zip = six.moves.zip map = six.moves.map __all__ = [ 'label_binarize', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', ] def _check_numpy_unicode_bug(labels): """Check that user is not subject to an old numpy bug Fixed in master before 1.7.0: https://github.com/numpy/numpy/pull/243 """ if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U': raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted" " on unicode data correctly. Please upgrade" " NumPy to use LabelEncoder with unicode inputs.") class LabelEncoder(BaseEstimator, TransformerMixin): """Encode labels with value between 0 and n_classes-1. Read more in the :ref:`User Guide <preprocessing_targets>`. Attributes ---------- classes_ : array of shape (n_class,) Holds the label for each class. Examples -------- `LabelEncoder` can be used to normalize labels. >>> from sklearn import preprocessing >>> le = preprocessing.LabelEncoder() >>> le.fit([1, 2, 2, 6]) LabelEncoder() >>> le.classes_ array([1, 2, 6]) >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS array([0, 0, 1, 2]...) >>> le.inverse_transform([0, 0, 1, 2]) array([1, 1, 2, 6]) It can also be used to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels. >>> le = preprocessing.LabelEncoder() >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) LabelEncoder() >>> list(le.classes_) ['amsterdam', 'paris', 'tokyo'] >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS array([2, 2, 1]...) >>> list(le.inverse_transform([2, 2, 1])) ['tokyo', 'tokyo', 'paris'] """ def _check_fitted(self): if not hasattr(self, "classes_"): raise ValueError("LabelEncoder was not fitted yet.") def fit(self, y): """Fit label encoder Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. """ y = column_or_1d(y, warn=True) _check_numpy_unicode_bug(y) self.classes_ = np.unique(y) return self def fit_transform(self, y): """Fit label encoder and return encoded labels Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples] """ y = column_or_1d(y, warn=True) _check_numpy_unicode_bug(y) self.classes_, y = np.unique(y, return_inverse=True) return y def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape [n_samples] Target values. 
Returns ------- y : array-like of shape [n_samples] """ self._check_fitted() classes = np.unique(y) _check_numpy_unicode_bug(classes) if len(np.intersect1d(classes, self.classes_)) < len(classes): diff = np.setdiff1d(classes, self.classes_) raise ValueError("y contains new labels: %s" % str(diff)) return np.searchsorted(self.classes_, y) def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : numpy array of shape [n_samples] Target values. Returns ------- y : numpy array of shape [n_samples] """ self._check_fitted() diff = np.setdiff1d(y, np.arange(len(self.classes_))) if diff: raise ValueError("y contains new labels: %s" % str(diff)) y = np.asarray(y) return self.classes_[y] class LabelBinarizer(BaseEstimator, TransformerMixin): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in the scikit. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. At learning time, this simply consists in learning one regressor or binary classifier per class. In doing so, one needs to convert multi-class labels to binary labels (belong or does not belong to the class). LabelBinarizer makes this process easy with the transform method. At prediction time, one assigns the class for which the corresponding model gave the greatest confidence. LabelBinarizer makes this easy with the inverse_transform method. Read more in the :ref:`User Guide <preprocessing_targets>`. Parameters ---------- neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False) True if the returned array from transform is desired to be in sparse CSR format. Attributes ---------- classes_ : array of shape [n_class] Holds the label for each class. y_type_ : str, Represents the type of the target data as evaluated by utils.multiclass.type_of_target. Possible type are 'continuous', 'continuous-multioutput', 'binary', 'multiclass', 'mutliclass-multioutput', 'multilabel-sequences', 'multilabel-indicator', and 'unknown'. multilabel_ : boolean True if the transformer was fitted on a multilabel rather than a multiclass set of labels. The ``multilabel_`` attribute is deprecated and will be removed in 0.18 sparse_input_ : boolean, True if the input data to transform is given as a sparse matrix, False otherwise. indicator_matrix_ : str 'sparse' when the input data to tansform is a multilable-indicator and is sparse, None otherwise. 
The ``indicator_matrix_`` attribute is deprecated as of version 0.16 and will be removed in 0.18 Examples -------- >>> from sklearn import preprocessing >>> lb = preprocessing.LabelBinarizer() >>> lb.fit([1, 2, 6, 4, 2]) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([1, 2, 4, 6]) >>> lb.transform([1, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) Binary targets transform to a column vector >>> lb = preprocessing.LabelBinarizer() >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) array([[1], [0], [0], [1]]) Passing a 2D matrix for multilabel classification >>> import numpy as np >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([0, 1, 2]) >>> lb.transform([0, 1, 2, 1]) array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) See also -------- label_binarize : function to perform the transform operation of LabelBinarizer with fixed classes. """ def __init__(self, neg_label=0, pos_label=1, sparse_output=False): if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if sparse_output and (pos_label == 0 or neg_label != 0): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) self.neg_label = neg_label self.pos_label = pos_label self.sparse_output = sparse_output @property @deprecated("Attribute ``indicator_matrix_`` is deprecated and will be " "removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` " "instead") def indicator_matrix_(self): return self.y_type_ == 'multilabel-indicator' @property @deprecated("Attribute ``multilabel_`` is deprecated and will be removed " "in 0.17. Use ``y_type_.startswith('multilabel')`` " "instead") def multilabel_(self): return self.y_type_.startswith('multilabel') def _check_fitted(self): if not hasattr(self, "classes_"): raise ValueError("LabelBinarizer was not fitted yet.") def fit(self, y): """Fit label binarizer Parameters ---------- y : numpy array of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Returns ------- self : returns an instance of self. """ self.y_type_ = type_of_target(y) if 'multioutput' in self.y_type_: raise ValueError("Multioutput target data is not supported with " "label binarization") if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) self.sparse_input_ = sp.issparse(y) self.classes_ = unique_labels(y) return self def transform(self, y): """Transform multi-class labels to binary labels The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters ---------- y : numpy array or sparse matrix of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. 
""" self._check_fitted() y_is_multilabel = type_of_target(y).startswith('multilabel') if y_is_multilabel and not self.y_type_.startswith('multilabel'): raise ValueError("The object was not fitted with multilabel" " input.") return label_binarize(y, self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output) def inverse_transform(self, Y, threshold=None): """Transform binary labels back to multi-class labels Parameters ---------- Y : numpy array or sparse matrix with shape [n_samples, n_classes] Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float or None Threshold used in the binary and multi-label cases. Use 0 when: - Y contains the output of decision_function (classifier) Use 0.5 when: - Y contains the output of predict_proba If None, the threshold is assumed to be half way between neg_label and pos_label. Returns ------- y : numpy array or CSR matrix of shape [n_samples] Target values. Notes ----- In the case when the binary labels are fractional (probabilistic), inverse_transform chooses the class with the greatest value. Typically, this allows to use the output of a linear model's decision_function method directly as the input of inverse_transform. """ self._check_fitted() if threshold is None: threshold = (self.pos_label + self.neg_label) / 2. if self.y_type_ == "multiclass": y_inv = _inverse_binarize_multiclass(Y, self.classes_) else: y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold) if self.sparse_input_: y_inv = sp.csr_matrix(y_inv) elif sp.issparse(y_inv): y_inv = y_inv.toarray() return y_inv def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False, multilabel=None): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in the scikit. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. This function makes it possible to compute this transformation for a fixed set of class labels known ahead of time. Parameters ---------- y : array-like Sequence of integer labels or multilabel data to encode. classes : array-like of shape [n_classes] Uniquely holds the label for each class. neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False), Set to true if output binary array is desired in CSR sparse format Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. 
Examples -------- >>> from sklearn.preprocessing import label_binarize >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) The class ordering is preserved: >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) array([[1, 0, 0, 0], [0, 1, 0, 0]]) Binary targets transform to a column vector >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) array([[1], [0], [0], [1]]) See also -------- LabelBinarizer : class used to wrap the functionality of label_binarize and allow for fitting to classes independently of the transform operation """ if not isinstance(y, list): # XXX Workaround that will be removed when list of list format is # dropped y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None) else: if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if (sparse_output and (pos_label == 0 or neg_label != 0)): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) if multilabel is not None: warnings.warn("The multilabel parameter is deprecated as of version " "0.15 and will be removed in 0.17. The parameter is no " "longer necessary because the value is automatically " "inferred.", DeprecationWarning) # To account for pos_label == 0 in the dense case pos_switch = pos_label == 0 if pos_switch: pos_label = -neg_label y_type = type_of_target(y) if 'multioutput' in y_type: raise ValueError("Multioutput target data is not supported with label " "binarization") n_samples = y.shape[0] if sp.issparse(y) else len(y) n_classes = len(classes) classes = np.asarray(classes) if y_type == "binary": if len(classes) == 1: Y = np.zeros((len(y), 1), dtype=np.int) Y += neg_label return Y elif len(classes) >= 3: y_type = "multiclass" sorted_class = np.sort(classes) if (y_type == "multilabel-indicator" and classes.size != y.shape[1]): raise ValueError("classes {0} missmatch with the labels {1}" "found in the data".format(classes, unique_labels(y))) if y_type in ("binary", "multiclass"): y = column_or_1d(y) # pick out the known labels from y y_in_classes = in1d(y, classes) y_seen = y[y_in_classes] indices = np.searchsorted(sorted_class, y_seen) indptr = np.hstack((0, np.cumsum(y_in_classes))) data = np.empty_like(indices) data.fill(pos_label) Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) elif y_type == "multilabel-indicator": Y = sp.csr_matrix(y) if pos_label != 1: data = np.empty_like(Y.data) data.fill(pos_label) Y.data = data elif y_type == "multilabel-sequences": Y = MultiLabelBinarizer(classes=classes, sparse_output=sparse_output).fit_transform(y) if sp.issparse(Y): Y.data[:] = pos_label else: Y[Y == 1] = pos_label return Y if not sparse_output: Y = Y.toarray() Y = astype(Y, int, copy=False) if neg_label != 0: Y[Y == 0] = neg_label if pos_switch: Y[Y == pos_label] = 0 else: Y.data = astype(Y.data, int, copy=False) # preserve label ordering if np.any(classes != sorted_class): indices = np.searchsorted(sorted_class, classes) Y = Y[:, indices] if y_type == "binary": if sparse_output: Y = Y.getcol(-1) else: Y = Y[:, -1].reshape((-1, 1)) return Y def _inverse_binarize_multiclass(y, classes): """Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold. 
""" classes = np.asarray(classes) if sp.issparse(y): # Find the argmax for each row in y where y is a CSR matrix y = y.tocsr() n_samples, n_outputs = y.shape outputs = np.arange(n_outputs) row_max = sparse_min_max(y, 1)[1] row_nnz = np.diff(y.indptr) y_data_repeated_max = np.repeat(row_max, row_nnz) # picks out all indices obtaining the maximum per row y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) # For corner case where last row has a max of 0 if row_max[-1] == 0: y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) # Gets the index of the first argmax in each row from y_i_all_argmax index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) # first argmax of each row y_ind_ext = np.append(y.indices, [0]) y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] # Handle rows of all 0 y_i_argmax[np.where(row_nnz == 0)[0]] = 0 # Handles rows with max of 0 that contain negative numbers samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] for i in samples: ind = y.indices[y.indptr[i]:y.indptr[i + 1]] y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] return classes[y_i_argmax] else: return classes.take(y.argmax(axis=1), mode="clip") def _inverse_binarize_thresholding(y, output_type, classes, threshold): """Inverse label binarization transformation using thresholding.""" if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: raise ValueError("output_type='binary', but y.shape = {0}". format(y.shape)) if output_type != "binary" and y.shape[1] != len(classes): raise ValueError("The number of class is not equal to the number of " "dimension of y.") classes = np.asarray(classes) # Perform thresholding if sp.issparse(y): if threshold > 0: if y.format not in ('csr', 'csc'): y = y.tocsr() y.data = np.array(y.data > threshold, dtype=np.int) y.eliminate_zeros() else: y = np.array(y.toarray() > threshold, dtype=np.int) else: y = np.array(y > threshold, dtype=np.int) # Inverse transform data if output_type == "binary": if sp.issparse(y): y = y.toarray() if y.ndim == 2 and y.shape[1] == 2: return classes[y[:, 1]] else: if len(classes) == 1: y = np.empty(len(y), dtype=classes.dtype) y.fill(classes[0]) return y else: return classes[y.ravel()] elif output_type == "multilabel-indicator": return y elif output_type == "multilabel-sequences": warnings.warn('Direct support for sequence of sequences multilabel ' 'representation will be unavailable from version 0.17. ' 'Use sklearn.preprocessing.MultiLabelBinarizer to ' 'convert to a label indicator representation.', DeprecationWarning) mlb = MultiLabelBinarizer(classes=classes).fit([]) return mlb.inverse_transform(y) else: raise ValueError("{0} format is not supported".format(output_type)) class MultiLabelBinarizer(BaseEstimator, TransformerMixin): """Transform between iterable of iterables and a multilabel format Although a list of sets or tuples is a very intuitive format for multilabel data, it is unwieldy to process. This transformer converts between this intuitive format and the supported multilabel format: a (samples x classes) binary matrix indicating the presence of a class label. Parameters ---------- classes : array-like of shape [n_classes] (optional) Indicates an ordering for the class labels sparse_output : boolean (default: False), Set to true if output binary array is desired in CSR sparse format Attributes ---------- classes_ : array of labels A copy of the `classes` parameter where provided, or otherwise, the sorted set of classes found when fitting. 
Examples -------- >>> mlb = MultiLabelBinarizer() >>> mlb.fit_transform([(1, 2), (3,)]) array([[1, 1, 0], [0, 0, 1]]) >>> mlb.classes_ array([1, 2, 3]) >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])]) array([[0, 1, 1], [1, 0, 0]]) >>> list(mlb.classes_) ['comedy', 'sci-fi', 'thriller'] """ def __init__(self, classes=None, sparse_output=False): self.classes = classes self.sparse_output = sparse_output def fit(self, y): """Fit the label sets binarizer, storing `classes_` Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : returns this MultiLabelBinarizer instance """ if self.classes is None: classes = sorted(set(itertools.chain.from_iterable(y))) else: classes = self.classes dtype = np.int if all(isinstance(c, int) for c in classes) else object self.classes_ = np.empty(len(classes), dtype=dtype) self.classes_[:] = classes return self def fit_transform(self, y): """Fit the label sets binarizer and transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ if self.classes is not None: return self.fit(y).transform(y) # Automatically increment on new class class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) # sort classes and reorder columns tmp = sorted(class_mapping, key=class_mapping.get) # (make safe for tuples) dtype = np.int if all(isinstance(c, int) for c in tmp) else object class_mapping = np.empty(len(tmp), dtype=dtype) class_mapping[:] = tmp self.classes_, inverse = np.unique(class_mapping, return_inverse=True) yt.indices = np.take(inverse, yt.indices) if not self.sparse_output: yt = yt.toarray() return yt def transform(self, y): """Transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ class_to_index = dict(zip(self.classes_, range(len(self.classes_)))) yt = self._transform(y, class_to_index) if not self.sparse_output: yt = yt.toarray() return yt def _transform(self, y, class_mapping): """Transforms the label sets with a given mapping Parameters ---------- y : iterable of iterables class_mapping : Mapping Maps from label to column index in label indicator matrix Returns ------- y_indicator : sparse CSR matrix, shape (n_samples, n_classes) Label indicator matrix """ indices = array.array('i') indptr = array.array('i', [0]) for labels in y: indices.extend(set(class_mapping[label] for label in labels)) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) return sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping))) def inverse_transform(self, yt): """Transform the given indicator matrix into label sets Parameters ---------- yt : array or sparse matrix of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. 
Returns ------- y : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`. """ if yt.shape[1] != len(self.classes_): raise ValueError('Expected indicator for {0} classes, but got {1}' .format(len(self.classes_), yt.shape[1])) if sp.issparse(yt): yt = yt.tocsr() if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: raise ValueError('Expected only 0s and 1s in label indicator.') return [tuple(self.classes_.take(yt.indices[start:end])) for start, end in zip(yt.indptr[:-1], yt.indptr[1:])] else: unexpected = np.setdiff1d(yt, [0, 1]) if len(unexpected) > 0: raise ValueError('Expected only 0s and 1s in label indicator. ' 'Also got {0}'.format(unexpected)) return [tuple(self.classes_.compress(indicators)) for indicators in yt]
bsd-3-clause
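# Illustrative sketch (not part of the corpus record above): a minimal round trip
# through LabelEncoder and label_binarize, mirroring the doctests already present
# in the preceding module.
from sklearn.preprocessing import LabelEncoder, label_binarize

le = LabelEncoder().fit(["paris", "paris", "tokyo", "amsterdam"])
codes = le.transform(["tokyo", "tokyo", "paris"])      # array([2, 2, 1])
print(le.inverse_transform(codes))                     # ['tokyo', 'tokyo', 'paris']
print(label_binarize([1, 6], classes=[1, 2, 4, 6]))    # one-vs-all indicator matrix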
Parallel-in-Time/pySDC
pySDC/playgrounds/deprecated/Dedalus/dynamo_playground.py
1
4660
import numpy as np import sys import matplotlib.pyplot as plt from mpi4py import MPI from pySDC.helpers.stats_helper import filter_stats, sort_stats from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right from pySDC.implementations.controller_classes.controller_MPI import controller_MPI from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order from pySDC.playgrounds.Dedalus.TransferDedalusFields import dedalus_field_transfer # from pySDC.playgrounds.Dedalus.Dynamo_2D_Dedalus import dynamo_2d_dedalus from pySDC.playgrounds.Dedalus.Dynamo_2D_Dedalus_NEW import dynamo_2d_dedalus from pySDC.playgrounds.Dedalus.Dynamo_monitor import monitor def main(): """ A simple test program to do PFASST runs for the heat equation """ # set MPI communicator comm = MPI.COMM_WORLD world_rank = comm.Get_rank() world_size = comm.Get_size() # split world communicator to create space-communicators if len(sys.argv) >= 2: color = int(world_rank / int(sys.argv[1])) else: color = int(world_rank / 1) space_comm = comm.Split(color=color) space_size = space_comm.Get_size() space_rank = space_comm.Get_rank() # split world communicator to create time-communicators if len(sys.argv) >= 2: color = int(world_rank % int(sys.argv[1])) else: color = int(world_rank / world_size) time_comm = comm.Split(color=color) time_size = time_comm.Get_size() time_rank = time_comm.Get_rank() print("IDs (world, space, time): %i / %i -- %i / %i -- %i / %i" % (world_rank, world_size, space_rank, space_size, time_rank, time_size)) # initialize level parameters level_params = dict() level_params['restol'] = 1E-08 level_params['dt'] = 0.5 level_params['nsweeps'] = [1] # initialize sweeper parameters sweeper_params = dict() sweeper_params['collocation_class'] = CollGaussRadau_Right sweeper_params['num_nodes'] = [3] sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part sweeper_params['initial_guess'] = 'zero' # initialize problem parameters problem_params = dict() problem_params['Rm'] = 4 problem_params['kz'] = 0.45 problem_params['initial'] = 'low-res' problem_params['nvars'] = [(64, 64)] # number of degrees of freedom for each level problem_params['comm'] = space_comm # initialize step parameters step_params = dict() step_params['maxiter'] = 50 # step_params['errtol'] = 1E-07 # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 20 if space_rank == 0 else 99 controller_params['hook_class'] = monitor # controller_params['use_iteration_estimator'] = True # fill description dictionary for easy step instantiation description = dict() description['problem_class'] = dynamo_2d_dedalus description['problem_params'] = problem_params # pass problem parameters description['sweeper_class'] = imex_1st_order description['sweeper_params'] = sweeper_params # pass sweeper parameters description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = dedalus_field_transfer # description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer # set time parameters t0 = 0.0 Tend = 10.0 # instantiate controller controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm) # get initial values on finest level P = controller.S.levels[0].prob uinit = P.u_exact(t0) # call main function to get things done... 
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) timings = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')[0][1] print(f'Time it took to run the simulation: {timings:6.3f} seconds') if space_size == 1: bx_maxes = sort_stats(filter_stats(stats, type='bx_max'), sortby='time') times = [t0 + i * level_params['dt'] for i in range(int((Tend - t0) / level_params['dt']) + 1)] half = int(len(times) / 2) gr = np.polyfit(times[half::], np.log([item[1] for item in bx_maxes])[half::], 1)[0] print("Growth rate: {:.3e}".format(gr)) plt.figure(3) plt.semilogy(times, [item[1] for item in bx_maxes]) plt.pause(0.1) if __name__ == "__main__": main()
bsd-2-clause
phdowling/scikit-learn
examples/neighbors/plot_species_kde.py
282
4059
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`example_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = -9999 + np.zeros(land_mask.shape[0]) Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
bsd-3-clause
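# Illustrative sketch (not part of the corpus record above): fitting the same
# kind of haversine KernelDensity as the preceding example on a few hypothetical
# lat/long points (the coordinates below are made up; inputs must be in radians).
import numpy as np
from sklearn.neighbors import KernelDensity

latlon_deg = np.array([[-10.0, -60.0], [-12.5, -62.0], [-11.0, -61.5]])
X = np.radians(latlon_deg)
kde = KernelDensity(bandwidth=0.04, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree').fit(X)
print(np.exp(kde.score_samples(X)))   # density estimates at the training points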
kdebrab/pandas
pandas/tests/sparse/test_combine_concat.py
3
15360
# pylint: disable-msg=E1101,W0612 import pytest import numpy as np import pandas as pd import pandas.util.testing as tm import itertools class TestSparseSeriesConcat(object): def test_concat(self): val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) for kind in ['integer', 'block']: sparse1 = pd.SparseSeries(val1, name='x', kind=kind) sparse2 = pd.SparseSeries(val2, name='y', kind=kind) res = pd.concat([sparse1, sparse2]) exp = pd.concat([pd.Series(val1), pd.Series(val2)]) exp = pd.SparseSeries(exp, kind=kind) tm.assert_sp_series_equal(res, exp) sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind) sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind) res = pd.concat([sparse1, sparse2]) exp = pd.concat([pd.Series(val1), pd.Series(val2)]) exp = pd.SparseSeries(exp, fill_value=0, kind=kind) tm.assert_sp_series_equal(res, exp) def test_concat_axis1(self): val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) sparse1 = pd.SparseSeries(val1, name='x') sparse2 = pd.SparseSeries(val2, name='y') res = pd.concat([sparse1, sparse2], axis=1) exp = pd.concat([pd.Series(val1, name='x'), pd.Series(val2, name='y')], axis=1) exp = pd.SparseDataFrame(exp) tm.assert_sp_frame_equal(res, exp) def test_concat_different_fill(self): val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) for kind in ['integer', 'block']: sparse1 = pd.SparseSeries(val1, name='x', kind=kind) sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0) res = pd.concat([sparse1, sparse2]) exp = pd.concat([pd.Series(val1), pd.Series(val2)]) exp = pd.SparseSeries(exp, kind=kind) tm.assert_sp_series_equal(res, exp) res = pd.concat([sparse2, sparse1]) exp = pd.concat([pd.Series(val2), pd.Series(val1)]) exp = pd.SparseSeries(exp, kind=kind, fill_value=0) tm.assert_sp_series_equal(res, exp) def test_concat_axis1_different_fill(self): val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) sparse1 = pd.SparseSeries(val1, name='x') sparse2 = pd.SparseSeries(val2, name='y', fill_value=0) res = pd.concat([sparse1, sparse2], axis=1) exp = pd.concat([pd.Series(val1, name='x'), pd.Series(val2, name='y')], axis=1) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) def test_concat_different_kind(self): val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) sparse1 = pd.SparseSeries(val1, name='x', kind='integer') sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0) res = pd.concat([sparse1, sparse2]) exp = pd.concat([pd.Series(val1), pd.Series(val2)]) exp = pd.SparseSeries(exp, kind='integer') tm.assert_sp_series_equal(res, exp) res = pd.concat([sparse2, sparse1]) exp = pd.concat([pd.Series(val2), pd.Series(val1)]) exp = pd.SparseSeries(exp, kind='block', fill_value=0) tm.assert_sp_series_equal(res, exp) def test_concat_sparse_dense(self): # use first input's fill_value val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan]) val2 = np.array([3, np.nan, 4, 0, 0]) for kind in ['integer', 'block']: sparse = pd.SparseSeries(val1, name='x', kind=kind) dense = pd.Series(val2, name='y') res = pd.concat([sparse, dense]) exp = pd.concat([pd.Series(val1), dense]) exp = pd.SparseSeries(exp, kind=kind) tm.assert_sp_series_equal(res, exp) res = pd.concat([dense, sparse, dense]) exp = pd.concat([dense, pd.Series(val1), dense]) exp = pd.SparseSeries(exp, kind=kind) tm.assert_sp_series_equal(res, exp) sparse = 
pd.SparseSeries(val1, name='x', kind=kind, fill_value=0) dense = pd.Series(val2, name='y') res = pd.concat([sparse, dense]) exp = pd.concat([pd.Series(val1), dense]) exp = pd.SparseSeries(exp, kind=kind, fill_value=0) tm.assert_sp_series_equal(res, exp) res = pd.concat([dense, sparse, dense]) exp = pd.concat([dense, pd.Series(val1), dense]) exp = pd.SparseSeries(exp, kind=kind, fill_value=0) tm.assert_sp_series_equal(res, exp) class TestSparseDataFrameConcat(object): def setup_method(self, method): self.dense1 = pd.DataFrame({'A': [0., 1., 2., np.nan], 'B': [0., 0., 0., 0.], 'C': [np.nan, np.nan, np.nan, np.nan], 'D': [1., 2., 3., 4.]}) self.dense2 = pd.DataFrame({'A': [5., 6., 7., 8.], 'B': [np.nan, 0., 7., 8.], 'C': [5., 6., np.nan, np.nan], 'D': [np.nan, np.nan, np.nan, np.nan]}) self.dense3 = pd.DataFrame({'E': [5., 6., 7., 8.], 'F': [np.nan, 0., 7., 8.], 'G': [5., 6., np.nan, np.nan], 'H': [np.nan, np.nan, np.nan, np.nan]}) def test_concat(self): # fill_value = np.nan sparse = self.dense1.to_sparse() sparse2 = self.dense2.to_sparse() res = pd.concat([sparse, sparse]) exp = pd.concat([self.dense1, self.dense1]).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2, sparse2]) exp = pd.concat([self.dense2, self.dense2]).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse, sparse2]) exp = pd.concat([self.dense1, self.dense2]).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2, sparse]) exp = pd.concat([self.dense2, self.dense1]).to_sparse() tm.assert_sp_frame_equal(res, exp) # fill_value = 0 sparse = self.dense1.to_sparse(fill_value=0) sparse2 = self.dense2.to_sparse(fill_value=0) res = pd.concat([sparse, sparse]) exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2, sparse2]) exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse, sparse2]) exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2, sparse]) exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) def test_concat_different_fill_value(self): # 1st fill_value will be used sparse = self.dense1.to_sparse() sparse2 = self.dense2.to_sparse(fill_value=0) res = pd.concat([sparse, sparse2]) exp = pd.concat([self.dense1, self.dense2]).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2, sparse]) exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) def test_concat_different_columns_sort_warns(self): sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse() with tm.assert_produces_warning(FutureWarning): res = pd.concat([sparse, sparse3]) with tm.assert_produces_warning(FutureWarning): exp = pd.concat([self.dense1, self.dense3]) exp = exp.to_sparse() tm.assert_sp_frame_equal(res, exp) def test_concat_different_columns(self): # fill_value = np.nan sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse() res = pd.concat([sparse, sparse3], sort=True) exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse3, sparse], sort=True) exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse() 
exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # fill_value = 0 sparse = self.dense1.to_sparse(fill_value=0) sparse3 = self.dense3.to_sparse(fill_value=0) res = pd.concat([sparse, sparse3], sort=True) exp = (pd.concat([self.dense1, self.dense3], sort=True) .to_sparse(fill_value=0)) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse3, sparse], sort=True) exp = (pd.concat([self.dense3, self.dense1], sort=True) .to_sparse(fill_value=0)) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # different fill values sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse(fill_value=0) # each columns keeps its fill_value, thus compare in dense res = pd.concat([sparse, sparse3], sort=True) exp = pd.concat([self.dense1, self.dense3], sort=True) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) res = pd.concat([sparse3, sparse], sort=True) exp = pd.concat([self.dense3, self.dense1], sort=True) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) def test_concat_series(self): # fill_value = np.nan sparse = self.dense1.to_sparse() sparse2 = self.dense2.to_sparse() for col in ['A', 'D']: res = pd.concat([sparse, sparse2[col]]) exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2[col], sparse]) exp = pd.concat([self.dense2[col], self.dense1]).to_sparse() tm.assert_sp_frame_equal(res, exp) # fill_value = 0 sparse = self.dense1.to_sparse(fill_value=0) sparse2 = self.dense2.to_sparse(fill_value=0) for col in ['C', 'D']: res = pd.concat([sparse, sparse2[col]]) exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse2[col], sparse]) exp = pd.concat([self.dense2[col], self.dense1]).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) def test_concat_axis1(self): # fill_value = np.nan sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse() res = pd.concat([sparse, sparse3], axis=1) exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse() tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse3, sparse], axis=1) exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse() exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # fill_value = 0 sparse = self.dense1.to_sparse(fill_value=0) sparse3 = self.dense3.to_sparse(fill_value=0) res = pd.concat([sparse, sparse3], axis=1) exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) res = pd.concat([sparse3, sparse], axis=1) exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse(fill_value=0) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # different fill values sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse(fill_value=0) # each columns keeps its fill_value, thus compare in dense res = pd.concat([sparse, sparse3], axis=1) exp = pd.concat([self.dense1, self.dense3], axis=1) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) res = pd.concat([sparse3, sparse], axis=1) exp = pd.concat([self.dense3, self.dense1], axis=1) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx', itertools.product([None, 0, 1, np.nan], 
[0, 1], [1, 0])) def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx): frames = [self.dense1, self.dense2] sparse_frame = [frames[dense_idx], frames[sparse_idx].to_sparse(fill_value=fill_value)] dense_frame = [frames[dense_idx], frames[sparse_idx]] # This will try both directions sparse + dense and dense + sparse for _ in range(2): res = pd.concat(sparse_frame) exp = pd.concat(dense_frame) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) sparse_frame = sparse_frame[::-1] dense_frame = dense_frame[::-1] @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx', itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0])) def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx): # See GH16874, GH18914 and #18686 for why this should be a DataFrame frames = [self.dense1, self.dense3] sparse_frame = [frames[dense_idx], frames[sparse_idx].to_sparse(fill_value=fill_value)] dense_frame = [frames[dense_idx], frames[sparse_idx]] # This will try both directions sparse + dense and dense + sparse for _ in range(2): res = pd.concat(sparse_frame, axis=1) exp = pd.concat(dense_frame, axis=1) for column in frames[dense_idx].columns: if dense_idx == sparse_idx: tm.assert_frame_equal(res[column], exp[column]) else: tm.assert_series_equal(res[column], exp[column]) tm.assert_frame_equal(res, exp) sparse_frame = sparse_frame[::-1] dense_frame = dense_frame[::-1]
bsd-3-clause
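# Illustrative sketch (not part of the corpus record above): the kind of sparse
# concatenation the preceding tests exercise, written against the legacy
# pd.SparseSeries API used in that file (removed in pandas 1.0, so this only
# runs on the older pandas versions those tests target).
import numpy as np
import pandas as pd

val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='block')
sparse2 = pd.SparseSeries(val2, name='y', kind='block')
print(pd.concat([sparse1, sparse2]).to_dense())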
JsNoNo/scikit-learn
sklearn/cluster/spectral.py
233
18153
# -*- coding: utf-8 -*- """Algorithms for spectral clustering""" # Author: Gael Varoquaux gael.varoquaux@normalesup.org # Brian Cheung # Wei LI <kuantkid@gmail.com> # License: BSD 3 clause import warnings import numpy as np from ..base import BaseEstimator, ClusterMixin from ..utils import check_random_state, as_float_array from ..utils.validation import check_array from ..utils.extmath import norm from ..metrics.pairwise import pairwise_kernels from ..neighbors import kneighbors_graph from ..manifold import spectral_embedding from .k_means_ import k_means def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None): """Search for a partition matrix (clustering) which is closest to the eigenvector embedding. Parameters ---------- vectors : array-like, shape: (n_samples, n_clusters) The embedding space of the samples. copy : boolean, optional, default: True Whether to copy vectors, or perform in-place normalization. max_svd_restarts : int, optional, default: 30 Maximum number of attempts to restart SVD if convergence fails n_iter_max : int, optional, default: 30 Maximum number of iterations to attempt in rotation and partition matrix search if machine precision convergence is not reached random_state: int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the of the rotation matrix Returns ------- labels : array of integers, shape: n_samples The labels of the clusters. References ---------- - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf Notes ----- The eigenvector embedding is used to iteratively search for the closest discrete partition. First, the eigenvector embedding is normalized to the space of partition matrices. An optimal discrete partition matrix closest to this normalized embedding multiplied by an initial rotation is calculated. Fixing this discrete partition matrix, an optimal rotation matrix is calculated. These two calculations are performed until convergence. The discrete partition matrix is returned as the clustering solution. Used in spectral clustering, this method tends to be faster and more robust to random initialization than k-means. """ from scipy.sparse import csc_matrix from scipy.linalg import LinAlgError random_state = check_random_state(random_state) vectors = as_float_array(vectors, copy=copy) eps = np.finfo(float).eps n_samples, n_components = vectors.shape # Normalize the eigenvectors to an equal length of a vector of ones. # Reorient the eigenvectors to point in the negative direction with respect # to the first element. This may have to do with constraining the # eigenvectors to lie in a specific quadrant to make the discretization # search easier. norm_ones = np.sqrt(n_samples) for i in range(vectors.shape[1]): vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \ * norm_ones if vectors[0, i] != 0: vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i]) # Normalize the rows of the eigenvectors. Samples should lie on the unit # hypersphere centered at the origin. This transforms the samples in the # embedding space to the space of partition matrices. vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis] svd_restarts = 0 has_converged = False # If there is an exception we try to randomize and rerun SVD again # do this max_svd_restarts times. 
while (svd_restarts < max_svd_restarts) and not has_converged: # Initialize first column of rotation matrix with a row of the # eigenvectors rotation = np.zeros((n_components, n_components)) rotation[:, 0] = vectors[random_state.randint(n_samples), :].T # To initialize the rest of the rotation matrix, find the rows # of the eigenvectors that are as orthogonal to each other as # possible c = np.zeros(n_samples) for j in range(1, n_components): # Accumulate c to ensure row is as orthogonal as possible to # previous picks as well as current one c += np.abs(np.dot(vectors, rotation[:, j - 1])) rotation[:, j] = vectors[c.argmin(), :].T last_objective_value = 0.0 n_iter = 0 while not has_converged: n_iter += 1 t_discrete = np.dot(vectors, rotation) labels = t_discrete.argmax(axis=1) vectors_discrete = csc_matrix( (np.ones(len(labels)), (np.arange(0, n_samples), labels)), shape=(n_samples, n_components)) t_svd = vectors_discrete.T * vectors try: U, S, Vh = np.linalg.svd(t_svd) svd_restarts += 1 except LinAlgError: print("SVD did not converge, randomizing and trying again") break ncut_value = 2.0 * (n_samples - S.sum()) if ((abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max)): has_converged = True else: # otherwise calculate rotation and continue last_objective_value = ncut_value rotation = np.dot(Vh.T, U.T) if not has_converged: raise LinAlgError('SVD did not converge') return labels def spectral_clustering(affinity, n_clusters=8, n_components=None, eigen_solver=None, random_state=None, n_init=10, eigen_tol=0.0, assign_labels='kmeans'): """Apply clustering to a projection to the normalized laplacian. In practice Spectral Clustering is very useful when the structure of the individual clusters is highly non-convex or more generally when a measure of the center and spread of the cluster is not a suitable description of the complete cluster. For instance when clusters are nested circles on the 2D plan. If affinity is the adjacency matrix of a graph, this method can be used to find normalized graph cuts. Read more in the :ref:`User Guide <spectral_clustering>`. Parameters ----------- affinity : array-like or sparse matrix, shape: (n_samples, n_samples) The affinity matrix describing the relationship of the samples to embed. **Must be symmetric**. Possible examples: - adjacency matrix of a graph, - heat kernel of the pairwise distance matrix of the samples, - symmetric k-nearest neighbours connectivity matrix of the samples. n_clusters : integer, optional Number of clusters to extract. n_components : integer, optional, default is n_clusters Number of eigen vectors to use for the spectral embedding eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by the K-Means initialization. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. eigen_tol : float, optional, default: 0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. 
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans' The strategy to use to assign labels in the embedding space. There are two ways to assign labels after the laplacian embedding. k-means can be applied and is a popular choice. But it can also be sensitive to initialization. Discretization is another approach which is less sensitive to random initialization. See the 'Multiclass spectral clustering' paper referenced below for more details on the discretization approach. Returns ------- labels : array of integers, shape: n_samples The labels of the clusters. References ---------- - Normalized cuts and image segmentation, 2000 Jianbo Shi, Jitendra Malik http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324 - A Tutorial on Spectral Clustering, 2007 Ulrike von Luxburg http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323 - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf Notes ------ The graph should contain only one connect component, elsewhere the results make little sense. This algorithm solves the normalized cut for k=2: it is a normalized spectral clustering. """ if assign_labels not in ('kmeans', 'discretize'): raise ValueError("The 'assign_labels' parameter should be " "'kmeans' or 'discretize', but '%s' was given" % assign_labels) random_state = check_random_state(random_state) n_components = n_clusters if n_components is None else n_components maps = spectral_embedding(affinity, n_components=n_components, eigen_solver=eigen_solver, random_state=random_state, eigen_tol=eigen_tol, drop_first=False) if assign_labels == 'kmeans': _, labels, _ = k_means(maps, n_clusters, random_state=random_state, n_init=n_init) else: labels = discretize(maps, random_state=random_state) return labels class SpectralClustering(BaseEstimator, ClusterMixin): """Apply clustering to a projection to the normalized laplacian. In practice Spectral Clustering is very useful when the structure of the individual clusters is highly non-convex or more generally when a measure of the center and spread of the cluster is not a suitable description of the complete cluster. For instance when clusters are nested circles on the 2D plan. If affinity is the adjacency matrix of a graph, this method can be used to find normalized graph cuts. When calling ``fit``, an affinity matrix is constructed using either kernel function such the Gaussian (aka RBF) kernel of the euclidean distanced ``d(X, X)``:: np.exp(-gamma * d(X,X) ** 2) or a k-nearest neighbors connectivity matrix. Alternatively, using ``precomputed``, a user-provided affinity matrix can be used. Read more in the :ref:`User Guide <spectral_clustering>`. Parameters ----------- n_clusters : integer, optional The dimension of the projection subspace. affinity : string, array-like or callable, default 'rbf' If a string, this may be one of 'nearest_neighbors', 'precomputed', 'rbf' or one of the kernels supported by `sklearn.metrics.pairwise_kernels`. Only kernels that produce similarity scores (non-negative values that increase with similarity) should be used. This property is not checked by the clustering algorithm. gamma : float Scaling factor of RBF, polynomial, exponential chi^2 and sigmoid affinity kernel. Ignored for ``affinity='nearest_neighbors'``. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. 
n_neighbors : integer Number of neighbors to use when constructing the affinity matrix using the nearest neighbors method. Ignored for ``affinity='rbf'``. eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by the K-Means initialization. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. eigen_tol : float, optional, default: 0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. assign_labels : {'kmeans', 'discretize'}, default: 'kmeans' The strategy to use to assign labels in the embedding space. There are two ways to assign labels after the laplacian embedding. k-means can be applied and is a popular choice. But it can also be sensitive to initialization. Discretization is another approach which is less sensitive to random initialization. kernel_params : dictionary of string to any, optional Parameters (keyword arguments) and values for kernel passed as callable object. Ignored by other kernels. Attributes ---------- affinity_matrix_ : array-like, shape (n_samples, n_samples) Affinity matrix used for clustering. Available only if after calling ``fit``. labels_ : Labels of each point Notes ----- If you have an affinity matrix, such as a distance matrix, for which 0 means identical elements, and high values means very dissimilar elements, it can be transformed in a similarity matrix that is well suited for the algorithm by applying the Gaussian (RBF, heat) kernel:: np.exp(- X ** 2 / (2. * delta ** 2)) Another alternative is to take a symmetric version of the k nearest neighbors connectivity matrix of the points. If the pyamg package is installed, it is used: this greatly speeds up computation. References ---------- - Normalized cuts and image segmentation, 2000 Jianbo Shi, Jitendra Malik http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324 - A Tutorial on Spectral Clustering, 2007 Ulrike von Luxburg http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323 - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf """ def __init__(self, n_clusters=8, eigen_solver=None, random_state=None, n_init=10, gamma=1., affinity='rbf', n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1, kernel_params=None): self.n_clusters = n_clusters self.eigen_solver = eigen_solver self.random_state = random_state self.n_init = n_init self.gamma = gamma self.affinity = affinity self.n_neighbors = n_neighbors self.eigen_tol = eigen_tol self.assign_labels = assign_labels self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def fit(self, X, y=None): """Creates an affinity matrix for X using the selected affinity, then applies spectral clustering to this affinity matrix. 
Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) OR, if affinity==`precomputed`, a precomputed affinity matrix of shape (n_samples, n_samples) """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) if X.shape[0] == X.shape[1] and self.affinity != "precomputed": warnings.warn("The spectral clustering API has changed. ``fit``" "now constructs an affinity matrix from data. To use" " a custom affinity matrix, " "set ``affinity=precomputed``.") if self.affinity == 'nearest_neighbors': connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True) self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) elif self.affinity == 'precomputed': self.affinity_matrix_ = X else: params = self.kernel_params if params is None: params = {} if not callable(self.affinity): params['gamma'] = self.gamma params['degree'] = self.degree params['coef0'] = self.coef0 self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity, filter_params=True, **params) random_state = check_random_state(self.random_state) self.labels_ = spectral_clustering(self.affinity_matrix_, n_clusters=self.n_clusters, eigen_solver=self.eigen_solver, random_state=random_state, n_init=self.n_init, eigen_tol=self.eigen_tol, assign_labels=self.assign_labels) return self @property def _pairwise(self): return self.affinity == "precomputed"
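A short usage sketch for the estimator defined above: cluster two noisy nested circles, the situation the docstring singles out as well suited to spectral methods. The dataset and every parameter value are chosen here only for illustration, and a scikit-learn build of roughly this vintage is assumed to be importable as sklearn.

import numpy as np
from sklearn.datasets import make_circles
from sklearn.cluster import SpectralClustering

# Two concentric rings that k-means on raw coordinates cannot separate.
X, y = make_circles(n_samples=300, factor=0.5, noise=0.05, random_state=0)

model = SpectralClustering(n_clusters=2, affinity="nearest_neighbors",
                           n_neighbors=10, assign_labels="discretize",
                           random_state=0)
labels = model.fit(X).labels_

# The affinity matrix built during fit is kept on the estimator.
print(model.affinity_matrix_.shape)   # (300, 300)
print(np.unique(labels))              # [0 1]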
bsd-3-clause
jjx02230808/project0223
examples/ensemble/plot_forest_iris.py
335
6271
""" ==================================================================== Plot the decision surfaces of ensembles of trees on the iris dataset ==================================================================== Plot the decision surfaces of forests of randomized trees trained on pairs of features of the iris dataset. This plot compares the decision surfaces learned by a decision tree classifier (first column), by a random forest classifier (second column), by an extra- trees classifier (third column) and by an AdaBoost classifier (fourth column). In the first row, the classifiers are built using the sepal width and the sepal length features only, on the second row using the petal length and sepal length only, and on the third row using the petal width and the petal length only. In descending order of quality, when trained (outside of this example) on all 4 features using 30 estimators and scored using 10 fold cross validation, we see:: ExtraTreesClassifier() # 0.95 score RandomForestClassifier() # 0.94 score AdaBoost(DecisionTree(max_depth=3)) # 0.94 score DecisionTree(max_depth=None) # 0.94 score Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but the average score does not improve). See the console's output for further details about each model. In this example you might try to: 1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and ``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the ``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier`` 2) vary ``n_estimators`` It is worth noting that RandomForests and ExtraTrees can be fitted in parallel on many cores as each tree is built independently of the others. AdaBoost's samples are built sequentially and so do not use multiple cores. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import clone from sklearn.datasets import load_iris from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier) from sklearn.externals.six.moves import xrange from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 n_estimators = 30 plot_colors = "ryb" cmap = plt.cm.RdYlBu plot_step = 0.02 # fine step width for decision surface contours plot_step_coarser = 0.5 # step widths for coarse classifier guesses RANDOM_SEED = 13 # fix the seed on each iteration # Load data iris = load_iris() plot_idx = 1 models = [DecisionTreeClassifier(max_depth=None), RandomForestClassifier(n_estimators=n_estimators), ExtraTreesClassifier(n_estimators=n_estimators), AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators)] for pair in ([0, 1], [0, 2], [2, 3]): for model in models: # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(RANDOM_SEED) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train clf = clone(model) clf = model.fit(X, y) scores = clf.score(X, y) # Create a title for each column and the console by using str() and # slicing away useless parts of the string model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")] model_details = model_title if hasattr(model, "estimators_"): model_details += " with {} estimators".format(len(model.estimators_)) print( model_details + " with features", pair, "has a score of", scores ) plt.subplot(3, 4, plot_idx) if plot_idx <= len(models): # Add a title at the top of each column plt.title(model_title) # Now plot the decision boundary using a fine mesh as input to a # filled contour plot x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) # Plot either a single DecisionTreeClassifier or alpha blend the # decision surfaces of the ensemble of classifiers if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: # Choose alpha blend level with respect to the number of estimators # that are in use (noting that AdaBoost can use fewer estimators # than its maximum if it achieves a good enough fit early on) estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) # Build a coarser grid to plot a set of ensemble classifications # to show how these are different to what we see in the decision # surfaces. 
        # These points are regularly spaced and do not have a black outline
        xx_coarser, yy_coarser = np.meshgrid(
            np.arange(x_min, x_max, plot_step_coarser),
            np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(),
                                               yy_coarser.ravel()]
                                         ).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,
                                c=Z_points_coarser, cmap=cmap,
                                edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
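The module docstring above quotes 10-fold cross-validation scores for these four models on all four iris features. Here is a hedged sketch of how such numbers can be reproduced, using the old sklearn.cross_validation module to match the imports in this file (newer releases expose the same function from sklearn.model_selection); exact scores will vary slightly with the library version.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.cross_validation import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                              AdaBoostClassifier)

iris = load_iris()
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=30),
          ExtraTreesClassifier(n_estimators=30),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=30)]

for model in models:
    scores = cross_val_score(model, iris.data, iris.target, cv=10)
    print("%s: mean accuracy %.3f" % (type(model).__name__, scores.mean()))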
bsd-3-clause
andaag/scikit-learn
sklearn/neighbors/approximate.py
128
22351
"""Approximate nearest neighbor search""" # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # Joel Nothman <joel.nothman@gmail.com> import numpy as np import warnings from scipy import sparse from .base import KNeighborsMixin, RadiusNeighborsMixin from ..base import BaseEstimator from ..utils.validation import check_array from ..utils import check_random_state from ..metrics.pairwise import pairwise_distances from ..random_projection import GaussianRandomProjection __all__ = ["LSHForest"] HASH_DTYPE = '>u4' MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8 def _find_matching_indices(tree, bin_X, left_mask, right_mask): """Finds indices in sorted array of integers. Most significant h bits in the binary representations of the integers are matched with the items' most significant h bits. """ left_index = np.searchsorted(tree, bin_X & left_mask) right_index = np.searchsorted(tree, bin_X | right_mask, side='right') return left_index, right_index def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks): """Find the longest prefix match in tree for each query in bin_X Most significant bits are considered as the prefix. """ hi = np.empty_like(bin_X, dtype=np.intp) hi.fill(hash_size) lo = np.zeros_like(bin_X, dtype=np.intp) res = np.empty_like(bin_X, dtype=np.intp) left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi]) found = right_idx > left_idx res[found] = lo[found] = hash_size r = np.arange(bin_X.shape[0]) kept = r[lo < hi] # indices remaining in bin_X mask while kept.shape[0]: mid = (lo.take(kept) + hi.take(kept)) // 2 left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid]) found = right_idx > left_idx mid_found = mid[found] lo[kept[found]] = mid_found + 1 res[kept[found]] = mid_found hi[kept[~found]] = mid[~found] kept = r[lo < hi] return res class ProjectionToHashMixin(object): """Turn a transformed real-valued array into a hash""" @staticmethod def _to_hash(projected): if projected.shape[1] % 8 != 0: raise ValueError('Require reduced dimensionality to be a multiple ' 'of 8 for hashing') # XXX: perhaps non-copying operation better out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE) return out.reshape(projected.shape[0], -1) def fit_transform(self, X, y=None): self.fit(X) return self.transform(X) def transform(self, X, y=None): return self._to_hash(super(ProjectionToHashMixin, self).transform(X)) class GaussianRandomProjectionHash(ProjectionToHashMixin, GaussianRandomProjection): """Use GaussianRandomProjection to produce a cosine LSH fingerprint""" def __init__(self, n_components=8, random_state=None): super(GaussianRandomProjectionHash, self).__init__( n_components=n_components, random_state=random_state) def _array_of_arrays(list_of_arrays): """Creates an array of array from list of arrays.""" out = np.empty(len(list_of_arrays), dtype=object) out[:] = list_of_arrays return out class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin): """Performs approximate nearest neighbor search using LSH forest. LSH Forest: Locality Sensitive Hashing forest [1] is an alternative method for vanilla approximate nearest neighbor search methods. LSH forest data structure has been implemented using sorted arrays and binary search and 32 bit fixed-length hashes. Random projection is used as the hash family which approximates cosine distance. 
The cosine distance is defined as ``1 - cosine_similarity``: the lowest value is 0 (identical point) but it is bounded above by 2 for the farthest points. Its value does not depend on the norm of the vector points but only on their relative angles. Read more in the :ref:`User Guide <approximate_nearest_neighbors>`. Parameters ---------- n_estimators : int (default = 10) Number of trees in the LSH Forest. min_hash_match : int (default = 4) lowest hash length to be searched when candidate selection is performed for nearest neighbors. n_candidates : int (default = 10) Minimum number of candidates evaluated per estimator, assuming enough items meet the `min_hash_match` constraint. n_neighbors : int (default = 5) Number of neighbors to be returned from query function when it is not provided to the :meth:`kneighbors` method. radius : float, optinal (default = 1.0) Radius from the data point to its neighbors. This is the parameter space to use by default for the :meth`radius_neighbors` queries. radius_cutoff_ratio : float, optional (default = 0.9) A value ranges from 0 to 1. Radius neighbors will be searched until the ratio between total neighbors within the radius and the total candidates becomes less than this value unless it is terminated by hash length reaching `min_hash_match`. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- hash_functions_ : list of GaussianRandomProjectionHash objects Hash function g(p,x) for a tree is an array of 32 randomly generated float arrays with the same dimenstion as the data set. This array is stored in GaussianRandomProjectionHash object and can be obtained from ``components_`` attribute. trees_ : array, shape (n_estimators, n_samples) Each tree (corresponding to a hash function) contains an array of sorted hashed values. The array representation may change in future versions. original_indices_ : array, shape (n_estimators, n_samples) Original indices of sorted hashed values in the fitted index. References ---------- .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning Indexes for Similarity Search", WWW '05 Proceedings of the 14th international conference on World Wide Web, 651-660, 2005. Examples -------- >>> from sklearn.neighbors import LSHForest >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]] >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]] >>> lshf = LSHForest() >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10, n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9, random_state=None) >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2) >>> distances # doctest: +ELLIPSIS array([[ 0.069..., 0.149...], [ 0.229..., 0.481...], [ 0.004..., 0.014...]]) >>> indices array([[1, 2], [2, 0], [4, 0]]) """ def __init__(self, n_estimators=10, radius=1.0, n_candidates=50, n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9, random_state=None): self.n_estimators = n_estimators self.radius = radius self.random_state = random_state self.n_candidates = n_candidates self.n_neighbors = n_neighbors self.min_hash_match = min_hash_match self.radius_cutoff_ratio = radius_cutoff_ratio def _compute_distances(self, query, candidates): """Computes the cosine distance. 
Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances. """ if candidates.shape == (0,): # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse return np.empty(0, dtype=np.int), np.empty(0, dtype=float) if sparse.issparse(self._fit_X): candidate_X = self._fit_X[candidates] else: candidate_X = self._fit_X.take(candidates, axis=0, mode='clip') distances = pairwise_distances(query, candidate_X, metric='cosine')[0] distance_positions = np.argsort(distances) distances = distances.take(distance_positions, mode='clip', axis=0) return distance_positions, distances def _generate_masks(self): """Creates left and right masks for all hash lengths.""" tri_size = MAX_HASH_SIZE + 1 # Called once on fitting, output is independent of hashes left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:] right_mask = left_mask[::-1, ::-1] self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE) self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE) def _get_candidates(self, query, max_depth, bin_queries, n_neighbors): """Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances. """ index_size = self._fit_X.shape[0] # Number of candidates considered including duplicates # XXX: not sure whether this is being calculated correctly wrt # duplicates from different iterations through a single tree n_candidates = 0 candidate_set = set() min_candidates = self.n_candidates * self.n_estimators while (max_depth > self.min_hash_match and (n_candidates < min_candidates or len(candidate_set) < n_neighbors)): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) n_candidates += stop - start candidate_set.update( self.original_indices_[i][start:stop].tolist()) max_depth -= 1 candidates = np.fromiter(candidate_set, count=len(candidate_set), dtype=np.intp) # For insufficient candidates, candidates are filled. # Candidates are filled from unselected indices uniformly. if candidates.shape[0] < n_neighbors: warnings.warn( "Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (n_neighbors, self.min_hash_match)) remaining = np.setdiff1d(np.arange(0, index_size), candidates) to_fill = n_neighbors - candidates.shape[0] candidates = np.concatenate((candidates, remaining[:to_fill])) ranks, distances = self._compute_distances(query, candidates.astype(int)) return (candidates[ranks[:n_neighbors]], distances[:n_neighbors]) def _get_radius_neighbors(self, query, max_depth, bin_queries, radius): """Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances. 
""" ratio_within_radius = 1 threshold = 1 - self.radius_cutoff_ratio total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while (max_depth > self.min_hash_match and ratio_within_radius > threshold): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] candidates = [] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) candidates.extend( self.original_indices_[i][start:stop].tolist()) candidates = np.setdiff1d(candidates, total_candidates) total_candidates = np.append(total_candidates, candidates) ranks, distances = self._compute_distances(query, candidates) m = np.searchsorted(distances, radius, side='right') positions = np.searchsorted(total_distances, distances[:m]) total_neighbors = np.insert(total_neighbors, positions, candidates[ranks[:m]]) total_distances = np.insert(total_distances, positions, distances[:m]) ratio_within_radius = (total_neighbors.shape[0] / float(total_candidates.shape[0])) max_depth = max_depth - 1 return total_neighbors, total_distances def fit(self, X, y=None): """Fit the LSH forest on the data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self : object Returns self. """ self._fit_X = check_array(X, accept_sparse='csr') # Creates a g(p,x) for each tree self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): # This is g(p,x) for a particular tree. # Builds a single tree. Hashing is done on an array of data points. # `GaussianRandomProjection` is used for hashing. # `n_components=hash size and n_features=n_dim. hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE, rng.randint(0, int_max)) hashes = hasher.fit_transform(self._fit_X)[:, 0] original_index = np.argsort(hashes) bin_hashes = hashes[original_index] self.original_indices_.append(original_index) self.trees_.append(bin_hashes) self.hash_functions_.append(hasher) self._generate_masks() return self def _query(self, X): """Performs descending phase to find maximum depth.""" # Calculate hashes of shape (n_samples, n_estimators, [hash_size]) bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) # descend phase depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for tree, tree_queries in zip(self.trees_, np.rollaxis(bin_queries, 1))] return bin_queries, np.max(depths, axis=0) def kneighbors(self, X, n_neighbors=None, return_distance=True): """Returns n_neighbors of approximate nearest neighbors. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, opitonal (default = None) Number of neighbors required. If not provided, this will return the number specified at the initialization. 
return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples, n_neighbors) Array representing the cosine distances to each point, only present if return_distance=True. ind : array, shape (n_samples, n_neighbors) Indices of the approximate nearest points in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if n_neighbors is None: n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_candidates(X[i], max_depth[i], bin_queries[i], n_neighbors) neighbors.append(neighs) distances.append(dists) if return_distance: return np.array(distances), np.array(neighbors) else: return np.array(neighbors) def radius_neighbors(self, X, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Return the indices and distances of some points from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance to their query point. LSH Forest being an approximate method, some true neighbors from the indexed dataset might be missing from the results. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples,) of arrays Each element is an array representing the cosine distances to some points found within ``radius`` of the respective query. Only present if ``return_distance=True``. ind : array, shape (n_samples,) of arrays Each element is an array of indices for neighbors within ``radius`` of the respective query. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if radius is None: radius = self.radius X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_radius_neighbors(X[i], max_depth[i], bin_queries[i], radius) neighbors.append(neighs) distances.append(dists) if return_distance: return _array_of_arrays(distances), _array_of_arrays(neighbors) else: return _array_of_arrays(neighbors) def partial_fit(self, X, y=None): """ Inserts new data into the already fitted LSH Forest. Cost is proportional to new total size, so additions should be batched. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data point to be inserted into the LSH Forest. """ X = check_array(X, accept_sparse='csr') if not hasattr(self, 'hash_functions_'): return self.fit(X) if X.shape[1] != self._fit_X.shape[1]: raise ValueError("Number of features in X and" " fitted array does not match.") n_samples = X.shape[0] n_indexed = self._fit_X.shape[0] for i in range(self.n_estimators): bin_X = self.hash_functions_[i].transform(X)[:, 0] # gets the position to be added in the tree. positions = self.trees_[i].searchsorted(bin_X) # adds the hashed value into the tree. 
self.trees_[i] = np.insert(self.trees_[i], positions, bin_X) # add the entry into the original_indices_. self.original_indices_[i] = np.insert(self.original_indices_[i], positions, np.arange(n_indexed, n_indexed + n_samples)) # adds the entry into the input_array. if sparse.issparse(X) or sparse.issparse(self._fit_X): self._fit_X = sparse.vstack((self._fit_X, X)) else: self._fit_X = np.row_stack((self._fit_X, X)) return self
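A short usage sketch for the two query paths documented above that the class docstring does not exemplify, radius_neighbors and partial_fit. The data are random, and the module is assumed to be importable from sklearn.neighbors, as in the releases that still shipped LSHForest.

import numpy as np
from sklearn.neighbors import LSHForest

rng = np.random.RandomState(42)
X_index = rng.rand(100, 10)
X_query = rng.rand(3, 10)

lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=42)
lshf.fit(X_index)

# Approximate neighbours within a cosine-distance radius of each query point.
dists, inds = lshf.radius_neighbors(X_query, radius=0.2)
print([len(i) for i in inds])      # neighbour count per query (approximate)

# New points can be appended without rebuilding the whole index.
lshf.partial_fit(rng.rand(20, 10))
print(len(lshf.trees_[0]))         # 120 hashed entries per tree now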
bsd-3-clause
jacenkow/beard-server
beard_server/modules/predictor/arxiv.py
2
8502
# -*- coding: utf-8 -*- # # This file is part of Inspire. # Copyright (C) 2016 CERN. # # Inspire is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Inspire is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Inspire; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Automatic selection of arXiv papers for inclusion in Inspire.""" from __future__ import ( absolute_import, division, print_function) import cPickle as pickle import numpy as np from beard.utils import FuncTransformer from beard.utils import Shaper from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.grid_search import GridSearchCV from sklearn.metrics import euclidean_distances from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.preprocessing import Normalizer from sklearn.svm import LinearSVC def _get_title(r): return r["title"] if r["title"] else "" def _get_abstract(r): return r["abstract"] if r["abstract"] else "" def _get_title_abstract(r): return _get_title(r) + " " + _get_abstract(r) def _get_categories(r): return " ".join(r["categories"]) def train(records, use_categories=True): """Train a classifier on the given arXiv records. :param records: Records are expected as a list of dictionaries with the following fields required: "title", "abstract", "categories" and "decision". The decision field should be either "CORE", "Non-CORE" or "Rejected". Example: records = [{u'decision': "CORE", u'title': u'Effects of top compositeness', u'abstract': u'We investigate the effects of (...)' u'categories': [u'cond-mat.mes-hall', u'cond-mat.mtrl-sci']}, {...}, ...] :param use_categories: Whether the "categories" is used to build the classifier. 
:return: the trained pipeline """ records = np.array(records, dtype=np.object).reshape((-1, 1)) if use_categories: transformer = Pipeline([ ("features", FeatureUnion([ ("title_abstract", Pipeline([ ("getter", FuncTransformer(func=_get_title_abstract)), ("shape", Shaper(newshape=(-1,))), ("tfidf", TfidfVectorizer(min_df=3, max_df=0.1, norm="l2", ngram_range=(1, 1), stop_words="english", strip_accents="unicode", dtype=np.float32, decode_error="replace"))])), ("categories", Pipeline([ ("getter", FuncTransformer(func=_get_categories)), ("shape", Shaper(newshape=(-1,))), ("tfidf", TfidfVectorizer(norm="l2", dtype=np.float32, decode_error="replace"))])), ])), ("scaling", Normalizer()) ]) else: transformer = Pipeline([ ("getter", FuncTransformer(func=_get_title_abstract)), ("shape", Shaper(newshape=(-1,))), ("tfidf", TfidfVectorizer(min_df=3, max_df=0.1, norm="l2", ngram_range=(1, 1), stop_words="english", strip_accents="unicode", dtype=np.float32, decode_error="replace")), ("scaling", Normalizer()) ]) X = transformer.fit_transform(records) y = np.array([r[0]["decision"] for r in records]) grid = GridSearchCV(LinearSVC(), param_grid={"C": np.linspace(start=0.2, stop=0.5, num=20)}, scoring="accuracy", cv=3, verbose=3) grid.fit(X, y) return Pipeline([("transformer", transformer), ("classifier", grid.best_estimator_)]) def predict(pipeline, record, top_words=0): """Predict whether the given record is CORE/Non-CORE/Rejected. :param pipeline: A classification pipeline, as returned by ``train``. :param record: Record is expected as a dictionary with the following fields required: "title", "abstract", "categories". Example: record = {u'title': u'Effects of top compositeness', u'abstract': u'We investigate the effects of (...)' u'categories': [u'cond-mat.mes-hall', u'cond-mat.mtrl-sci']} :param top_words: The top words explaining the classifier decision. :return decision, scores: decision: CORE, Non-CORE or Rejected, as the argmax of scores scores: the decision scores if ``top_words > 0``, then ``top_core``, ``top_noncore`` and ``top_rejected`` are additionally returned. Each is a list of ``top_words`` (word, weight) pairs corresponding to the words explaining the classifier decision. 
Example: (u'Rejected', array([-1.25554232, -1.2591557, 1.17074973])) """ transformer = pipeline.steps[0][1] classifier = pipeline.steps[1][1] X = transformer.transform(np.array([[record]], dtype=np.object)) decision = classifier.predict(X)[0] scores = classifier.decision_function(X)[0] if top_words == 0: return decision, scores else: top_core, top_noncore, top_rejected = [], [], [] if len(transformer.steps) == 2: tf1 = transformer.steps[0][1].transformer_list[0][1].steps[2][1] tf2 = transformer.steps[0][1].transformer_list[1][1].steps[2][1] inv_vocabulary = {v: k for k, v in tf1.vocabulary_.items()} inv_vocabulary.update({v + len(tf1.vocabulary_): k for k, v in tf2.vocabulary_.items()}) else: tf1 = transformer.steps[2][1] inv_vocabulary = {v: k for k, v in tf1.vocabulary_.items()} for i, j in zip(*X.nonzero()): top_core.append((inv_vocabulary[j], classifier.coef_[0][j] * X[0, j])) top_noncore.append((inv_vocabulary[j], classifier.coef_[1][j] * X[0, j])) top_rejected.append((inv_vocabulary[j], classifier.coef_[2][j] * X[0, j])) top_core = sorted(top_core, reverse=True, key=lambda x: x[1])[:top_words] top_noncore = sorted(top_noncore, reverse=True, key=lambda x: x[1])[:top_words] top_rejected = sorted(top_rejected, reverse=True, key=lambda x: x[1])[:top_words] return decision, scores, top_core, top_noncore, top_rejected def closest(pipeline, records, record, n=10): """Find the closest records from the given record. :param pipeline: A classification pipeline, as returned by ``train``. :param records: Records are expected as a list of dictionaries. :param record: Record is expected as a dictionary. :param n: The number of closest records to return. :return list: The ``n`` closest records. """ transformer = pipeline.steps[0][1] X = transformer.transform(np.array(records, dtype=np.object)) X_record = transformer.transform(np.array([record], dtype=np.object)) top = np.argsort(euclidean_distances(X, X_record), axis=0) return [records[i] for i in top[:n]]
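A self-contained sketch of the train()/predict() API defined above. The corpus is synthetic: titles and abstracts are built from placeholder topicN tokens, each shared by exactly 4 of the 60 records so that it clears the min_df=3 and max_df=0.1 settings of the title/abstract tf-idf step, while the categories carry the class signal. It assumes the beard package this module depends on is installed and that the module is importable under the path recorded above.

from __future__ import print_function

from beard_server.modules.predictor.arxiv import train, predict

decisions = [u"CORE", u"Non-CORE", u"Rejected"]
categories = {u"CORE": [u"hep-ph"],
              u"Non-CORE": [u"cond-mat.mes-hall"],
              u"Rejected": [u"q-bio.PE"]}

records = []
for i in range(60):
    label = decisions[i % 3]
    token = u"topic%d" % (i // 4)          # shared by exactly 4 records
    records.append({u"title": u"A study of %s" % token,
                    u"abstract": u"We analyse %s in detail." % token,
                    u"categories": categories[label],
                    u"decision": label})

pipeline = train(records)                  # grid-searches a LinearSVC, cv=3
decision, scores = predict(pipeline, records[0])
print(decision, scores)                    # should typically come back as u'CORE'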
gpl-2.0
gfyoung/pandas
pandas/tests/scalar/test_na_scalar.py
4
7335
import pickle import numpy as np import pytest from pandas._libs.missing import NA from pandas.core.dtypes.common import is_scalar import pandas as pd import pandas._testing as tm def test_singleton(): assert NA is NA new_NA = type(NA)() assert new_NA is NA def test_repr(): assert repr(NA) == "<NA>" assert str(NA) == "<NA>" def test_format(): # GH-34740 assert format(NA) == "<NA>" assert format(NA, ">10") == " <NA>" assert format(NA, "xxx") == "<NA>" # NA is flexible, accept any format spec assert f"{NA}" == "<NA>" assert f"{NA:>10}" == " <NA>" assert f"{NA:xxx}" == "<NA>" def test_truthiness(): msg = "boolean value of NA is ambiguous" with pytest.raises(TypeError, match=msg): bool(NA) with pytest.raises(TypeError, match=msg): not NA def test_hashable(): assert hash(NA) == hash(NA) d = {NA: "test"} assert d[NA] == "test" def test_arithmetic_ops(all_arithmetic_functions): op = all_arithmetic_functions for other in [NA, 1, 1.0, "a", np.int64(1), np.nan]: if op.__name__ in ("pow", "rpow", "rmod") and isinstance(other, str): continue if op.__name__ in ("divmod", "rdivmod"): assert op(NA, other) is (NA, NA) else: if op.__name__ == "rpow": # avoid special case other += 1 assert op(NA, other) is NA def test_comparison_ops(): for other in [NA, 1, 1.0, "a", np.int64(1), np.nan, np.bool_(True)]: assert (NA == other) is NA assert (NA != other) is NA assert (NA > other) is NA assert (NA >= other) is NA assert (NA < other) is NA assert (NA <= other) is NA assert (other == NA) is NA assert (other != NA) is NA assert (other > NA) is NA assert (other >= NA) is NA assert (other < NA) is NA assert (other <= NA) is NA @pytest.mark.parametrize( "value", [ 0, 0.0, -0, -0.0, False, np.bool_(False), np.int_(0), np.float_(0), np.int_(-0), np.float_(-0), ], ) @pytest.mark.parametrize("asarray", [True, False]) def test_pow_special(value, asarray): if asarray: value = np.array([value]) result = pd.NA ** value if asarray: result = result[0] else: # this assertion isn't possible for ndarray. 
assert isinstance(result, type(value)) assert result == 1 @pytest.mark.parametrize( "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)] ) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_special(value, asarray): if asarray: value = np.array([value]) result = value ** pd.NA if asarray: result = result[0] elif not isinstance(value, (np.float_, np.bool_, np.int_)): # this assertion isn't possible with asarray=True assert isinstance(result, type(value)) assert result == value @pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float_(-1)]) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_minus_one(value, asarray): if asarray: value = np.array([value]) result = value ** pd.NA if asarray: result = result[0] assert pd.isna(result) def test_unary_ops(): assert +NA is NA assert -NA is NA assert abs(NA) is NA assert ~NA is NA def test_logical_and(): assert NA & True is NA assert True & NA is NA assert NA & False is False assert False & NA is False assert NA & NA is NA msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): NA & 5 def test_logical_or(): assert NA | True is True assert True | NA is True assert NA | False is NA assert False | NA is NA assert NA | NA is NA msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): NA | 5 def test_logical_xor(): assert NA ^ True is NA assert True ^ NA is NA assert NA ^ False is NA assert False ^ NA is NA assert NA ^ NA is NA msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): NA ^ 5 def test_logical_not(): assert ~NA is NA @pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)]) def test_arithmetic_ndarray(shape, all_arithmetic_functions): op = all_arithmetic_functions a = np.zeros(shape) if op.__name__ == "pow": a += 5 result = op(pd.NA, a) expected = np.full(a.shape, pd.NA, dtype=object) tm.assert_numpy_array_equal(result, expected) def test_is_scalar(): assert is_scalar(NA) is True def test_isna(): assert pd.isna(NA) is True assert pd.notna(NA) is False def test_series_isna(): s = pd.Series([1, NA], dtype=object) expected = pd.Series([False, True]) tm.assert_series_equal(s.isna(), expected) def test_ufunc(): assert np.log(pd.NA) is pd.NA assert np.add(pd.NA, 1) is pd.NA result = np.divmod(pd.NA, 1) assert result[0] is pd.NA and result[1] is pd.NA result = np.frexp(pd.NA) assert result[0] is pd.NA and result[1] is pd.NA def test_ufunc_raises(): msg = "ufunc method 'at'" with pytest.raises(ValueError, match=msg): np.log.at(pd.NA, 0) def test_binary_input_not_dunder(): a = np.array([1, 2, 3]) expected = np.array([pd.NA, pd.NA, pd.NA], dtype=object) result = np.logaddexp(a, pd.NA) tm.assert_numpy_array_equal(result, expected) result = np.logaddexp(pd.NA, a) tm.assert_numpy_array_equal(result, expected) # all NA, multiple inputs assert np.logaddexp(pd.NA, pd.NA) is pd.NA result = np.modf(pd.NA, pd.NA) assert len(result) == 2 assert all(x is pd.NA for x in result) def test_divmod_ufunc(): # binary in, binary out. 
a = np.array([1, 2, 3]) expected = np.array([pd.NA, pd.NA, pd.NA], dtype=object) result = np.divmod(a, pd.NA) assert isinstance(result, tuple) for arr in result: tm.assert_numpy_array_equal(arr, expected) tm.assert_numpy_array_equal(arr, expected) result = np.divmod(pd.NA, a) for arr in result: tm.assert_numpy_array_equal(arr, expected) tm.assert_numpy_array_equal(arr, expected) def test_integer_hash_collision_dict(): # GH 30013 result = {NA: "foo", hash(NA): "bar"} assert result[NA] == "foo" assert result[hash(NA)] == "bar" def test_integer_hash_collision_set(): # GH 30013 result = {NA, hash(NA)} assert len(result) == 2 assert NA in result assert hash(NA) in result def test_pickle_roundtrip(): # https://github.com/pandas-dev/pandas/issues/31847 result = pickle.loads(pickle.dumps(pd.NA)) assert result is pd.NA def test_pickle_roundtrip_pandas(): result = tm.round_trip_pickle(pd.NA) assert result is pd.NA @pytest.mark.parametrize( "values, dtype", [([1, 2, pd.NA], "Int64"), (["A", "B", pd.NA], "string")] ) @pytest.mark.parametrize("as_frame", [True, False]) def test_pickle_roundtrip_containers(as_frame, values, dtype): s = pd.Series(pd.array(values, dtype=dtype)) if as_frame: s = s.to_frame(name="A") result = tm.round_trip_pickle(s) tm.assert_equal(result, s)
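The assertions above boil down to a small set of rules for the NA singleton. A quick interactive sketch of the same semantics, assuming a pandas release that ships pd.NA (1.0 or later):

import numpy as np
import pandas as pd

print(pd.NA + 1)        # <NA>   arithmetic propagates NA
print(pd.NA == pd.NA)   # <NA>   comparisons are unknown, not True
print(pd.NA | True)     # True   three-valued (Kleene) logic
print(pd.NA & False)    # False
print(np.log(pd.NA))    # <NA>   NumPy ufuncs propagate it as well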
bsd-3-clause
VirusTotal/msticpy
msticpy/sectools/syslog_utils.py
1
9857
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ syslog_utils - Syslog parsing and utility module. Functions required to correct collect, parse and visualize syslog data. Designed to support standard linux syslog for investigations where auditd is not available. """ import datetime as dt from typing import Dict, Any import pytz import ipywidgets as widgets import pandas as pd from .._version import VERSION from ..nbtools.entityschema import GeoLocation, Host, IpAddress from ..common.exceptions import MsticpyException from ..common.utility import export from .ip_utils import convert_to_ip_entities __version__ = VERSION __author__ = "Pete Bryan" _DETECTIONS_DEF_DIR = "resources" WIDGET_DEFAULTS = { "layout": widgets.Layout(width="95%"), "style": {"description_width": "initial"}, } @export def create_host_record( syslog_df: pd.DataFrame, heartbeat_df: pd.DataFrame, az_net_df: pd.DataFrame = None ) -> Host: """ Generate host_entity record for selected computer. Parameters ---------- syslog_df : pd.DataFrame A dataframe of all syslog events for the host in the time window requried heartbeat_df : pd.DataFrame A dataframe of heartbeat data for the host az_net_df : pd.DataFrame Option dataframe of Azure network data for the host Returns ------- Host Details of the host data collected """ host_entity = Host(src_event=syslog_df.iloc[0]) applications = [] # Produce list of processes on the host that are not # part of a 'standard' linux distro _apps = syslog_df["ProcessName"].unique().tolist() for app in _apps: if app not in ( "CRON", "sudo", "snapd", "systemd-resolved", "systemd", "crontab", "systemd-timesyncd", "systemd-logind", "rsyslogd", "syslog-ng", ): applications.append(app) # Produce host_entity record mapping linux heartbeat elements to host_entity fields host_hb = heartbeat_df.iloc[0] host_entity.SourceComputerId = host_hb["SourceComputerId"] # type: ignore host_entity.OSType = host_hb["OSType"] # type: ignore host_entity.OSName = host_hb["OSName"] # type: ignore host_entity.OSVMajorersion = host_hb["OSMajorVersion"] # type: ignore host_entity.OSVMinorVersion = host_hb["OSMinorVersion"] # type: ignore host_entity.ComputerEnvironment = host_hb["ComputerEnvironment"] # type: ignore host_entity.OmsSolutions = [ # type: ignore sol.strip() for sol in host_hb["Solutions"].split(",") ] # type: ignore host_entity.Applications = applications # type: ignore host_entity.VMUUID = host_hb["VMUUID"] # type: ignore ip_entity = IpAddress() ip_entity.Address = host_hb["ComputerIP"] geoloc_entity = GeoLocation() geoloc_entity.CountryName = host_hb["RemoteIPCountry"] # type: ignore geoloc_entity.Longitude = host_hb["RemoteIPLongitude"] # type: ignore geoloc_entity.Latitude = host_hb["RemoteIPLatitude"] # type: ignore ip_entity.Location = geoloc_entity # type: ignore host_entity.IPAddress = ip_entity # type: ignore # If Azure network data present add this to host record if az_net_df is not None and not az_net_df.empty: if len(az_net_df) == 1: priv_addr_str = az_net_df["PrivateIPAddresses"].loc[0] host_entity["private_ips"] = convert_to_ip_entities(priv_addr_str) pub_addr_str = az_net_df["PublicIPAddresses"].loc[0] host_entity["public_ips"] = convert_to_ip_entities(pub_addr_str) else: if "private_ips" not in host_entity: host_entity["private_ips"] = 
[] if "public_ips" not in host_entity: host_entity["public_ips"] = [] return host_entity @export def cluster_syslog_logons_df(logon_events: pd.DataFrame) -> pd.DataFrame: """ Cluster logon sessions in syslog by start/end time based on PAM events. Parameters ---------- logon_events: pd.DataFrame A DataFrame of all syslog logon events (can be generated with LinuxSyslog.user_logon query) Returns ------- logon_sessions: pd.DataFrame A dictionary of logon sessions including start and end times and logged on user Raises ------ MsticpyException There are no logon sessions in the supplied data set """ users = [] starts = [] ends = [] ses_close_time = logon_events["TimeGenerated"].max() ses_opened = 0 ses_closed = 0 # Extract logon session opened and logon session closed data. logons_opened = ( ( logon_events[ logon_events["SyslogMessage"].str.contains("pam_unix.+session opened") ] ) .set_index("TimeGenerated") .sort_index(ascending=True) ) logons_closed = ( ( logon_events[ logon_events["SyslogMessage"].str.contains("pam_unix.+session closed") ] ) .set_index("TimeGenerated") .sort_index(ascending=True) ) if logons_opened.empty or logons_closed.empty: raise MsticpyException("There are no logon sessions in the supplied data set") # For each session identify the likely start and end times while ses_opened < len(logons_opened.index) and ses_closed < len( logons_closed.index ): ses_start = (logons_opened.iloc[ses_opened]).name ses_end = (logons_closed.iloc[ses_closed]).name # If we can identify a user for the session add this to the details if "User" in logons_opened.columns: user = (logons_opened.iloc[ses_opened]).User elif "Sudoer" in logons_opened.columns: user = (logons_opened.iloc[ses_opened]).Sudoer else: user = "Unknown" if ses_start > ses_close_time or ses_opened == 0: pass else: ses_opened += 1 continue if ses_end < ses_start: ses_closed += 1 continue users.append(user) starts.append(ses_start) ends.append(ses_end) ses_close_time = ses_end ses_closed = ses_closed + 1 ses_opened = ses_opened + 1 logon_sessions_df = pd.DataFrame({"User": users, "Start": starts, "End": ends}) return logon_sessions_df @export def risky_sudo_sessions( sudo_sessions: pd.DataFrame, risky_actions: dict = None, suspicious_actions: list = None, ) -> dict: """ Detect if a sudo session occurs at the point of a suspicious event. 
Parameters ---------- sudo_sessions: dict Dictionary of sudo sessions (as generated by cluster_syslog_logons) risky_actions: dict (Optional) Dictionary of risky sudo commands (as generated by cmd_line.risky_cmd_line) suspicious_actions: list (Optional) List of risky sudo commands (as generated by cmd_line.cmd_speed) Returns ------- risky_sessions: dict A dictionary of sudo sessions with flags denoting risk """ sessions = sudo_sessions[["User", "Start", "End"]].to_dict("index") if risky_actions is None and suspicious_actions is None: raise MsticpyException( "At least one of risky_actions or suspicious_actions must be supplied" ) # Depending on whether we have risky or suspicious acitons or both # identify sessions which these actions occur in risky_act_sessions: Dict[str, Any] = {} susp_act_sessions: Dict[str, Any] = {} if risky_actions is not None: risky_act_sessions = _find_risky_sudo_session( risky_actions=risky_actions, sudo_sessions=sessions ) if suspicious_actions is not None: susp_act_sessions = _find_suspicious_sudo_session( suspicious_actions=suspicious_actions, sudo_sessions=sessions ) return {**risky_act_sessions, **susp_act_sessions} def _normalize_to_utc(time_stamp: dt.datetime): # Normalize datetimes to UTC in case we have mixed timezones in datasets if time_stamp.tzinfo is None or time_stamp.tzinfo.utcoffset(time_stamp) is None: time_stamp = time_stamp.replace(tzinfo=pytz.UTC) else: time_stamp = time_stamp.astimezone(pytz.utc) return time_stamp def _find_risky_sudo_session(risky_actions: dict, sudo_sessions: dict): risky_sessions = {} # Determine if risky event occurs during a session time window for key, value in risky_actions.items(): for sess_key, sess_val in sudo_sessions.items(): if ( _normalize_to_utc(sess_val["Start"]) <= _normalize_to_utc(key) <= _normalize_to_utc(sess_val["End"]) ): risky_sessions.update({sess_key: value}) return risky_sessions def _find_suspicious_sudo_session(suspicious_actions: list, sudo_sessions: dict): risky_sessions = {} # Determine if suspicious event occurs during a session time window for event in suspicious_actions: for value in event.values(): for sess_key, sess_val in sudo_sessions.items(): if ( _normalize_to_utc(sess_val["Start"]) <= _normalize_to_utc(value[0]["TimeGenerated"].iloc[1]) <= _normalize_to_utc(sess_val["End"]) ): risky_sessions.update({sess_key: "Suspicious event pattern"}) return risky_sessions
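A small usage sketch for cluster_syslog_logons_df, fed with a hand-made frame instead of a real LinuxSyslog.user_logon query result. The timestamps, messages and user names are invented; it assumes msticpy (and its notebook dependencies such as ipywidgets) is installed so the module imports succeed.

import pandas as pd

from msticpy.sectools.syslog_utils import cluster_syslog_logons_df

events = pd.DataFrame({
    "TimeGenerated": pd.to_datetime([
        "2021-01-01 10:00:00", "2021-01-01 10:05:00",
        "2021-01-01 11:00:00", "2021-01-01 11:30:00"]),
    "SyslogMessage": [
        "pam_unix(sshd:session): session opened for user alice by (uid=0)",
        "pam_unix(sshd:session): session closed for user alice",
        "pam_unix(sshd:session): session opened for user bob by (uid=0)",
        "pam_unix(sshd:session): session closed for user bob"],
    "User": ["alice", "alice", "bob", "bob"],
})

sessions = cluster_syslog_logons_df(events)
print(sessions)   # one row per PAM session with User, Start and End columns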
mit
addfor/addutils
addutils/palette.py
1
5095
# The MIT License (MIT)
#
# Copyright (c) 2015 addfor s.r.l.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
Color and color palette management helper functions.

This module provides some simple functions to help with the management and use
of colors and color palettes. Although it was written to be used with Bokeh,
it doesn't really have any dependency, and can be used anywhere else it could
be useful.

Functions:
    linear_map      - map (linearly) a sequence of real values to the given palette
    sample_mpl_cmap - convert a Matplotlib-like colormap to a simple array of colors
    to_rgb_bytes    - convert a color expressed as an RGB [0.0, 1.0]-ranged tuple
                      to an RGB bytes (int 0-255) tuple
    to_hex          - convert a color expressed as an RGB [0.0, 1.0]-ranged tuple
                      to a hex representation #aabbcc

Variables:
    mpl_cmap_jet - Colormap from Matplotlib: jet (deprecated)
    mpl_cmap_hot - Colormap from Matplotlib: hot
    jet_hex, hot_hex, jet_rgb, hot_rgb
        *_hex: matplotlib colormap converted to hex representation
        *_rgb: matplotlib colormap converted to bytes (int 0-255) tuples
"""

mpl_cmap_jet = {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
                          (1, 0.5, 0.5)),
                'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
                          (0.91, 0, 0), (1, 0, 0)),
                'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65, 0, 0),
                          (1, 0, 0))}

mpl_cmap_hot = {'red':   ((0.,       0.0416, 0.0416),
                          (0.365079, 1.000000, 1.000000),
                          (1.0,      1.0, 1.0)),
                'green': ((0.,       0., 0.),
                          (0.365079, 0.000000, 0.000000),
                          (0.746032, 1.000000, 1.000000),
                          (1.0,      1.0, 1.0)),
                'blue':  ((0.,       0., 0.),
                          (0.746032, 0.000000, 0.000000),
                          (1.0,      1.0, 1.0))}


def sample(channel, pos):
    try:
        idx_b = next((idx for idx, it in enumerate(channel) if it[0] >= pos))
    except StopIteration:
        return channel[-1][1]

    idx_a = max(0, idx_b - 1)
    if idx_a == idx_b:
        return channel[idx_a][1]

    pos_a, val_a, _ = channel[idx_a]
    pos_b, val_b, _ = channel[idx_b]
    dx = (pos - pos_a) / (pos_b - pos_a)
    return val_a + dx * (val_b - val_a)


def sample_mpl_cmap(cmap, nsamples):
    channels = list(map(list, [cmap['red'], cmap['green'], cmap['blue']]))
    for chan in channels:
        # Sort stops by position
        chan.sort(key=lambda stop: stop[0])

    positions = [1.0 / nsamples * i for i in range(nsamples + 1)]
    samples = []
    for pos in positions:
        r, g, b = [sample(chan, pos) for chan in channels]
        samples.append((r, g, b))
    return samples


jet = sample_mpl_cmap(mpl_cmap_jet, 80)
hot = sample_mpl_cmap(mpl_cmap_hot, 80)


def to_rgb_bytes(rgb):
    r, g, b = rgb[:3]
    r = int(min(1, r) * 255)
    g = int(min(1, g) * 255)
    b = int(min(1, b) * 255)
    return (r, g, b)


jet_rgb = list(map(to_rgb_bytes, jet))
hot_rgb = list(map(to_rgb_bytes, hot))


def to_hex(rgb):
    return "#%02x%02x%02x" % to_rgb_bytes(rgb)


jet_hex = list(map(to_hex, jet))
hot_hex = list(map(to_hex, hot))


def linear_map(xs, palette, low=None, high=None):
    """Map (linearly) a sequence of real values to the given palette.

    Parameters:
        xs      - A list of numbers, in the range [low, high]
        palette - A list of colors

    Returns:
        A list of the same size as xs, with the color of each sample
    """
    if xs == []:
        return []
    if low is None:
        low = min(xs)
    if high is None:
        high = max(xs)

    idx = lambda x: int((float(x) - low) / (high - low) * (len(palette) - 1))
    clamped = [max(low, min(high, x)) for x in xs]
    return [palette[idx(x)] for x in clamped]
mit
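A short usage sketch for the helpers above. It assumes the module is importable as addutils.palette, as its path in this record suggests; the data values are made up for illustration.

from addutils import palette

values = [0.0, 0.25, 0.5, 0.75, 1.0]
hex_colors = palette.linear_map(values, palette.jet_hex)   # hex strings, '#00007f' at the low end
rgb_colors = palette.linear_map(values, palette.jet_rgb)   # (r, g, b) byte tuples
print(hex_colors)
print(rgb_colors)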
liangz0707/scikit-learn
sklearn/ensemble/tests/test_bagging.py
72
25573
""" Testing for the bagging ensemble module (sklearn.ensemble.bagging). """ # Author: Gilles Louppe # License: BSD 3 clause import numpy as np from sklearn.base import BaseEstimator from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.grid_search import GridSearchCV, ParameterGrid from sklearn.ensemble import BaggingClassifier, BaggingRegressor from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.svm import SVC, SVR from sklearn.pipeline import make_pipeline from sklearn.feature_selection import SelectKBest from sklearn.cross_validation import train_test_split from sklearn.datasets import load_boston, load_iris, make_hastie_10_2 from sklearn.utils import check_random_state from scipy.sparse import csc_matrix, csr_matrix rng = check_random_state(0) # also load the iris dataset # and randomly permute it iris = load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_classification(): # Check classification for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyClassifier(), Perceptron(), DecisionTreeClassifier(), KNeighborsClassifier(), SVC()]: for params in grid: BaggingClassifier(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_classification(): # Check classification for various parameter settings on sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVC, self).fit(X, y) self.data_type_ = type(X) return self rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']: # Trained on sparse format sparse_classifier = BaggingClassifier( base_estimator=CustomSVC(), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = getattr(sparse_classifier, f)(X_test_sparse) # Trained on dense format dense_classifier = BaggingClassifier( base_estimator=CustomSVC(), random_state=1, **params ).fit(X_train, y_train) dense_results = getattr(dense_classifier, f)(X_test) assert_array_equal(sparse_results, dense_results) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([t == sparse_type for t in types]) def test_regression(): # Check regression for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [0.5, 1.0], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyRegressor(), DecisionTreeRegressor(), KNeighborsRegressor(), SVR()]: for params in grid: BaggingRegressor(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_regression(): # Check regression for various parameter settings on sparse input. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) class CustomSVR(SVR): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVR, self).fit(X, y) self.data_type_ = type(X) return self parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: # Trained on sparse format sparse_classifier = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_results = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train, y_train).predict(X_test) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert_array_equal(sparse_results, dense_results) assert all([t == sparse_type for t in types]) assert_array_equal(sparse_results, dense_results) def test_bootstrap_samples(): # Test that bootstraping samples generate non-perfect base estimators. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) base_estimator = DecisionTreeRegressor().fit(X_train, y_train) # without bootstrap, all trees are perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=False, random_state=rng).fit(X_train, y_train) assert_equal(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) # with bootstrap, trees are no longer perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=True, random_state=rng).fit(X_train, y_train) assert_greater(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) def test_bootstrap_features(): # Test that bootstraping features may generate dupplicate features. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=False, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_equal(boston.data.shape[1], np.unique(features).shape[0]) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=True, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_greater(boston.data.shape[1], np.unique(features).shape[0]) def test_probability(): # Predict probabilities. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) with np.errstate(divide="ignore", invalid="ignore"): # Normal case ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(), random_state=rng).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) # Degenerate case, where some classes are missing ensemble = BaggingClassifier(base_estimator=LogisticRegression(), random_state=rng, max_samples=5).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) for base_estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier(base_estimator=base_estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingClassifier(base_estimator=base_estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_oob_score_regression(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=50, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_single_estimator(): # Check singleton ensembles. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(), n_estimators=1, bootstrap=False, bootstrap_features=False, random_state=rng).fit(X_train, y_train) clf2 = KNeighborsRegressor().fit(X_train, y_train) assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) def test_error(): # Test that it gives proper exception on deficient input. 
X, y = iris.data, iris.target base = DecisionTreeClassifier() # Test max_samples assert_raises(ValueError, BaggingClassifier(base, max_samples=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=1000).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples="foobar").fit, X, y) # Test max_features assert_raises(ValueError, BaggingClassifier(base, max_features=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=5).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features="foobar").fit, X, y) # Test support of decision_function assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function')) def test_parallel_classification(): # Check parallel classification. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) # predict_proba ensemble.set_params(n_jobs=1) y1 = ensemble.predict_proba(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y3) # decision_function ensemble = BaggingClassifier(SVC(), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) decisions1 = ensemble.decision_function(X_test) ensemble.set_params(n_jobs=2) decisions2 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions2) ensemble = BaggingClassifier(SVC(), n_jobs=1, random_state=0).fit(X_train, y_train) decisions3 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions3) def test_parallel_regression(): # Check parallel regression. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) y1 = ensemble.predict(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict(X_test) assert_array_almost_equal(y1, y3) def test_gridsearch(): # Check that bagging ensembles can be grid-searched. # Transform iris into a binary classification task X, y = iris.data, iris.target y[y == 2] = 1 # Grid search with scoring based on decision_function parameters = {'n_estimators': (1, 2), 'base_estimator__C': (1, 2)} GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) def test_base_estimator(): # Check base_estimator and its default values. 
rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, Perceptron)) # Regression X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, SVR)) def test_bagging_with_pipeline(): estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2) estimator.fit(iris.data, iris.target) class DummyZeroEstimator(BaseEstimator): def fit(self, X, y): self.classes_ = np.unique(y) return self def predict(self, X): return self.classes_[np.zeros(X.shape[0], dtype=int)] def test_bagging_sample_weight_unsupported_but_passed(): estimator = BaggingClassifier(DummyZeroEstimator()) rng = check_random_state(0) estimator.fit(iris.data, iris.target).predict(iris.data) assert_raises(ValueError, estimator.fit, iris.data, iris.target, sample_weight=rng.randint(10, size=(iris.data.shape[0]))) def test_warm_start(random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BaggingClassifier(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) def test_warm_start_smaller_n_estimators(): # Test if warm start'ed second fit with smaller n_estimators raises error. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_equal_n_estimators(): # Test that nothing happens when fitting without increasing n_estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # modify X to nonsense values, this should not change anything X_train += 1. 
assert_warns_message(UserWarning, "Warm-start fitting without increasing n_estimators does not", clf.fit, X_train, y_train) assert_array_equal(y_pred, clf.predict(X_test)) def test_warm_start_equivalence(): # warm started classifier with 5+5 estimators should be equivalent to # one classifier with 10 estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) clf_ws.fit(X_train, y_train) clf_ws.set_params(n_estimators=10) clf_ws.fit(X_train, y_train) y1 = clf_ws.predict(X_test) clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) clf.fit(X_train, y_train) y2 = clf.predict(X_test) assert_array_almost_equal(y1, y2) def test_warm_start_with_oob_score_fails(): # Check using oob_score and warm_start simultaneously fails X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) assert_raises(ValueError, clf.fit, X, y) def test_oob_score_removed_on_warm_start(): X, y = make_hastie_10_2(n_samples=2000, random_state=1) clf = BaggingClassifier(n_estimators=50, oob_score=True) clf.fit(X, y) clf.set_params(warm_start=True, oob_score=False, n_estimators=100) clf.fit(X, y) assert_raises(AttributeError, getattr, clf, "oob_score_")
bsd-3-clause
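The test suite above targets an older scikit-learn API (sklearn.cross_validation, base_estimator, the assert_* test helpers). For readers who want to reproduce the two behaviours it spends the most lines on, sparse input handling and the out-of-bag score as a generalization estimate, a condensed present-day sketch looks roughly like this; module paths follow current scikit-learn releases and are not taken from the file above.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
ens = BaggingClassifier(DecisionTreeClassifier(), n_estimators=50,
                        oob_score=True, random_state=0).fit(X, y)
# Out-of-bag score approximates held-out accuracy without a separate split
print("OOB accuracy estimate:", ens.oob_score_)
# Predictions on sparse input match predictions on the equivalent dense input
print("sparse predictions match:",
      np.array_equal(ens.predict(X), ens.predict(csr_matrix(X))))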
BeiLuoShiMen/nupic
examples/opf/tools/MirrorImageViz/mirrorImageViz.py
50
7221
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

# Author: Surabhi Gupta

import sys
import numpy as np
import matplotlib.pylab as pyl


def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
  '''Mirror Image Visualization: Shows the encoding space juxtaposed against
  the coincidence space. The encoding space is the bottom-up sensory encoding
  and the coincidence space depicts the corresponding activation of
  coincidences in the SP. Hence, the mirror image visualization is a visual
  depiction of the mapping of SP cells to the input representations.

  Note:
  * The files spBUOut and sensorBUOut are assumed to be in the output format
    used for LPF experiment outputs.
  * BU outputs for some sample datasets are provided. Specify the name of the
    dataset as an option while running this script.
  '''
  lines = activeCoincsFile.readlines()
  inputs = encodingsFile.readlines()

  w = len(inputs[0].split(' ')) - 1
  patterns = set([])
  encodings = set([])
  coincs = []        # The set of all coincidences that have won at least once
  reUsedCoincs = []

  firstLine = inputs[0].split(' ')
  size = int(firstLine.pop(0))

  spOutput = np.zeros((len(lines), 40))
  inputBits = np.zeros((len(lines), w))
  print 'Total n:', size
  print 'Total number of records in the file:', len(lines), '\n'
  print 'w:', w

  count = 0
  for x in xrange(len(lines)):
    inputSpace = []   # Encoded representation for each input

    spBUout = [int(z) for z in lines[x].split(' ')]
    spBUout.pop(0)    # The first element of each row of spBUOut is the size of the SP
    temp = set(spBUout)
    spOutput[x] = spBUout

    input = [int(z) for z in inputs[x].split(' ')]
    input.pop(0)      # The first element of each row of sensorBUout is the size of the encoding space
    tempInput = set(input)
    inputBits[x] = input

    # Creating the encoding space
    for m in xrange(size):
      if m in tempInput:
        inputSpace.append(m)
      else:
        inputSpace.append('|')   # A non-active bit

    repeatedBits = tempInput.intersection(encodings)   # Storing the bits that have been previously active
    reUsed = temp.intersection(patterns)               # Checking if any of the active cells have been previously active

    # Dividing the coincidences into two different categories.
    if len(reUsed) == 0:
      # Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
      coincs.append((count, temp, repeatedBits, inputSpace, tempInput))
    else:
      reUsedCoincs.append((count, temp, repeatedBits, inputSpace, tempInput))

    patterns = patterns.union(temp)   # Adding the active cells to the set of coincs that have been active at least once
    encodings = encodings.union(tempInput)
    count += 1

  overlap = {}
  overlapVal = 0

  seen = []
  seen = (printOverlaps(coincs, coincs, seen))
  print len(seen), 'sets of 40 cells'

  seen = printOverlaps(reUsedCoincs, coincs, seen)

  Summ = []
  for z in coincs:
    c = 0
    for y in reUsedCoincs:
      c += len(z[1].intersection(y[1]))
    Summ.append(c)
  print 'Sum: ', Summ

  for m in xrange(3):
    displayLimit = min(51, len(spOutput[m*200:]))
    if displayLimit > 0:
      drawFile(dataset,
               np.zeros([len(inputBits[:(m+1)*displayLimit]),
                         len(inputBits[:(m+1)*displayLimit])]),
               inputBits[:(m+1)*displayLimit],
               spOutput[:(m+1)*displayLimit], w, m+1)
    else:
      print 'No more records to display'
  pyl.show()


def drawFile(dataset, matrix, patterns, cells, w, fnum):
  '''The similarity of two patterns in the bit-encoding space is displayed
  alongside their similarity in the sp-coinc space.'''
  score = 0
  count = 0
  assert len(patterns) == len(cells)
  for p in xrange(len(patterns) - 1):
    matrix[p+1:, p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
    matrix[p, p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]

    score += sum(abs(np.array(matrix[p+1:, p]) - np.array(matrix[p, p+1:])))
    count += len(matrix[p+1:, p])

  print 'Score', score/count

  fig = pyl.figure(figsize=(10, 10), num=fnum)
  pyl.matshow(matrix, fignum=fnum)
  pyl.colorbar()
  pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
  pyl.xlabel('The Mirror Image Visualization for ' + dataset, fontsize=17)
  pyl.ylabel('Encoding space', fontsize=12)


def printOverlaps(comparedTo, coincs, seen):
  """ Compare the coincidences, print their overlaps and return the updated
  list of seen coincidences.

  Parameters:
  --------------------------------------------------------------------
  coincs:      Which cells are we comparing?
  comparedTo:  The set of 40 cells we are being compared to (they have no
               overlap with seen)
  seen:        Which of the cells we are comparing to have already been
               encountered. This helps glue together the unique and reused
               coincs
  """
  inputOverlap = 0
  cellOverlap = 0
  for y in comparedTo:
    closestInputs = []
    closestCells = []
    if len(seen) > 0:
      inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
      cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
      for m in xrange(len(seen)):
        if len(seen[m][1].intersection(y[4])) == inputOverlap:
          closestInputs.append(seen[m][2])
        if len(seen[m][0].intersection(y[1])) == cellOverlap:
          closestCells.append(seen[m][2])
    seen.append((y[1], y[4], y[0]))

    print 'Pattern', y[0]+1, ':', \
        ' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs), \
        'input overlap:', inputOverlap, ';', len(closestInputs), \
        'closest encodings:', ','.join(str(m+1) for m in closestInputs).ljust(15), \
        'cell overlap:', cellOverlap, ';', len(closestCells), \
        'closest set(s):', ','.join(str(m+1) for m in closestCells)

  return seen


if __name__ == '__main__':
  if len(sys.argv) < 2:   # Use basil if no dataset specified
    print ('Input files required. Read documentation for details.')
  else:
    dataset = sys.argv[1]
    activeCoincsPath = dataset + '/' + dataset + '_spBUOut.txt'
    encodingsPath = dataset + '/' + dataset + '_sensorBUOut.txt'
    activeCoincsFile = open(activeCoincsPath, 'r')
    encodingsFile = open(encodingsPath, 'r')
    analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
agpl-3.0
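Stripped of the file parsing and matplotlib plumbing, the quantity this script visualizes is pairwise overlap between sparse binary patterns, computed once for the encodings and once for the SP outputs. Below is a small Python 3 sketch of such an overlap matrix; it normalizes by pattern size rather than by the fixed encoding width the script uses, and the example patterns are invented for illustration.

import numpy as np

def overlap_matrix(patterns):
    # patterns: list of sets of active bit indices; returns % overlap matrix
    n = len(patterns)
    m = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            m[i, j] = 100.0 * len(patterns[i] & patterns[j]) / max(len(patterns[i]), 1)
    return m

encodings = [{1, 2, 3, 4}, {3, 4, 5, 6}, {7, 8, 9, 10}]
sp_cells = [{0, 9, 17}, {0, 9, 30}, {2, 5, 40}]
print(overlap_matrix(encodings))   # similarity in the encoding space
print(overlap_matrix(sp_cells))    # similarity in the coincidence space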
marionleborgne/nupic.research
projects/sequence_prediction/continuous_sequence/run_adaptive_filter.py
12
5310
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import csv
import math
import operator
from optparse import OptionParser

import adaptfilt
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

plt.ion()


def readDataSet(dataSet):
  filePath = 'data/' + dataSet + '.csv'

  if dataSet == 'nyc_taxi':
    df = pd.read_csv(filePath, header=0, skiprows=[1, 2],
                     names=['time', 'data', 'timeofday', 'dayofweek'])
    sequence = df['data']
  elif dataSet == 'sine':
    df = pd.read_csv(filePath, header=0, skiprows=[1, 2],
                     names=['time', 'data'])
    sequence = df['data']
  else:
    raise Exception('unrecognized dataset type')

  return np.array(sequence)


def _getArgs():
  parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
                              "\n\nCompare TM performance with trivial predictor using "
                              "model outputs in prediction directory "
                              "and outputting results to result directory.")
  parser.add_option("-d",
                    "--dataSet",
                    type=str,
                    default='nyc_taxi',
                    dest="dataSet",
                    help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")

  parser.add_option("-n",
                    "--trainingDataSize",
                    type=int,
                    default=6000,
                    dest="trainingDataSize",
                    help="size of training dataset")

  (options, remainder) = parser.parse_args()
  print options

  return options, remainder


def saveResultToFile(dataSet, predictedInput, algorithmName):
  inputFileName = 'data/' + dataSet + '.csv'
  inputFile = open(inputFileName, "rb")

  csvReader = csv.reader(inputFile)

  # skip header rows
  csvReader.next()
  csvReader.next()
  csvReader.next()

  outputFileName = './prediction/' + dataSet + '_' + algorithmName + '_pred.csv'
  outputFile = open(outputFileName, "w")
  csvWriter = csv.writer(outputFile)
  csvWriter.writerow(
    ['timestamp', 'data', 'prediction-' + str(predictionStep) + 'step'])
  csvWriter.writerow(['datetime', 'float', 'float'])
  csvWriter.writerow(['', '', ''])

  for i in xrange(len(sequence)):
    row = csvReader.next()
    csvWriter.writerow([row[0], row[1], predictedInput[i]])

  inputFile.close()
  outputFile.close()


def normalizeSequence(sequence):
  """
  Normalize the sequence by subtracting the mean and dividing by the standard
  deviation.
  :param sequence: a list of data samples
  :return: normalized sequence, along with its original mean and std
  """
  seq = np.array(sequence).astype('float64')

  meanSeq = np.mean(seq)
  stdSeq = np.std(seq)
  seq = (seq - np.mean(seq)) / np.std(seq)

  sequence = seq.tolist()
  return sequence, meanSeq, stdSeq


if __name__ == "__main__":
  (_options, _args) = _getArgs()
  dataSet = _options.dataSet
  numTrain = _options.trainingDataSize

  print "run adaptive filter on ", dataSet

  sequence = readDataSet(dataSet)

  # predict 5 steps ahead
  predictionStep = 5

  sequence, meanSeq, stdSeq = normalizeSequence(sequence)

  targetInput = np.zeros((len(sequence),))
  predictedInput = np.zeros((len(sequence),))

  numTrain = 6000
  filterLength = 10
  for i in xrange(numTrain, len(sequence) - predictionStep):
    y, e, w = adaptfilt.lms(sequence[(i-numTrain):(i-predictionStep+1)],
                            sequence[(i-numTrain+predictionStep):(i+1)],
                            M=filterLength, step=0.01)

    # use the resulting filter coefficients to make a prediction
    target = np.convolve(sequence[(i-filterLength):(i+1)], w)
    predictedInput[i] = target[filterLength]
    targetInput[i] = sequence[i + predictionStep]
    print "record {} value {} predicted {}".format(i, targetInput[i], predictedInput[i])

  predictedInput = (predictedInput * stdSeq) + meanSeq
  targetInput = (targetInput * stdSeq) + meanSeq
  saveResultToFile(dataSet, predictedInput, 'adaptiveFilter')

  from plot import computeAltMAPE, computeNRMSE
  MAPE = computeAltMAPE(predictedInput, targetInput, startFrom=6000)
  NRMSE = computeNRMSE(predictedInput, targetInput, startFrom=6000)
  print "MAPE {}".format(MAPE)
  print "NRMSE {}".format(NRMSE)

  # plt.figure()
  # plt.plot(targetInput)
  # plt.plot(predictedInput)
  # plt.xlim([12800, 13500])
  # plt.ylim([0, 30000])
agpl-3.0
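The script above leans on the third-party adaptfilt package for the LMS fit; the update rule itself is only a few lines of NumPy. The sketch below is a simplified stand-in rather than the adaptfilt API: it trains one length-M filter online and predicts `horizon` steps ahead, roughly mirroring the loop above. The filter length, step size, and the synthetic sine input are arbitrary choices for illustration.

import numpy as np

def lms_predict(x, M=10, step=0.01, horizon=5):
    w = np.zeros(M)
    preds = np.zeros(len(x))
    for t in range(M, len(x) - horizon):
        u = x[t - M:t][::-1]        # most recent sample first
        y = w @ u                   # filter output = prediction of x[t + horizon]
        e = x[t + horizon] - y      # prediction error
        w = w + step * e * u        # LMS weight update
        preds[t + horizon] = y
    return preds

x = np.sin(np.linspace(0, 60, 2000))
p = lms_predict(x)
print("normalized RMSE:", np.sqrt(np.mean((p[100:] - x[100:]) ** 2)) / np.std(x))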
parenthetical-e/pyentropy
docs/sphinxext/inheritance_diagram.py
98
13648
""" Defines a docutils directive for inserting inheritance diagrams. Provide the directive with one or more classes or modules (separated by whitespace). For modules, all of the classes in that module will be used. Example:: Given the following classes: class A: pass class B(A): pass class C(A): pass class D(B, C): pass class E(B): pass .. inheritance-diagram: D E Produces a graph like the following: A / \ B C / \ / E D The graph is inserted as a PNG+image map into HTML and a PDF in LaTeX. """ import inspect import os import re import subprocess try: from hashlib import md5 except ImportError: from md5 import md5 from docutils.nodes import Body, Element from docutils.parsers.rst import directives from sphinx.roles import xfileref_role def my_import(name): """Module importer - taken from the python documentation. This function allows importing names with dots in them.""" mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod class DotException(Exception): pass class InheritanceGraph(object): """ Given a list of classes, determines the set of classes that they inherit from all the way to the root "object", and then is able to generate a graphviz dot graph from them. """ def __init__(self, class_names, show_builtins=False): """ *class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph. """ self.class_names = class_names self.classes = self._import_classes(class_names) self.all_classes = self._all_classes(self.classes) if len(self.all_classes) == 0: raise ValueError("No classes found for inheritance diagram") self.show_builtins = show_builtins py_sig_re = re.compile(r'''^([\w.]*\.)? # class names (\w+) \s* $ # optionally arguments ''', re.VERBOSE) def _import_class_or_module(self, name): """ Import a class using its fully-qualified *name*. """ try: path, base = self.py_sig_re.match(name).groups() except: raise ValueError( "Invalid class or module '%s' specified for inheritance diagram" % name) fullname = (path or '') + base path = (path and path.rstrip('.')) if not path: path = base try: module = __import__(path, None, None, []) # We must do an import of the fully qualified name. Otherwise if a # subpackage 'a.b' is requested where 'import a' does NOT provide # 'a.b' automatically, then 'a.b' will not be found below. This # second call will force the equivalent of 'import a.b' to happen # after the top-level import above. my_import(fullname) except ImportError: raise ValueError( "Could not import class or module '%s' specified for inheritance diagram" % name) try: todoc = module for comp in fullname.split('.')[1:]: todoc = getattr(todoc, comp) except AttributeError: raise ValueError( "Could not find class or module '%s' specified for inheritance diagram" % name) # If a class, just return it if inspect.isclass(todoc): return [todoc] elif inspect.ismodule(todoc): classes = [] for cls in todoc.__dict__.values(): if inspect.isclass(cls) and cls.__module__ == todoc.__name__: classes.append(cls) return classes raise ValueError( "'%s' does not resolve to a class or module" % name) def _import_classes(self, class_names): """ Import a list of classes. """ classes = [] for name in class_names: classes.extend(self._import_class_or_module(name)) return classes def _all_classes(self, classes): """ Return a list of all classes that are ancestors of *classes*. 
""" all_classes = {} def recurse(cls): all_classes[cls] = None for c in cls.__bases__: if c not in all_classes: recurse(c) for cls in classes: recurse(cls) return all_classes.keys() def class_name(self, cls, parts=0): """ Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be completely general. """ module = cls.__module__ if module == '__builtin__': fullname = cls.__name__ else: fullname = "%s.%s" % (module, cls.__name__) if parts == 0: return fullname name_parts = fullname.split('.') return '.'.join(name_parts[-parts:]) def get_all_class_names(self): """ Get all of the class names involved in the graph. """ return [self.class_name(x) for x in self.all_classes] # These are the default options for graphviz default_graph_options = { "rankdir": "LR", "size": '"8.0, 12.0"' } default_node_options = { "shape": "box", "fontsize": 10, "height": 0.25, "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans", "style": '"setlinewidth(0.5)"' } default_edge_options = { "arrowsize": 0.5, "style": '"setlinewidth(0.5)"' } def _format_node_options(self, options): return ','.join(["%s=%s" % x for x in options.items()]) def _format_graph_options(self, options): return ''.join(["%s=%s;\n" % x for x in options.items()]) def generate_dot(self, fd, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Generate a graphviz dot graph from the classes that were passed in to __init__. *fd* is a Python file-like object to write to. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls *graph_options*, *node_options*, *edge_options* are dictionaries containing key/value pairs to pass on as graphviz properties. """ g_options = self.default_graph_options.copy() g_options.update(graph_options) n_options = self.default_node_options.copy() n_options.update(node_options) e_options = self.default_edge_options.copy() e_options.update(edge_options) fd.write('digraph %s {\n' % name) fd.write(self._format_graph_options(g_options)) for cls in self.all_classes: if not self.show_builtins and cls in __builtins__.values(): continue name = self.class_name(cls, parts) # Write the node this_node_options = n_options.copy() url = urls.get(self.class_name(cls)) if url is not None: this_node_options['URL'] = '"%s"' % url fd.write(' "%s" [%s];\n' % (name, self._format_node_options(this_node_options))) # Write the edges for base in cls.__bases__: if not self.show_builtins and base in __builtins__.values(): continue base_name = self.class_name(base, parts) fd.write(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_options(e_options))) fd.write('}\n') def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. """ try: dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) except OSError: raise DotException("Could not execute 'dot'. 
Are you sure you have 'graphviz' installed?") except ValueError: raise DotException("'dot' called with invalid arguments") except: raise DotException("Unexpected error calling 'dot'") self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options) dot.stdin.close() result = dot.stdout.read() returncode = dot.wait() if returncode != 0: raise DotException("'dot' returned the errorcode %d" % returncode) return result class inheritance_diagram(Body, Element): """ A docutils node to use as a placeholder for the inheritance diagram. """ pass def inheritance_diagram_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): """ Run when the inheritance_diagram directive is first encountered. """ node = inheritance_diagram() class_names = arguments # Create a graph starting with the list of classes graph = InheritanceGraph(class_names) # Create xref nodes for each target of the graph's image map and # add them to the doc tree so that Sphinx can resolve the # references to real URLs later. These nodes will eventually be # removed from the doctree after we're done with them. for name in graph.get_all_class_names(): refnodes, x = xfileref_role( 'class', ':class:`%s`' % name, name, 0, state) node.extend(refnodes) # Store the graph object so we can use it to generate the # dot file later node['graph'] = graph # Store the original content for use as a hash node['parts'] = options.get('parts', 0) node['content'] = " ".join(class_names) return [node] def get_graph_hash(node): return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] def html_output_graph(self, node): """ Output the graph for HTML. This will insert a PNG with clickable image map. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash path = '_images' dest_path = os.path.join(setup.app.builder.outdir, path) if not os.path.exists(dest_path): os.makedirs(dest_path) png_path = os.path.join(dest_path, name + ".png") path = setup.app.builder.imgpath # Create a mapping from fully-qualified class names to URLs. urls = {} for child in node: if child.get('refuri') is not None: urls[child['reftitle']] = child.get('refuri') elif child.get('refid') is not None: urls[child['reftitle']] = '#' + child.get('refid') # These arguments to dot will save a PNG file to disk and write # an HTML image map to stdout. image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'], name, parts, urls) return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' % (path, name, name, image_map)) def latex_output_graph(self, node): """ Output the graph for LaTeX. This will insert a PDF. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images')) if not os.path.exists(dest_path): os.makedirs(dest_path) pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf")) graph.run_dot(['-Tpdf', '-o%s' % pdf_path], name, parts, graph_options={'size': '"6.0,6.0"'}) return '\n\\includegraphics{%s}\n\n' % pdf_path def visit_inheritance_diagram(inner_func): """ This is just a wrapper around html/latex_output_graph to make it easier to handle errors and insert warnings. 
""" def visitor(self, node): try: content = inner_func(self, node) except DotException, e: # Insert the exception as a warning in the document warning = self.document.reporter.warning(str(e), line=node.line) warning.parent = node node.children = [warning] else: source = self.document.attributes['source'] self.body.append(content) node.children = [] return visitor def do_nothing(self, node): pass def setup(app): setup.app = app setup.confdir = app.confdir app.add_node( inheritance_diagram, latex=(visit_inheritance_diagram(latex_output_graph), do_nothing), html=(visit_inheritance_diagram(html_output_graph), do_nothing)) app.add_directive( 'inheritance-diagram', inheritance_diagram_directive, False, (1, 100, 0), parts = directives.nonnegative_int)
gpl-2.0
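Most of the file above is Sphinx/docutils glue; the core idea is simply "walk __bases__ and emit Graphviz DOT". A standalone sketch of that core follows; the class names are toy examples, and the output can be piped through `dot -Tpng` if Graphviz is installed.

def inheritance_dot(*classes):
    # Collect (base, subclass) edges by walking __bases__, skipping object
    seen, edges = set(), set()
    def walk(cls):
        if cls in seen or cls is object:
            return
        seen.add(cls)
        for base in cls.__bases__:
            if base is not object:
                edges.add((base.__name__, cls.__name__))
            walk(base)
    for cls in classes:
        walk(cls)
    lines = ['digraph inheritance {', '  rankdir=LR;']
    lines += ['  "%s" -> "%s";' % e for e in sorted(edges)]
    return '\n'.join(lines + ['}'])

class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

print(inheritance_dot(D))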
romanorac/discomll
discomll/tests/tests_classification.py
1
4525
import unittest

import numpy as np
import Orange
from disco.core import result_iterator

import datasets


class Tests_Classification(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        import chunk_testdata
        from disco import ddfs

        ddfs = ddfs.DDFS()

        if not ddfs.exists("test:ex3"):
            print "Chunking test datasets to DDFS..."
            chunk_testdata.chunk_testdata()

    def test_naivebayes_breastcancer(self):
        # python -m unittest tests_classification.Tests_Classification.test_naivebayes_breastcancer
        from discomll.classification import naivebayes

        train_data1, test_data1 = datasets.breastcancer_disc_orange()
        train_data2, test_data2 = datasets.breastcancer_disc_discomll()

        for m in range(3):
            learner = Orange.classification.bayes.NaiveLearner(m=m)
            classifier = learner(train_data1)
            predictions1 = [classifier(inst, Orange.classification.Classifier.GetBoth) for inst in test_data1]
            predictions1_target = [v[0].value for v in predictions1]
            predictions1_probs = [v[1].values() for v in predictions1]

            fitmodel_url = naivebayes.fit(train_data2)
            predictions_url = naivebayes.predict(test_data2, fitmodel_url, m=m)

            predictions2_target = []
            predictions2_probs = []
            for k, v in result_iterator(predictions_url):
                predictions2_target.append(v[0])
                predictions2_probs.append(v[1])

            self.assertListEqual(predictions1_target, predictions2_target)
            self.assertTrue(np.allclose(predictions1_probs, predictions2_probs))

    def test_naivebayes_breastcancer_cont(self):
        # python -m unittest tests_classification.Tests_Classification.test_naivebayes_breastcancer_cont
        from sklearn.naive_bayes import GaussianNB
        from discomll.classification import naivebayes

        x_train, y_train, x_test, y_test = datasets.breastcancer_cont(replication=1)
        train_data, test_data = datasets.breastcancer_cont_discomll(replication=1)

        clf = GaussianNB()
        probs_log1 = clf.fit(x_train, y_train).predict_proba(x_test)

        fitmodel_url = naivebayes.fit(train_data)
        prediction_url = naivebayes.predict(test_data, fitmodel_url)
        probs_log2 = [v[1] for _, v in result_iterator(prediction_url)]

        self.assertTrue(np.allclose(probs_log1, probs_log2, atol=1e-8))

    def test_log_reg_thetas(self):
        # python tests_classification.py Tests_Classification.test_log_reg_thetas
        from discomll.classification import logistic_regression

        train_data1 = datasets.ex4_orange()
        train_data2 = datasets.ex4_discomll()

        lr = Orange.classification.logreg.LogRegFitter_Cholesky(train_data1)
        thetas1 = lr[1]

        thetas_url = logistic_regression.fit(train_data2)
        thetas2 = [v for k, v in result_iterator(thetas_url["logreg_fitmodel"]) if k == "thetas"]

        self.assertTrue(np.allclose(thetas1, thetas2))

    def test_log_reg(self):
        # python tests_classification.py Tests_Classification.test_log_reg
        from discomll.classification import logistic_regression

        train_data1, test_data1 = datasets.breastcancer_cont_orange()
        train_data2, test_data2 = datasets.breastcancer_cont_discomll()

        learner = Orange.classification.logreg.LogRegLearner(fitter=Orange.classification.logreg.LogRegFitter_Cholesky)
        classifier = learner(train_data1)
        thetas1 = classifier.beta

        predictions1 = []
        probabilities1 = []
        for inst in test_data1:
            target, probs = classifier(inst, Orange.classification.Classifier.GetBoth)
            predictions1.append(target.value)
            probabilities1.append(probs.values())

        thetas_url = logistic_regression.fit(train_data2, alpha=1e-8, max_iterations=10)
        thetas2 = [v for k, v in result_iterator(thetas_url["logreg_fitmodel"]) if k == "thetas"]
        results_url = logistic_regression.predict(test_data2, thetas_url)

        predictions2 = []
        probabilities2 = []
        for k, v in result_iterator(results_url):
            predictions2.append(v[0])
            probabilities2.append(v[1])

        self.assertTrue(np.allclose(thetas1, thetas2))
        self.assertTrue(np.allclose(probabilities1, probabilities2, atol=1e-5))
        self.assertListEqual(predictions1, predictions2)


if __name__ == '__main__':
    unittest.main()
apache-2.0
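The common pattern in these tests is to fit the same model with two independent implementations and check that the predictions agree to numerical tolerance. Orange and Disco are heavy dependencies, so here is a self-contained variant of the Gaussian naive Bayes check, comparing scikit-learn against a direct implementation of the same log-joint formula; the dataset and tolerance are arbitrary, and this is an illustration of the testing pattern, not part of discomll.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB

X, y = load_iris(return_X_y=True)

def gnb_log_joint(X, y, Xq, eps=1e-9):
    # log P(c) + sum_f log N(x_f | mu_cf, var_cf), one column per class
    out = []
    for c in np.unique(y):
        Xc = X[y == c]
        mu, var = Xc.mean(axis=0), Xc.var(axis=0) + eps
        ll = -0.5 * np.sum(np.log(2 * np.pi * var) + (Xq - mu) ** 2 / var, axis=1)
        out.append(np.log(len(Xc) / len(X)) + ll)
    return np.column_stack(out)

ref = GaussianNB().fit(X, y)
mine = np.argmax(gnb_log_joint(X, y, X), axis=1)
# Variance smoothing differs slightly, so compare label agreement rather than
# exact probabilities
print("agreement with scikit-learn:", np.mean(mine == ref.predict(X)))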