Dataset columns:
  repo_name: string, length 6 to 100
  path: string, length 4 to 191
  copies: string, length 1 to 3
  size: string, length 4 to 6
  content: string, length 935 to 727k
  license: string, 15 classes
Eric89GXL/scipy
scipy/signal/waveforms.py
8
21065
# Author: Travis Oliphant # 2003 # # Feb. 2010: Updated by Warren Weckesser: # Rewrote much of chirp() # Added sweep_poly() from __future__ import division, print_function, absolute_import import numpy as np from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ exp, cos, sin, polyval, polyint from scipy._lib.six import string_types __all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', 'unit_impulse'] def sawtooth(t, width=1): """ Return a periodic sawtooth or triangle waveform. The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like Time. width : array_like, optional Width of the rising ramp as a proportion of the total cycle. Default is 1, producing a rising ramp, while 0 produces a falling ramp. `width` = 0.5 produces a triangle wave. If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the sawtooth waveform. Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500) >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) """ t, w = asarray(t), asarray(width) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # take t modulo 2*pi tmod = mod(t, 2 * pi) # on the interval 0 to width*2*pi function is # tmod / (pi*w) - 1 mask2 = (1 - mask1) & (tmod < w * 2 * pi) tsub = extract(mask2, tmod) wsub = extract(mask2, w) place(y, mask2, tsub / (pi * wsub) - 1) # on the interval width*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) tsub = extract(mask3, tmod) wsub = extract(mask3, w) place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) return y def square(t, duty=0.5): """ Return a periodic square-wave waveform. The square wave has a period ``2*pi``, has value +1 from 0 to ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in the interval [0,1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like The input time array. duty : array_like, optional Duty cycle. Default is 0.5 (50% duty cycle). If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the square waveform. 
Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500, endpoint=False) >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) >>> plt.ylim(-2, 2) A pulse-width modulated sine wave: >>> plt.figure() >>> sig = np.sin(2 * np.pi * t) >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) >>> plt.subplot(2, 1, 1) >>> plt.plot(t, sig) >>> plt.subplot(2, 1, 2) >>> plt.plot(t, pwm) >>> plt.ylim(-1.5, 1.5) """ t, w = asarray(t), asarray(duty) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # on the interval 0 to duty*2*pi function is 1 tmod = mod(t, 2 * pi) mask2 = (1 - mask1) & (tmod < w * 2 * pi) place(y, mask2, 1) # on the interval duty*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) place(y, mask3, -1) return y def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False): """ Return a Gaussian modulated sinusoid: ``exp(-a t^2) exp(1j*2*pi*fc*t).`` If `retquad` is True, then return the real and imaginary parts (in-phase and quadrature). If `retenv` is True, then return the envelope (unmodulated signal). Otherwise, return the real part of the modulated sinusoid. Parameters ---------- t : ndarray or the string 'cutoff' Input array. fc : int, optional Center frequency (e.g. Hz). Default is 1000. bw : float, optional Fractional bandwidth in frequency domain of pulse (e.g. Hz). Default is 0.5. bwr : float, optional Reference level at which fractional bandwidth is calculated (dB). Default is -6. tpr : float, optional If `t` is 'cutoff', then the function returns the cutoff time for when the pulse amplitude falls below `tpr` (in dB). Default is -60. retquad : bool, optional If True, return the quadrature (imaginary) as well as the real part of the signal. Default is False. retenv : bool, optional If True, return the envelope of the signal. Default is False. Returns ------- yI : ndarray Real part of signal. Always returned. yQ : ndarray Imaginary part of signal. Only returned if `retquad` is True. yenv : ndarray Envelope of signal. Only returned if `retenv` is True. See Also -------- scipy.signal.morlet Examples -------- Plot real component, imaginary component, and envelope for a 5 Hz pulse, sampled at 100 Hz for 2 seconds: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) >>> plt.plot(t, i, t, q, t, e, '--') """ if fc < 0: raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc) if bw <= 0: raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." 
% bw) if bwr >= 0: raise ValueError("Reference level for bandwidth (bwr=%.2f) must " "be < 0 dB" % bwr) # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) ref = pow(10.0, bwr / 20.0) # fdel = fc*bw/2: g(fdel) = ref --- solve this for a # # pi^2/a * fc^2 * bw^2 /4=-log(ref) a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) if isinstance(t, string_types): if t == 'cutoff': # compute cut_off point # Solve exp(-a tc**2) = tref for tc # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) if tpr >= 0: raise ValueError("Reference level for time cutoff must " "be < 0 dB") tref = pow(10.0, tpr / 20.0) return sqrt(-log(tref) / a) else: raise ValueError("If `t` is a string, it must be 'cutoff'") yenv = exp(-a * t * t) yI = yenv * cos(2 * pi * fc * t) yQ = yenv * sin(2 * pi * fc * t) if not retquad and not retenv: return yI if not retquad and retenv: return yI, yenv if retquad and not retenv: return yI, yQ if retquad and retenv: return yI, yQ, yenv def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True): """Frequency-swept cosine generator. In the following, 'Hz' should be interpreted as 'cycles per unit'; there is no requirement here that the unit is one second. The important distinction is that the units of rotation are cycles, not radians. Likewise, `t` could be a measurement of space instead of time. Parameters ---------- t : array_like Times at which to evaluate the waveform. f0 : float Frequency (e.g. Hz) at time t=0. t1 : float Time at which `f1` is specified. f1 : float Frequency (e.g. Hz) of the waveform at time `t1`. method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional Kind of frequency sweep. If not given, `linear` is assumed. See Notes below for more details. phi : float, optional Phase offset, in degrees. Default is 0. vertex_zero : bool, optional This parameter is only used when `method` is 'quadratic'. It determines whether the vertex of the parabola that is the graph of the frequency is at t=0 or t=t1. Returns ------- y : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. See Also -------- sweep_poly Notes ----- There are four options for the `method`. The following formulas give the instantaneous frequency (in Hz) of the signal generated by `chirp()`. For convenience, the shorter names shown below may also be used. linear, lin, li: ``f(t) = f0 + (f1 - f0) * t / t1`` quadratic, quad, q: The graph of the frequency f(t) is a parabola through (0, f0) and (t1, f1). By default, the vertex of the parabola is at (0, f0). If `vertex_zero` is False, then the vertex is at (t1, f1). The formula is: if vertex_zero is True: ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` else: ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` To use a more general quadratic function, or an arbitrary polynomial, use the function `scipy.signal.sweep_poly`. logarithmic, log, lo: ``f(t) = f0 * (f1/f0)**(t/t1)`` f0 and f1 must be nonzero and have the same sign. This signal is also known as a geometric or exponential chirp. hyperbolic, hyp: ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` f0 and f1 must be nonzero. 
Examples -------- The following will be used in the examples: >>> from scipy.signal import chirp, spectrogram >>> import matplotlib.pyplot as plt For the first example, we'll plot the waveform for a linear chirp from 6 Hz to 1 Hz over 10 seconds: >>> t = np.linspace(0, 10, 5001) >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') >>> plt.plot(t, w) >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") >>> plt.xlabel('t (sec)') >>> plt.show() For the remaining examples, we'll use higher frequency ranges, and demonstrate the result using `scipy.signal.spectrogram`. We'll use a 10 second interval sampled at 8000 Hz. >>> fs = 8000 >>> T = 10 >>> t = np.linspace(0, T, T*fs, endpoint=False) Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds (vertex of the parabolic curve of the frequency is at t=0): >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds (vertex of the parabolic curve of the frequency is at t=10): >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic', ... vertex_zero=False) >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250\\n' + ... '(vertex_zero=False)') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Logarithmic chirp from 1500 Hz to 250 Hz over 10 seconds: >>> w = chirp(t, f0=1500, f1=250, t1=10, method='logarithmic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Logarithmic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Hyperbolic chirp from 1500 Hz to 250 Hz over 10 seconds: >>> w = chirp(t, f0=1500, f1=250, t1=10, method='hyperbolic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Hyperbolic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() """ # 'phase' is computed in _chirp_phase, to make testing easier. phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) # Convert phi to radians. phi *= pi / 180 return cos(phase + phi) def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): """ Calculate the phase used by `chirp` to generate its output. See `chirp` for a description of the arguments. 
""" t = asarray(t) f0 = float(f0) t1 = float(t1) f1 = float(f1) if method in ['linear', 'lin', 'li']: beta = (f1 - f0) / t1 phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) elif method in ['quadratic', 'quad', 'q']: beta = (f1 - f0) / (t1 ** 2) if vertex_zero: phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) else: phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) elif method in ['logarithmic', 'log', 'lo']: if f0 * f1 <= 0.0: raise ValueError("For a logarithmic chirp, f0 and f1 must be " "nonzero and have the same sign.") if f0 == f1: phase = 2 * pi * f0 * t else: beta = t1 / log(f1 / f0) phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) elif method in ['hyperbolic', 'hyp']: if f0 == 0 or f1 == 0: raise ValueError("For a hyperbolic chirp, f0 and f1 must be " "nonzero.") if f0 == f1: # Degenerate case: constant frequency. phase = 2 * pi * f0 * t else: # Singular point: the instantaneous frequency blows up # when t == sing. sing = -f1 * t1 / (f0 - f1) phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) else: raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," " or 'hyperbolic', but a value of %r was given." % method) return phase def sweep_poly(t, poly, phi=0): """ Frequency-swept cosine generator, with a time-dependent frequency. This function generates a sinusoidal function whose instantaneous frequency varies with time. The frequency at time `t` is given by the polynomial `poly`. Parameters ---------- t : ndarray Times at which to evaluate the waveform. poly : 1-D array_like or instance of numpy.poly1d The desired frequency expressed as a polynomial. If `poly` is a list or ndarray of length n, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of numpy.poly1d, then the instantaneous frequency is ``f(t) = poly(t)`` phi : float, optional Phase offset, in degrees, Default: 0. Returns ------- sweep_poly : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. See Also -------- chirp Notes ----- .. versionadded:: 0.8.0 If `poly` is a list or ndarray of length `n`, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is: ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of `numpy.poly1d`, then the instantaneous frequency is: ``f(t) = poly(t)`` Finally, the output `s` is: ``cos(phase + (pi/180)*phi)`` where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, ``f(t)`` as defined above. Examples -------- Compute the waveform with instantaneous frequency:: f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 over the interval 0 <= t <= 10. >>> from scipy.signal import sweep_poly >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) >>> t = np.linspace(0, 10, 5001) >>> w = sweep_poly(t, p) Plot it: >>> import matplotlib.pyplot as plt >>> plt.subplot(2, 1, 1) >>> plt.plot(t, w) >>> plt.title("Sweep Poly\\nwith frequency " + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") >>> plt.subplot(2, 1, 2) >>> plt.plot(t, p(t), 'r', label='f(t)') >>> plt.legend() >>> plt.xlabel('t') >>> plt.tight_layout() >>> plt.show() """ # 'phase' is computed in _sweep_poly_phase, to make testing easier. phase = _sweep_poly_phase(t, poly) # Convert to radians. 
phi *= pi / 180 return cos(phase + phi) def _sweep_poly_phase(t, poly): """ Calculate the phase used by sweep_poly to generate its output. See `sweep_poly` for a description of the arguments. """ # polyint handles lists, ndarrays and instances of poly1d automatically. intpoly = polyint(poly) phase = 2 * pi * polyval(intpoly, t) return phase def unit_impulse(shape, idx=None, dtype=float): """ Unit impulse signal (discrete delta function) or unit basis vector. Parameters ---------- shape : int or tuple of int Number of samples in the output (1-D), or a tuple that represents the shape of the output (N-D). idx : None or int or tuple of int or 'mid', optional Index at which the value is 1. If None, defaults to the 0th element. If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in all dimensions. If an int, the impulse will be at `idx` in all dimensions. dtype : data-type, optional The desired data-type for the array, e.g., ``numpy.int8``. Default is ``numpy.float64``. Returns ------- y : ndarray Output array containing an impulse signal. Notes ----- The 1D case is also known as the Kronecker delta. .. versionadded:: 0.19.0 Examples -------- An impulse at the 0th element (:math:`\\delta[n]`): >>> from scipy import signal >>> signal.unit_impulse(8) array([ 1., 0., 0., 0., 0., 0., 0., 0.]) Impulse offset by 2 samples (:math:`\\delta[n-2]`): >>> signal.unit_impulse(7, 2) array([ 0., 0., 1., 0., 0., 0., 0.]) 2-dimensional impulse, centered: >>> signal.unit_impulse((3, 3), 'mid') array([[ 0., 0., 0.], [ 0., 1., 0.], [ 0., 0., 0.]]) Impulse at (2, 2), using broadcasting: >>> signal.unit_impulse((4, 4), 2) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 0.]]) Plot the impulse response of a 4th-order Butterworth lowpass filter: >>> imp = signal.unit_impulse(100, 'mid') >>> b, a = signal.butter(4, 0.2) >>> response = signal.lfilter(b, a, imp) >>> import matplotlib.pyplot as plt >>> plt.plot(np.arange(-50, 50), imp) >>> plt.plot(np.arange(-50, 50), response) >>> plt.margins(0.1, 0.1) >>> plt.xlabel('Time [samples]') >>> plt.ylabel('Amplitude') >>> plt.grid(True) >>> plt.show() """ out = zeros(shape, dtype) shape = np.atleast_1d(shape) if idx is None: idx = (0,) * len(shape) elif idx == 'mid': idx = tuple(shape // 2) elif not hasattr(idx, "__iter__"): idx = (idx,) * len(shape) out[idx] = 1 return out
bsd-3-clause
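The `waveforms.py` file above documents `chirp` and `sweep_poly` mostly through plotting examples. As a quick cross-check of the two APIs (the time grid and frequencies below are arbitrary choices, not taken from the module), a linear chirp and a degree-one `sweep_poly` with the same instantaneous frequency should produce identical samples:

```python
import numpy as np
from scipy.signal import chirp, sweep_poly

t = np.linspace(0, 10, 5001)

# Linear chirp sweeping from 6 Hz at t=0 down to 1 Hz at t=10.
w_chirp = chirp(t, f0=6, t1=10, f1=1, method='linear')

# The same sweep expressed as the degree-1 frequency polynomial
# f(t) = -0.5*t + 6, handed to sweep_poly.
w_poly = sweep_poly(t, [-0.5, 6.0])

# Both phases integrate to 2*pi*(6*t - 0.25*t**2), so the signals agree.
print(np.allclose(w_chirp, w_poly))  # expected: True
```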
GitYiheng/reinforcement_learning_test
test00_previous_files/pendulum_test/matplotlib_test_100920170006.py
1
2209
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation

G = 9.8  # acceleration due to gravity, in m/s^2
L1 = 1.0  # length of pendulum 1 in m
L2 = 1.0  # length of pendulum 2 in m
M1 = 1.0  # mass of pendulum 1 in kg
M2 = 1.0  # mass of pendulum 2 in kg


def derivs(state, t):
    dydx = np.zeros_like(state)
    dydx[0] = state[1]

    del_ = state[2] - state[0]
    den1 = (M1 + M2)*L1 - M2*L1*cos(del_)*cos(del_)
    dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_) +
               M2*G*sin(state[2])*cos(del_) +
               M2*L2*state[3]*state[3]*sin(del_) -
               (M1 + M2)*G*sin(state[0]))/den1

    dydx[2] = state[3]

    den2 = (L2/L1)*den1
    dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_) +
               (M1 + M2)*G*sin(state[0])*cos(del_) -
               (M1 + M2)*L1*state[1]*state[1]*sin(del_) -
               (M1 + M2)*G*sin(state[2]))/den2

    return dydx

# create a time array from 0..20 sampled at 0.05 second steps
dt = 0.05
t = np.arange(0.0, 20, dt)

# th1 and th2 are the initial angles (degrees)
# w1 and w2 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0

# initial state
state = np.radians([th1, w1, th2, w2])

# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)

x1 = L1*sin(y[:, 0])
y1 = -L1*cos(y[:, 0])

x2 = L2*sin(y[:, 2]) + x1
y2 = -L2*cos(y[:, 2]) + y1

fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.grid()

line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)


def init():
    line.set_data([], [])
    time_text.set_text('')
    return line, time_text


def animate(i):
    thisx = [0, x1[i], x2[i]]
    thisy = [0, y1[i], y2[i]]

    line.set_data(thisx, thisy)
    time_text.set_text(time_template % (i*dt))
    return line, time_text

ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
                              interval=25, blit=True, init_func=init)

# ani.save('double_pendulum.mp4', fps=15)
plt.show()
mit
trankmichael/scikit-learn
sklearn/linear_model/tests/test_ransac.py
216
13290
import numpy as np from numpy.testing import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises_regexp from scipy import sparse from sklearn.utils.testing import assert_less from sklearn.linear_model import LinearRegression, RANSACRegressor from sklearn.linear_model.ransac import _dynamic_max_trials # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) 
ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), np.zeros(100)) def test_ransac_resid_thresh_no_inliers(): # When residual_threshold=0.0 there are no inliers and a # ValueError with a message should be raised base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.0, random_state=0) assert_raises_regexp(ValueError, "No inliers.*residual_threshold.*0\.0", ransac_estimator.fit, X, y) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. 
and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y)
bsd-3-clause
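The test file above drives `RANSACRegressor` against a straight line with three planted outliers. A minimal fitting sketch using the same construction (and the older scikit-learn API of this file, where the base estimator is passed as the first argument) looks like this:

```python
import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

# Same setup as the test module: a clean line plus three planted outliers.
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
data[10, :] = (1000, 1000)
data[30, :] = (-1000, -1000)
data[200, :] = (-100, -50)
X, y = data[:, 0][:, np.newaxis], data[:, 1]

ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                         residual_threshold=5, random_state=0)
ransac.fit(X, y)

# The planted outliers should be the only points flagged as non-inliers.
print(np.flatnonzero(~ransac.inlier_mask_))  # expected: [ 10  30 200]
print(ransac.estimator_.coef_)               # close to the true slope 0.2
```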
manns/pyspread
pyspread/interfaces/pys.py
1
19437
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright Martin Manns # Distributed under the terms of the GNU General Public License # -------------------------------------------------------------------- # pyspread is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pyspread is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyspread. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------- """ This file contains interfaces to the native pys file format. PysReader and PysWriter classed are structured into the following sections: * shape * code * attributes * row_heights * col_widths * macros **Provides** * :func:`wxcolor2rgb` * :dict:`wx2qt_fontweights` * :dict:`wx2qt_fontstyles` * :class:`PysReader` * :class:`PysWriter` """ from builtins import str, map, object import ast from base64 import b64decode, b85encode from collections import OrderedDict from typing import Any, BinaryIO, Callable, Iterable, Tuple try: from pyspread.lib.attrdict import AttrDict from pyspread.lib.selection import Selection from pyspread.model.model import CellAttribute, CodeArray except ImportError: from lib.attrdict import AttrDict from lib.selection import Selection from model.model import CellAttribute, CodeArray def wxcolor2rgb(wxcolor: int) -> Tuple[int, int, int]: """Returns red, green, blue for given wxPython binary color value :param wxcolor: Color value from wx.Color """ red = wxcolor >> 16 green = wxcolor - (red << 16) >> 8 blue = wxcolor - (red << 16) - (green << 8) return red, green, blue wx2qt_fontweights = { 90: 50, # wx.FONTWEIGHT_NORMAL 91: 25, # wx.FONTWEIGHT_LIGHT 92: 75, # wx.FONTWEIGHT_BOLD 93: 87, # wx.FONTWEIGHT_MAX } wx2qt_fontstyles = { 90: 0, # wx.FONTSTYLE_NORMAL 93: 1, # wx.FONTSTYLE_ITALIC 94: 1, # wx.FONTSTYLE_SLANT 95: 2, # wx.FONTSTYLE_MAX } class PysReader: """Reads pys v2.0 file into a code_array""" def __init__(self, pys_file: BinaryIO, code_array: CodeArray): """ :param pys_file: The pys or pysu file to be read :param code_array: Target code_array """ self.pys_file = pys_file self.code_array = code_array self._section2reader = { "[Pyspread save file version]\n": self._pys_version, "[shape]\n": self._pys2shape, "[grid]\n": self._pys2code, "[attributes]\n": self._pys2attributes, "[row_heights]\n": self._pys2row_heights, "[col_widths]\n": self._pys2col_widths, "[macros]\n": self._pys2macros, } # When converting old versions, cell attributes are rquired that # take place after the cell attribute readout self.cell_attributes_postfixes = [] def __iter__(self): """Iterates over self.pys_file, replacing everything in code_array""" state = None # Reset pys_file to start to enable multiple calls of this method self.pys_file.seek(0) for line in self.pys_file: line = line.decode("utf8") if line in self._section2reader: state = line elif state is not None: self._section2reader[state](line) yield line # Apply cell attributes post fixes for cell_attribute in self.cell_attributes_postfixes: self.code_array.cell_attributes.append(cell_attribute) # Decorators def version_handler(method: Callable) -> 
Callable: """Chooses method`_10` of method if version < 2.0 :param method: Method to be replaced in case of old pys file version """ def new_method(self, *args, **kwargs): if self.version <= 1.0: method10 = getattr(self, method.__name__+"_10") method10(*args, **kwargs) else: method(self, *args, **kwargs) return new_method # Helpers def _split_tidy(self, string: str, maxsplit: int = None) -> str: """Rstrips string for \n and splits string for \t :param string: String to be rstripped and split :param maxsplit: Maximum number of splits """ if maxsplit is None: return string.rstrip("\n").split("\t") else: return string.rstrip("\n").split("\t", maxsplit) def _get_key(self, *keystrings: str) -> Tuple[int, ...]: """Returns int key tuple from key string list :param keystrings: Strings that contain integers that are key elements """ return tuple(map(int, keystrings)) # Sections def _pys_version(self, line: str): """pys file version including assertion :param line: Pys file line to be parsed """ self.version = float(line.strip()) if self.version > 2.0: # Abort if file version not supported msg = "File version {version} unsupported (> 2.0)." raise ValueError(msg.format(version=line.strip())) def _pys2shape(self, line: str): """Updates shape in code_array :param line: Pys file line to be parsed """ shape = self._get_key(*self._split_tidy(line)) if any(dim <= 0 for dim in shape): # Abort if any axis is 0 or less msg = "Code array has invalid shape {shape}." raise ValueError(msg.format(shape=shape)) self.code_array.shape = shape def _code_convert_1_2(self, key: Tuple[int, int, int], code: str) -> str: """Converts chart and image code from v1.0 to v2.0 :param key: Key of cell with code :param code: Code in cell to be converted """ def get_image_code(image_data: str, width: int, height: int) -> str: """Returns code string for v2.0 :param image_data: b85encoded image data :param width: Image width :param height: Image height """ image_buffer_tpl = 'bz2.decompress(base64.b85decode({data}))' image_array_tpl = 'numpy.frombuffer({buffer}, dtype="uint8")' image_matrix_tpl = '{array}.reshape({height}, {width}, 3)' image_buffer = image_buffer_tpl.format(data=image_data) image_array = image_array_tpl.format(buffer=image_buffer) image_matrix = image_matrix_tpl.format(array=image_array, height=height, width=width) return image_matrix start_str = "bz2.decompress(base64.b64decode('" size_start_str = "wx.ImageFromData(" if size_start_str in code and start_str in code: size_start = code.index(size_start_str) + len(size_start_str) size_str_list = code[size_start:].split(",")[:2] width, height = tuple(map(int, size_str_list)) # We have a cell that displays a bitmap data_start = code.index(start_str) + len(start_str) data_stop = code.find("'", data_start) enc_data = bytes(code[data_start:data_stop], encoding='utf-8') compressed_image_data = b64decode(enc_data) reenc_data = b85encode(compressed_image_data) code = get_image_code(repr(reenc_data), width, height) selection = Selection([], [], [], [], [(key[0], key[1])]) tab = key[2] attr_dict = AttrDict([("renderer", "image")]) attr = CellAttribute(selection, tab, attr_dict) self.cell_attributes_postfixes.append(attr) elif "charts.ChartFigure(" in code: # We have a matplotlib figure selection = Selection([], [], [], [], [(key[0], key[1])]) tab = key[2] attr_dict = AttrDict([("renderer", "matplotlib")]) attr = CellAttribute(selection, tab, attr_dict) self.cell_attributes_postfixes.append(attr) return code def _pys2code_10(self, line: str): """Updates code in pys code_array - 
for save file version 1.0 :param line: Pys file line to be parsed """ row, col, tab, code = self._split_tidy(line, maxsplit=3) key = self._get_key(row, col, tab) if all(0 <= key[i] < self.code_array.shape[i] for i in range(3)): self.code_array.dict_grid[key] = str(self._code_convert_1_2(key, code)) @version_handler def _pys2code(self, line: str): """Updates code in pys code_array :param line: Pys file line to be parsed """ row, col, tab, code = self._split_tidy(line, maxsplit=3) key = self._get_key(row, col, tab) if all(0 <= key[i] < self.code_array.shape[i] for i in range(3)): self.code_array.dict_grid[key] = ast.literal_eval(code) def _attr_convert_1to2(self, key: str, value: Any) -> Tuple[str, Any]: """Converts key, value attribute pair from v1.0 to v2.0 :param key: AttrDict key :param value: AttrDict value for key """ color_attrs = ["bordercolor_bottom", "bordercolor_right", "bgcolor", "textcolor"] if key in color_attrs: return key, wxcolor2rgb(value) elif key == "fontweight": return key, wx2qt_fontweights[value] elif key == "fontstyle": return key, wx2qt_fontstyles[value] elif key == "markup" and value: return "renderer", "markup" elif key == "angle" and value < 0: return "angle", 360 + value elif key == "merge_area": # Value in v1.0 None if the cell was merged # In v 2.0 this is no longer necessary return None, value # Update justifiaction and alignment values elif key in ["vertical_align", "justification"]: just_align_value_tansitions = { "left": "justify_left", "center": "justify_center", "right": "justify_right", "top": "align_top", "middle": "align_center", "bottom": "align_bottom", } return key, just_align_value_tansitions[value] return key, value def _pys2attributes_10(self, line: str): """Updates attributes in code_array - for save file version 1.0 :param line: Pys file line to be parsed """ splitline = self._split_tidy(line) selection_data = list(map(ast.literal_eval, splitline[:5])) selection = Selection(*selection_data) tab = int(splitline[5]) attr_dict = AttrDict() old_merged_cells = {} for col, ele in enumerate(splitline[6:]): if not (col % 2): # Odd entries are keys key = ast.literal_eval(ele) else: # Even cols are values value = ast.literal_eval(ele) # Convert old wx color values and merged cells key_, value_ = self._attr_convert_1to2(key, value) if key_ is None and value_ is not None: # We have a merged cell old_merged_cells[value_[:2]] = value_ try: attr_dict.pop("merge_area") except KeyError: pass attr_dict[key_] = value_ attr = CellAttribute(selection, tab, attr_dict) self.code_array.cell_attributes.append(attr) for key in old_merged_cells: selection = Selection([], [], [], [], [key]) attr_dict = AttrDict([("merge_area", old_merged_cells[key])]) attr = CellAttribute(selection, tab, attr_dict) self.code_array.cell_attributes.append(attr) old_merged_cells.clear() @version_handler def _pys2attributes(self, line: str): """Updates attributes in code_array :param line: Pys file line to be parsed """ splitline = self._split_tidy(line) selection_data = list(map(ast.literal_eval, splitline[:5])) selection = Selection(*selection_data) tab = int(splitline[5]) attr_dict = AttrDict() for col, ele in enumerate(splitline[6:]): if not (col % 2): # Odd entries are keys key = ast.literal_eval(ele) else: # Even cols are values value = ast.literal_eval(ele) attr_dict[key] = value attr = CellAttribute(selection, tab, attr_dict) self.code_array.cell_attributes.append(attr) def _pys2row_heights(self, line: str): """Updates row_heights in code_array :param line: Pys file line to be parsed 
""" # Split with maxsplit 3 split_line = self._split_tidy(line) key = row, tab = self._get_key(*split_line[:2]) height = float(split_line[2]) shape = self.code_array.shape try: if row < shape[0] and tab < shape[2]: self.code_array.row_heights[key] = height except ValueError: pass def _pys2col_widths(self, line: str): """Updates col_widths in code_array :param line: Pys file line to be parsed """ # Split with maxsplit 3 split_line = self._split_tidy(line) key = col, tab = self._get_key(*split_line[:2]) width = float(split_line[2]) shape = self.code_array.shape try: if col < shape[1] and tab < shape[2]: self.code_array.col_widths[key] = width except ValueError: pass def _pys2macros(self, line: str): """Updates macros in code_array :param line: Pys file line to be parsed """ self.code_array.macros += line class PysWriter(object): """Interface between code_array and pys file data Iterating over it yields pys file lines """ def __init__(self, code_array: CodeArray): """ :param code_array: The code_array object data structure """ self.code_array = code_array self.version = 2.0 self._section2writer = OrderedDict([ ("[Pyspread save file version]\n", self._version2pys), ("[shape]\n", self._shape2pys), ("[grid]\n", self._code2pys), ("[attributes]\n", self._attributes2pys), ("[row_heights]\n", self._row_heights2pys), ("[col_widths]\n", self._col_widths2pys), ("[macros]\n", self._macros2pys), ]) def __iter__(self) -> Iterable[str]: """Yields a pys_file line wise from code_array""" for key in self._section2writer: yield key for line in self._section2writer[key](): yield line def __len__(self) -> int: """Returns how many lines will be written when saving the code_array""" lines = 9 # Headers + 1 line version + 1 line shape lines += len(self.code_array.dict_grid) lines += len(self.code_array.cell_attributes) lines += len(self.code_array.dict_grid.row_heights) lines += len(self.code_array.dict_grid.col_widths) lines += self.code_array.dict_grid.macros.count('\n') return lines def _version2pys(self) -> Iterable[str]: """Returns pys file version information in pys format Format: <version>\n """ yield repr(self.version) + "\n" def _shape2pys(self) -> Iterable[str]: """Returns shape information in pys format Format: <rows>\t<cols>\t<tabs>\n """ yield u"\t".join(map(str, self.code_array.shape)) + u"\n" def _code2pys(self) -> Iterable[str]: """Returns cell code information in pys format Format: <row>\t<col>\t<tab>\t<code>\n """ for key in self.code_array: key_str = u"\t".join(repr(ele) for ele in key) if self.version <= 1.0: code_str = self.code_array(key) else: code_str = repr(self.code_array(key)) out_str = key_str + u"\t" + code_str + u"\n" yield out_str def _attributes2pys(self) -> Iterable[str]: """Returns cell attributes information in pys format Format: <selection[0]>\t[...]\t<tab>\t<key>\t<value>\t[...]\n """ # Remove doublettes purged_cell_attributes = [] purged_cell_attributes_keys = [] for selection, tab, attr_dict in self.code_array.cell_attributes: if purged_cell_attributes_keys and \ (selection, tab) == purged_cell_attributes_keys[-1]: purged_cell_attributes[-1][2].update(attr_dict) else: purged_cell_attributes_keys.append((selection, tab)) purged_cell_attributes.append([selection, tab, attr_dict]) for selection, tab, attr_dict in purged_cell_attributes: sel_list = [selection.block_tl, selection.block_br, selection.rows, selection.columns, selection.cells] tab_list = [tab] attr_dict_list = [] for key in attr_dict: if key is not None: attr_dict_list.append(key) attr_dict_list.append(attr_dict[key]) 
line_list = list(map(repr, sel_list + tab_list + attr_dict_list)) yield u"\t".join(line_list) + u"\n" def _row_heights2pys(self) -> Iterable[str]: """Returns row height information in pys format Format: <row>\t<tab>\t<value>\n """ for row, tab in self.code_array.dict_grid.row_heights: if row < self.code_array.shape[0] and \ tab < self.code_array.shape[2]: height = self.code_array.dict_grid.row_heights[(row, tab)] height_strings = list(map(repr, [row, tab, height])) yield u"\t".join(height_strings) + u"\n" def _col_widths2pys(self) -> Iterable[str]: """Returns column width information in pys format Format: <col>\t<tab>\t<value>\n """ for col, tab in self.code_array.dict_grid.col_widths: if col < self.code_array.shape[1] and \ tab < self.code_array.shape[2]: width = self.code_array.dict_grid.col_widths[(col, tab)] width_strings = list(map(repr, [col, tab, width])) yield u"\t".join(width_strings) + u"\n" def _macros2pys(self) -> Iterable[str]: """Returns macros information in pys format Format: <macro code line>\n """ macros = self.code_array.dict_grid.macros yield macros
gpl-3.0
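Most of the conversion logic in `pys.py` is hard to exercise without a full `code_array`, but `wxcolor2rgb` is self-contained. Reproduced from the reader module above (type hints dropped for brevity), with a worked call showing the byte unpacking:

```python
def wxcolor2rgb(wxcolor):
    """Split a 24-bit wx colour value into (red, green, blue) bytes."""
    red = wxcolor >> 16
    green = wxcolor - (red << 16) >> 8
    blue = wxcolor - (red << 16) - (green << 8)
    return red, green, blue


# 0xFF8040 carries red=0xFF, green=0x80, blue=0x40.
print(wxcolor2rgb(0xFF8040))  # (255, 128, 64)
```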
karstenw/nodebox-pyobjc
examples/Extended Application/matplotlib/examples/api/span_regions.py
1
1627
""" ================ Using span_where ================ Illustrate some helper functions for shading regions where a logical mask is True See :meth:`matplotlib.collections.BrokenBarHCollection.span_where` """ import numpy as np import matplotlib.pyplot as plt import matplotlib.collections as collections # nodebox section if __name__ == '__builtin__': # were in nodebox import os import tempfile W = 800 inset = 20 size(W, 600) plt.cla() plt.clf() plt.close('all') def tempimage(): fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False) fname = fob.name fob.close() return fname imgx = 20 imgy = 0 def pltshow(plt, dpi=150): global imgx, imgy temppath = tempimage() plt.savefig(temppath, dpi=dpi) dx,dy = imagesize(temppath) w = min(W,dx) image(temppath,imgx,imgy,width=w) imgy = imgy + dy + 20 os.remove(temppath) size(W, HEIGHT+dy+40) else: def pltshow(mplpyplot): mplpyplot.show() # nodebox section end t = np.arange(0.0, 2, 0.01) s1 = np.sin(2*np.pi*t) s2 = 1.2*np.sin(4*np.pi*t) fig, ax = plt.subplots() ax.set_title('using span_where') ax.plot(t, s1, color='black') ax.axhline(0, color='black', lw=2) collection = collections.BrokenBarHCollection.span_where( t, ymin=0, ymax=1, where=s1 > 0, facecolor='green', alpha=0.5) ax.add_collection(collection) collection = collections.BrokenBarHCollection.span_where( t, ymin=-1, ymax=0, where=s1 < 0, facecolor='red', alpha=0.5) ax.add_collection(collection) pltshow(plt)
mit
deeplycloudy/MetPy
metpy/testing.py
1
2218
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Collection of utilities for testing

This includes:
* unit-aware test functions
* code for testing matplotlib figures
"""

import numpy.testing
from matplotlib import style
from pint import DimensionalityError

from .units import units

# Our lowest supported matplotlib doesn't have the classic style, so fallback to empty list
test_style = 'classic' if 'classic' in style.available else []


def check_and_drop_units(actual, desired):
    try:
        if not hasattr(desired, 'units'):
            actual = actual.to('dimensionless')
        elif not hasattr(actual, 'units'):
            actual = units.Quantity(actual, 'dimensionless')
        else:
            actual = actual.to(desired.units)
    except DimensionalityError:
        raise AssertionError('Units are not compatible: %s should be %s' %
                             (actual.units, desired.units))
    except AttributeError:
        pass

    if hasattr(actual, 'magnitude'):
        actual = actual.magnitude
    if hasattr(desired, 'magnitude'):
        desired = desired.magnitude

    return actual, desired


def assert_almost_equal(actual, desired, decimal=7):
    actual, desired = check_and_drop_units(actual, desired)
    numpy.testing.assert_almost_equal(actual, desired, decimal)


def assert_array_almost_equal(actual, desired, decimal=7):
    actual, desired = check_and_drop_units(actual, desired)
    numpy.testing.assert_array_almost_equal(actual, desired, decimal)


def assert_array_equal(actual, desired):
    actual, desired = check_and_drop_units(actual, desired)
    numpy.testing.assert_array_equal(actual, desired)


def make_figure(*args, **kwargs):
    'Create an Agg figure for testing'
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    if 'dpi' not in kwargs:
        kwargs['dpi'] = 100
    fig = Figure(*args, **kwargs)
    fig.canvas = FigureCanvasAgg(fig)
    return fig


def hide_tick_labels(ax):
    'Hide the ticklabels on an axes'
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
bsd-3-clause
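The helpers above convert `actual` to the units of `desired` and then compare bare magnitudes with `numpy.testing`. A simplified sketch of that behaviour, using a plain `pint.UnitRegistry` as a stand-in for MetPy's `units` object and dropping the dimensionless and `AttributeError` handling:

```python
import numpy.testing
from pint import UnitRegistry

units = UnitRegistry()  # stand-in for metpy.units.units


def check_and_drop_units(actual, desired):
    # Same idea as the helper above: convert, then compare bare magnitudes.
    actual = actual.to(desired.units)
    return actual.magnitude, desired.magnitude


actual, desired = check_and_drop_units(1000 * units.meter, 1 * units.kilometer)
numpy.testing.assert_almost_equal(actual, desired, 7)  # passes: 1.0 == 1
```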
drallensmith/neat-python
examples/neuron-demo/demo-ctrnn.py
2
1241
from __future__ import print_function

import numpy as np
import matplotlib.pyplot as plt

import neat
from neat.activations import sigmoid_activation

# Create a fully-connected network of two neurons with no external inputs.
node1_inputs = [(1, 0.9), (2, 0.2)]
node2_inputs = [(1, -0.2), (2, 0.9)]

node_evals = {1: neat.ctrnn.CTRNNNodeEval(0.01, sigmoid_activation, sum, -2.75 / 5.0, 1.0, node1_inputs),
              2: neat.ctrnn.CTRNNNodeEval(0.01, sigmoid_activation, sum, -1.75 / 5.0, 1.0, node2_inputs)}

net = neat.ctrnn.CTRNN([], [1, 2], node_evals)

init1 = 0.0
init2 = 0.0

net.set_node_value(1, init1)
net.set_node_value(2, init2)

times = [0.0]
outputs = [[init1, init2]]
for i in range(1250):
    output = net.advance([], 0.002, 0.002)
    times.append(net.time_seconds)
    outputs.append(output)
    print("{0:.7f} {1:.7f}".format(output[0], output[1]))

outputs = np.array(outputs).T

plt.title("CTRNN model")
plt.ylabel("Outputs")
plt.xlabel("Time")
plt.grid()
plt.plot(times, outputs[0], "g-", label="output 0")
plt.plot(times, outputs[1], "r-", label="output 1")
plt.legend(loc="best")

plt.figure()
plt.ylabel("Output 0")
plt.xlabel("Output 1")
plt.grid()
plt.plot(outputs[0], outputs[1], "g-")
plt.show()
plt.close()
bsd-3-clause
laputian/reglhot
lob/simple_lob_func.py
1
1559
import numpy as np
import math

lob_dim = 10
print_diagnostics = True

# Market order size
base_order = 1000.
# LOB tick base size
exp_const = 200
func_exp = 1.


def market_order_on_lob(lob_def, order):
    ord_hold = order[1]
    tick = 0
    slice_depth = 0
    while ord_hold > 0:
        slice_depth = lob_def(slice=tick)
        lob_new = slice_depth - ord_hold
        tick = tick + 1
        ord_hold = -lob_new
        if ord_hold <= 0.:
            break
    return tick - 1, slice_depth + ord_hold


def lob_def(slice=0, exp=func_exp):
    return exp_const * (slice ** exp)


import matplotlib.pyplot as plt


def show_mids(mids, max, min):
    plt.plot(mids)
    plt.ylabel('stock price')
    plt.xlabel('time')
    plt.title("Limit order book trading")
    plt.ylim(max + 2, min - 2)
    plt.show()


if __name__ == "__main__":
    exp_inv = 1. / (func_exp + 1)
    const = (func_exp + 1) ** exp_inv
    prices = []
    expl = []
    runs = 50
    for k in range(50):
        order = ['B', base_order * k]
        prices.append(market_order_on_lob(lob_def, order=order)[0])
        expl.append(const * (base_order ** exp_inv) / (exp_const ** exp_inv) * (k ** exp_inv))
    fig, ax = plt.subplots()
    ax.plot(expl, label="Function - Exp 1/" + str(func_exp + 1))
    ax.step(range(runs), prices, label="Limit order book")
    plt.title("Exp = " + str(func_exp))
    plt.xlabel('Market order size')
    plt.ylabel('Price difference')
    legend = ax.legend(loc='lower center', shadow=True, fontsize='x-large')
    plt.show()
mit
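The `__main__` block above compares the simulated walk up the book against the closed-form impact `const * (Q / exp_const) ** (1 / (func_exp + 1))`. A worked check for a single buy of Q = 1000 shares with the same parameters (`exp_const = 200`, `func_exp = 1`), re-deriving the walk with a small standalone loop rather than calling `market_order_on_lob`:

```python
import math

# Parameters as in the script above.
exp_const, Q = 200, 1000.

# Discrete walk: each price level `tick` holds exp_const * tick shares.
remaining, tick = Q, 0
while remaining > 0:
    tick += 1
    remaining -= exp_const * tick
print(tick)                          # 3 price levels consumed, matching (3, 400)

# Continuous approximation: solve exp_const * x**2 / 2 = Q for x.
print(math.sqrt(2 * Q / exp_const))  # ~3.16, close to the 3-tick move
```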
ndingwall/scikit-learn
examples/manifold/plot_t_sne_perplexity.py
19
4105
""" ============================================================================= t-SNE: The effect of various perplexity values on the shape ============================================================================= An illustration of t-SNE on the two concentric circles and the S-curve datasets for different perplexity values. We observe a tendency towards clearer shapes as the perplexity value increases. The size, the distance and the shape of clusters may vary upon initialization, perplexity values and does not always convey a meaning. As shown below, t-SNE for higher perplexities finds meaningful topology of two concentric circles, however the size and the distance of the circles varies slightly from the original. Contrary to the two circles dataset, the shapes visually diverge from S-curve topology on the S-curve dataset even for larger perplexity values. For further details, "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/ provides a good discussion of the effects of various parameters, as well as interactive plots to explore those effects. """ # Author: Narine Kokhlikyan <narine@slice.com> # License: BSD print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import NullFormatter from sklearn import manifold, datasets from time import time n_samples = 300 n_components = 2 (fig, subplots) = plt.subplots(3, 5, figsize=(15, 8)) perplexities = [5, 30, 50, 100] X, y = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05) red = y == 0 green = y == 1 ax = subplots[0][0] ax.scatter(X[red, 0], X[red, 1], c="r") ax.scatter(X[green, 0], X[green, 1], c="g") ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') for i, perplexity in enumerate(perplexities): ax = subplots[0][i + 1] t0 = time() tsne = manifold.TSNE(n_components=n_components, init='random', random_state=0, perplexity=perplexity) Y = tsne.fit_transform(X) t1 = time() print("circles, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[red, 0], Y[red, 1], c="r") ax.scatter(Y[green, 0], Y[green, 1], c="g") ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis('tight') # Another example using s-curve X, color = datasets.make_s_curve(n_samples, random_state=0) ax = subplots[1][0] ax.scatter(X[:, 0], X[:, 2], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) for i, perplexity in enumerate(perplexities): ax = subplots[1][i + 1] t0 = time() tsne = manifold.TSNE(n_components=n_components, init='random', random_state=0, perplexity=perplexity) Y = tsne.fit_transform(X) t1 = time() print("S-curve, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[:, 0], Y[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis('tight') # Another example using a 2D uniform grid x = np.linspace(0, 1, int(np.sqrt(n_samples))) xx, yy = np.meshgrid(x, x) X = np.hstack([ xx.ravel().reshape(-1, 1), yy.ravel().reshape(-1, 1), ]) color = xx.ravel() ax = subplots[2][0] ax.scatter(X[:, 0], X[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) for i, perplexity in enumerate(perplexities): ax = subplots[2][i + 1] t0 = time() tsne = manifold.TSNE(n_components=n_components, init='random', random_state=0, perplexity=perplexity) Y = 
tsne.fit_transform(X) t1 = time() print("uniform grid, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[:, 0], Y[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis('tight') plt.show()
bsd-3-clause
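The script above judges perplexity visually. A small numerical companion (the perplexity values and data sizes here are illustrative, not taken from the script) that reports scikit-learn's final `kl_divergence_` for two runs on the same circles data:

```python
from sklearn import datasets, manifold

X, y = datasets.make_circles(n_samples=300, factor=.5, noise=.05, random_state=0)

for perplexity in (5, 50):
    tsne = manifold.TSNE(n_components=2, init='random', random_state=0,
                         perplexity=perplexity)
    Y = tsne.fit_transform(X)
    # kl_divergence_ is the value of the t-SNE objective after optimization.
    # It is not a model-selection score across perplexities, but it confirms
    # that both runs converged to an embedding of the expected shape.
    print(perplexity, Y.shape, round(tsne.kl_divergence_, 3))
```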
andrewwarrington/vesicle-cnn-2
vesicle-cnn-2/vesicle-cnn-2.py
1
24454
# MIT License # # Copyright (c) 2017, Andrew Warrington # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Adaptation of VESICLE-CNN (Roncal et al 2014) to be fully convolutional instead of patch-based. Script loads, builds, trains and then deploys a fully convolutional approximation of VESCILE-CNN. Work is described in more detail in [include citation]. """ # Import stock libraries. ---------------------------------------------------------------------------------------------- import os import time import timeit import matplotlib.pyplot as plt import h5py import numpy as np import tensorflow as tf import math import argparse from shutil import copyfile # Import additional libraries. ----------------------------------------------------------------------------------------- import utilities as util # Parse arguments. ----------------------------------------------------------------------------------------------------- parser = argparse.ArgumentParser(description='VESICLE-CNN-2 training and deployment framework.') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--train_new', help='Are we training a new network? (mutually exclusive with --deploy_trained).', action='store_true') group.add_argument('--deploy_pretrained', help='File location of classifier to be deployed. (mutually exclusive with --training).', default=False) parser.add_argument('--gpu', help='GPU ID to run computations on.', default=False) parser.add_argument('--train_fraction', help='Fraction of training batches that are positive instances.', default=0.1) parser.add_argument('--positive_weight', help='The balancing weight used in weighted cross entropy calculations.', default=10) parser.add_argument('--deploy_train', help='Deploy network to train data set?', action='store_true', default=True) parser.add_argument('--deploy_validation', help='Deploy network to validation dataset', action='store_true', default=True) parser.add_argument('--deploy_test', help='Deploy network to test data set', action='store_true', default=False) parser.add_argument('--deploy_unlabelled', help='Deploy network to test dataset', action='store_true', default=False) args = parser.parse_args() # Configure GPU settings. ---------------------------------------------------------------------------------------------- if args.gpu: os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # see issue #152 os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu ''' Configure network settings. 
-------------------------------------------------------------------------------------''' # Configure network architecture parameters. patchSize = [67, 67] imSizeFC = [5, 5] convolutionalFilters = 48 firstLayerDimensions = [5, 5, 1, convolutionalFilters] secondLayerDimensions = [5, 5, convolutionalFilters, convolutionalFilters] thirdLayerDimensions = [5, 5, convolutionalFilters, convolutionalFilters] fcNeurons = 1024 fcLayerDimensions = [imSizeFC[0], imSizeFC[1], convolutionalFilters, fcNeurons] # Configure training parameters. trainingSteps = 300000 batch_size = 100 pos_frac = float(args.train_fraction) pos_weight = float(args.positive_weight) learningRate = 1e-04 valRegularity = 1000 valFirst = 150000 # Define data locations. dataLocations = ['./../kasthuri_data/train/train.h5', './../kasthuri_data/validation/validation.h5', './../kasthuri_data/test/test.h5'] channelLocations = ['/synapse'] # Label location _within_ the H5 file. internalLocations = ['SYN'] imgLocation = '/image' # Image location _within_ the H5 file. # Define experimental setup. training = args.train_new deployTrain = args.deploy_train deployValidation = args.deploy_validation deployTest = args.deploy_test deployUnlabelled = args.deploy_unlabelled load_path = args.deploy_pretrained # Misc. trainingSteps += 1 ''' Read in images from h5. -----------------------------------------------------------------------------------------''' [trainImage, trainLabels] = util.load_data(dataLocations[0], imgLocation, channelLocations, internalLocations) [validateImage, validateLabels] = util.load_data(dataLocations[1], imgLocation, channelLocations, internalLocations) [testImage, testLabels] = util.load_data(dataLocations[2], imgLocation, channelLocations, internalLocations) assert trainImage.shape[1] == trainImage.shape[2] # Configure settings pertaining to window size. border = int(math.floor(patchSize[0]/2)) windowSize = [trainImage.shape[1], trainImage.shape[2]] finalSize = [windowSize[0] - 2*border, windowSize[1] - 2*border] imSize = trainImage.shape imElements = windowSize[0] * windowSize[1] trainImages = trainImage.shape[0] validationImages = validateImage.shape[0] testImages = testImage.shape[0] # Prepare locations lists. pad = int(math.floor(patchSize[0] / 2)) positive_locations = np.where(trainLabels['SYN'][:, pad:imSize[1]-pad, pad:imSize[2]-pad, :]) negative_locations = np.where(1 - trainLabels['SYN'][:, pad:imSize[1]-pad, pad:imSize[2]-pad, :]) ''' Configure output file -------------------------------------------------------------------------------------------''' # Configure file for output. if not args.deploy_pretrained: fileOutputName = "Results/VCNN-2_" + time.strftime("%Y_%m_%d") + "_" + time.strftime("%H_%M_%S") else: fileOutputName = args.deploy_pretrained if not os.path.exists(fileOutputName): os.makedirs(fileOutputName) reportLocation = fileOutputName + "/report.txt" util.echo_to_file(reportLocation, "\n-- VESICLE-CNN-2 synapse detector. --\n") util.echo_to_file(reportLocation, "Experiment to train VESICLE-CNN-2 to predict synapses.\n") util.echo_to_file(reportLocation, "Experiment conducted at:" + time.strftime("%d/%m/%Y") + " " + time.strftime("%H:%M:%S") + ".\n\n") if args.deploy_pretrained is not False: util.echo_to_file(reportLocation, "Deploying pre-trained network saved at %s" % args.deploy_pretrained) else: util.echo_to_file(reportLocation, "Training new network.") # Copy current version of this script, as well as the makefile just to make sure we capture the experiment. 
if not os.path.exists(fileOutputName + "/backup"): os.makedirs(fileOutputName + "/backup") copyfile('./vesicle-cnn-2.py', fileOutputName + "/backup/vesicle-cnn-2.py") copyfile('./Makefile', fileOutputName + "/backup/Makefile") util.echo_to_file(reportLocation, "Experimental setup:\n") util.echo_to_file(reportLocation, "Training settings:\n") util.echo_to_file(reportLocation, "\tLearning scheme: ADAM\n") util.echo_to_file(reportLocation, "\tLearning rate: %f\n" % learningRate) util.echo_to_file(reportLocation, "\tTraining steps: %s\n" % trainingSteps) util.echo_to_file(reportLocation, "\tOptimize weighted cross entropy\n") util.echo_to_file(reportLocation, "\tSelect best network from F1\n\n") util.echo_to_file(reportLocation, "\tTraining ratio: %f\n" % pos_frac) util.echo_to_file(reportLocation, "\tTraining weight: %f\n" % pos_weight) util.echo_to_file(reportLocation, "\tBatch size: %f\n" % batch_size) util.echo_to_file(reportLocation, "\tFirst validation at: %f\n" % valFirst) util.echo_to_file(reportLocation, "Architecture settings:\n") util.echo_to_file(reportLocation, "\tConvolution layer 1: %s\n" % firstLayerDimensions) util.echo_to_file(reportLocation, "\tConvolution layer 2: %s\n" % secondLayerDimensions) util.echo_to_file(reportLocation, "\tConvolution layer 3: %s\n" % thirdLayerDimensions) util.echo_to_file(reportLocation, "\tFC units: %f\n" % fcNeurons) util.echo_to_file(reportLocation, "\tInput patch size: %s\n" % patchSize) ''' Configure TensorFlow graph -------------------------------------------------------------------------------------''' util.echo_to_file(reportLocation, "\nConfiguring network.\n") # Create placeholders for independent and dependant variables once batch has been selected. with tf.name_scope('Input_Image'): x = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='Image') # Independent variables. # Reshape to amenable shape. # x_image = tf.reshape(x, [-1, windowSize[0], windowSize[1], 1]) with tf.name_scope('Input_Synapse'): y_syn = tf.placeholder(tf.float32, shape=[None, 2]) # Target values. with tf.name_scope('First_Layer'): # Create first convolutional layer. (No pooling.) W_conv1 = util.weight_variable(firstLayerDimensions, "w_conv_1") # Weights in first layer. b_conv1 = util.bias_variable([firstLayerDimensions[3]], "b_conv_1") # Biases in first layer. h_conv1 = tf.nn.relu(util.conv2d(x, W_conv1, valid=True, stride=1) + b_conv1) # Perform convolution (with zero padding) and apply ReLU. h_pool1 = util.max_pool(h_conv1, 1, kernelWidth=2) with tf.name_scope('Second_Layer'): # Create first convolutional layer. (No pooling.) W_conv2 = util.weight_variable(secondLayerDimensions, "w_conv_2") # Weights in first layer. b_conv2 = util.bias_variable([secondLayerDimensions[3]], "b_conv_2") # Biases in first layer. h_conv2 = tf.nn.relu(util.atrous_conv2d(h_pool1, W_conv2, valid=True, rate=2) + b_conv2) # Perform convolution (with zero padding) and apply ReLU. h_pool2 = util.atrous_max_pool(h_conv2, mask_size=2, rate=2) with tf.name_scope('Third_Layer'): # Create first convolutional layer. (No pooling.) W_conv3 = util.weight_variable(thirdLayerDimensions, "w_conv_3") # Weights in first layer. b_conv3 = util.bias_variable([thirdLayerDimensions[3]], "b_conv_3") # Biases in first layer. h_conv3 = tf.nn.relu(util.atrous_conv2d(h_pool2, W_conv3, valid=True, rate=4) + b_conv3) # Perform convolution (with zero padding) and apply ReLU. 
h_pool3 = util.atrous_max_pool(h_conv3, mask_size=2, rate=4) with tf.name_scope('fccnn_Layer'): # Create FC layer for final classification. W_fccnn1 = util.weight_variable(fcLayerDimensions, "w_fccnn_1") # Image patch for FC, with firstFCNeurons neurons. b_fccnn1 = util.bias_variable([fcLayerDimensions[3]], "b_fccnn_1") # Biases for firstFCNeurons neurons. h_fccnn1 = tf.nn.relu(util.atrous_conv2d(h_pool3, W_fccnn1, valid=True, rate=8) + b_fccnn1) # Perform convolution (with zero padding) and apply ReLU. # Insert more FC layers here. with tf.name_scope('Output_Layer'): # Now add a final sigmoid layer for prediction of 0-1 probability and readout. W_fccnn2 = util.weight_variable([1, 1, fcLayerDimensions[3], 2], "w_fccnn_2") b_fccnn2 = util.bias_variable([2], "b_fccnn_2") y_syn_logit = tf.nn.relu(util.conv2d(h_fccnn1, W_fccnn2, valid=True) + b_fccnn2) y_syn_soft = tf.nn.softmax(y_syn_logit) y_syn_logit_flat = tf.reshape(y_syn_logit, [-1, 2]) y_syn_soft_flat = tf.reshape(y_syn_soft, [-1, 2]) # Add ops to save and restore all the variables. saver = tf.train.Saver() # Define cross entropy as loss function. with tf.name_scope('XEnt'): cross_entropy = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=y_syn, logits=y_syn_logit_flat, pos_weight=pos_weight, name='syn_Loss')) # Calculate accuracy as an average across vector. with tf.name_scope('Acc'): # Get the predictions for later evaluation. predictions = tf.argmax(y_syn_soft, 3) # Binary vector of correct predictions. correct_prediction = tf.equal(tf.argmax(y_syn_logit_flat, 1), tf.argmax(y_syn, 1)) # Now calc accuracy. accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Calculate f1 accuracyF1, precision, recall, fmeasure = util.tf_calculate_PR(y_syn_soft_flat, y_syn) # Use ADAM as optimizer to minimize the cross entropy. train_step = tf.train.AdamOptimizer(learningRate).minimize(cross_entropy) # Merge all the summaries and write them out. writer = tf.summary.FileWriter(fileOutputName + '/LOGS', graph=tf.get_default_graph()) with tf.name_scope('Losses'): tf.summary.scalar("cost", cross_entropy) tf.summary.scalar("accuracy", accuracy) with tf.name_scope('F-Metrics'): tf.summary.scalar("accuracy_(f1)", accuracyF1) tf.summary.scalar("precision", precision) tf.summary.scalar("recall", recall) tf.summary.scalar("fmeasure", fmeasure) # Merge all summaries into a single "operation" which we can execute in a session. summary_op = tf.summary.merge_all() # Use an interactive session for debugging. config_opt = tf.ConfigProto() config_opt.gpu_options.allow_growth = True sess = tf.Session(config=config_opt) # Initialize the variables. sess.run(tf.global_variables_initializer()) # Declare misc variables for storing times etc. f1s = [] xEnts = [] accs = [] trainTimes = np.zeros((trainingSteps, 1)) gpuTimes = np.zeros((trainingSteps, 1)) ''' Train network -------------------------------------------------------------------------------------------''' # Function to automate application of a classifier to the validation volume. 
def validate_network(_f1s=[], _accs=[], _xents=[], final_val=False, first_val=False): val_cross_entropy, val_accuracy, val_fmeasure = np.zeros((validationImages, 8)), np.zeros((validationImages, 8)), np.zeros((validationImages, 8)) for j in range(validationImages): for k in range(8): val_batch = util.get_minibatch_image(validateImage, validateLabels, batch_size=1, valN=j, orientation=k, border=border) reshaped = np.reshape(val_batch[0], [-1, windowSize[0], windowSize[1], 1]) val_cross_entropy[j, k], val_accuracy[j, k], val_fmeasure[j, k] = sess.run([cross_entropy, accuracy, fmeasure], feed_dict={x: reshaped, y_syn: val_batch[1]['SYN']}) validation_accuracy = np.average(np.average(val_accuracy)) validation_cross_entropy = np.average(np.average(val_cross_entropy)) validation_fmeasure = np.average(np.average(val_fmeasure)) _f1s.append(validation_fmeasure) _accs.append(validation_accuracy) _xents.append(validation_cross_entropy) if (np.nanmax(f1s) == validation_fmeasure) | first_val: saver.save(sess, fileOutputName + "/CNN.ckpt") if not final_val: output_string = ("step %d, validation accuracy %g, cross entropy %g, f1(ave) %g\n" % (i, validation_accuracy, validation_cross_entropy, validation_fmeasure)) else: output_string = ("Validation accuracy using single best validated model, applied to whole of validation set: \n\n\t Validation error: %g\n\t Validation XEnt: %g\n\t Validation F1: %g\n\t" % (_accs[0], _xents[0], _f1s[0])) util.echo_to_file(reportLocation, output_string) return _f1s, _accs, _xents # If we are training the network (as opposed to deploying an existing network). if training: util.echo_to_file(reportLocation, "\nTraining network.\n") for i in range(trainingSteps): if ((i % valRegularity) == 0) and ((i >= valFirst) or (i == 0)): f1s, accs, xEnts = validate_network(f1s, accs, xEnts, first_val=(i==0)) startTime = timeit.default_timer() batch = util.get_minibatch_patch(trainImage, trainLabels['SYN'], batch_size, patchSize, pos_frac=pos_frac, pos_locs=positive_locations, neg_locs=negative_locations) startTimeGPU = timeit.default_timer() _, summary = sess.run([train_step, summary_op], feed_dict={x: batch[0], y_syn: batch[1]}) elapsed = timeit.default_timer() - startTime gpuElapsed = timeit.default_timer() - startTimeGPU trainTimes[i] = elapsed gpuTimes[i] = gpuElapsed writer.add_summary(summary, i) av = np.sum(trainTimes) / trainingSteps gpu_av = np.sum(gpuTimes) / trainingSteps # Now write the timings to the output file. util.echo_to_file(reportLocation, "\nAverage training step time: %g s (%g GPU s). \n\n" % (av, gpu_av)) # Restore the best net. saver.restore(sess, fileOutputName + "/CNN.ckpt") # Do final validation on network. validate_network(final_val=True) else: # Odd hack required. Deployment to GPU without taking at least a single training step causes a memory error. TODO: fix this. batch = util.get_minibatch_patch(trainImage, trainLabels['SYN'], batch_size, patchSize, pos_frac=pos_frac, pos_locs=positive_locations, neg_locs=negative_locations) _, summary = sess.run([train_step, summary_op], feed_dict={x: batch[0], y_syn: batch[1]}) ''' Define targets to allow for validation and deployment of network ---------------------------------------------''' # Apply the classifier (defined by _func) to the image stack (defined by _images).
def apply_classifier(_func, _images): _numFrames = _images.shape[0] volume_prediction = np.zeros((_numFrames, finalSize[0], finalSize[1], 2)) _application_times = np.zeros((_numFrames, 1)) _gpu_times = np.zeros((_numFrames, 1)) for i in range(_numFrames): startTime = timeit.default_timer() _single_im = np.expand_dims(_images[i, :, :].astype(np.float32), axis=0) startTimeGPU = timeit.default_timer() predFlat = sess.run(_func, feed_dict={x: _single_im}) elapsed = timeit.default_timer() - startTimeGPU _gpu_times[i] = elapsed singlePred = np.reshape(predFlat[0, :, :, 0], finalSize) volume_prediction[i, :, :, 0] = singlePred singlePred = np.reshape(predFlat[0, :, :, 1], finalSize) volume_prediction[i, :, :, 1] = singlePred elapsed = timeit.default_timer() - startTime _application_times[i] = elapsed print("Prediction of layer %g/%g complete." % (i + 1, _numFrames)) av = np.sum(_application_times) / _numFrames gpu_av = np.sum(_gpu_times) / _numFrames util.echo_to_file(reportLocation, "\nAverage application time per frame: %g s (%g GPU s). \n\n" % (av, gpu_av)) return volume_prediction # Evaluate F1 score for a test volume (volume_prediction) against a ground truth volume (_labels). # Use _channel as a tag for output. def evaluate_f1(volume_prediction, _labels, _channel): precision, recall, f1 = util.calculate_PR(volume_prediction, _labels) # Now write the metrics to the output file. util.echo_to_file(reportLocation, "\n" + _channel + "\n") util.echo_to_file(reportLocation, "\nPrecision: %g \n" % precision) util.echo_to_file(reportLocation, "\nRecall: %g \n" % recall) util.echo_to_file(reportLocation, "\nF1: %g \n" % f1) util.echo_to_file(reportLocation, "\n") return precision, recall, f1 # Deploy the classifier, defined by its logit function (_logit_func), to an image stack (_image). # Which channel to apply to is defined by _channel. # _set defines whether we are applying to the train, validation or test set, and stores results inside _file. # _label then defines the ground truth stack (may not exist for new, unlabelled data). def deploy_to_channel(_logit_func, _image, _channel, _set, _file, _label=None): print(_channel + " " + _set + " prediction.") _file.create_dataset('image', data=_image) logits = apply_classifier(_logit_func, _image) # Now save output and truth values. group = _file.create_group(_channel) group.create_dataset('zeros', data=np.squeeze(logits[:, :, :, 0])) group.create_dataset('ones', data=np.squeeze(logits[:, :, :, 1])) if _label is not None: trimmed_labels = _label[:, border:finalSize[0]+border, border:finalSize[1]+border, :] group.create_dataset('truth', data=np.squeeze(trimmed_labels)) # Create P-R metrics using softmax layer. prediction = (logits[:, :, :, 1] > logits[:, :, :, 0]).astype(np.int8) precision, recall, f1 = evaluate_f1(prediction, trimmed_labels, _channel + "-" + _set) group.attrs['Precision'] = precision group.attrs['Recall'] = recall group.attrs['F1'] = f1 # Script for testing the speed of application of _func to _image.
def application_speed_test(_func, _image): _application_times = np.zeros((_image.shape[0], 1)) for i in range(_image.shape[0]): _single_im = np.expand_dims(_image[i, :, :].astype(np.float32), axis=0) startTime = timeit.default_timer() _ = sess.run(_func, feed_dict={x: _single_im}) elapsed = timeit.default_timer() - startTime _application_times[i] = elapsed av = np.sum(_application_times) / _image.shape[0] util.echo_to_file(reportLocation, "\nAverage application time per frame: %g s \n" % av) # Now let's go and deploy the algorithm to the datasets. if deployTrain | deployValidation | deployTest | deployUnlabelled: # Load the correct classifier file. # If we have been training, re-load the optimally trained classifier. # Else, load the classifier defined by the input. if training: saver.restore(sess, fileOutputName + "/CNN.ckpt") load_path = fileOutputName else: saver.restore(sess, load_path + "/CNN.ckpt") util.echo_to_file(reportLocation, "\nTesting network parameters saved at: " + load_path + "\n") if deployTrain: util.echo_to_file(reportLocation, "Beginning dense application to training set.") # Create h5 file for saving output. h5f = h5py.File(fileOutputName + '/train_results.h5', 'w') h5f.attrs['Creation_Date'] = time.strftime("%Y_%m_%d") + "_" + time.strftime("%H_%M_%S") h5f.attrs['Data_Set'] = "Training" h5f.attrs['Network_Location'] = load_path h5f.attrs['Network'] = "VESICLE-CNN-2" deploy_to_channel(y_syn_logit, trainImage, 'syn', 'train', h5f, trainLabels['SYN']) # Syn. h5f.close() if deployValidation: util.echo_to_file(reportLocation, "Beginning dense application to validation set.") # Create h5 file for saving output. h5f = h5py.File(fileOutputName + '/validation_results.h5', 'w') h5f.attrs['Creation_Date'] = time.strftime("%Y_%m_%d") + "_" + time.strftime("%H_%M_%S") h5f.attrs['Data_Set'] = "Validation" h5f.attrs['Network_Location'] = load_path h5f.attrs['Network'] = "VESICLE-CNN-2" deploy_to_channel(y_syn_logit, validateImage, 'syn', 'validation', h5f, validateLabels['SYN']) # Syn. h5f.close() if deployTest: util.echo_to_file(reportLocation, "Beginning dense application to test set.") # Create h5 file for saving output. h5f = h5py.File(fileOutputName + '/test_results.h5', 'w') h5f.attrs['Creation_Date'] = time.strftime("%Y_%m_%d") + "_" + time.strftime("%H_%M_%S") h5f.attrs['Data_Set'] = "Test" h5f.attrs['Network_Location'] = load_path h5f.attrs['Network'] = "VESICLE-CNN-2" deploy_to_channel(y_syn_logit, testImage, 'syn', 'test', h5f, testLabels['SYN']) # Syn. h5f.close() if deployUnlabelled: util.echo_to_file(reportLocation, "Beginning dense application to unlabelled set.") # We need to load this one. unlabelledLoc = dataLocations[0] unlabelledImage, _ = util.load_data(unlabelledLoc, imgLocation) # Create h5 file for saving output. h5f = h5py.File(fileOutputName + '/unlabelled_results.h5', 'w') h5f.create_dataset('image', data=testImage) h5f.attrs['Creation_Date'] = time.strftime("%Y_%m_%d") + "_" + time.strftime("%H_%M_%S") h5f.attrs['Data_Set'] = "Unlabelled" h5f.attrs['Network_Location'] = load_path h5f.attrs['Network'] = "VESICLE-CNN-2" deploy_to_channel(y_syn_logit, testImage, 'syn', 'unlabelled', h5f) # Syn. h5f.close() # Close the TF session to release the resources. sess.close() del sess # Now run the MATLAB accuracy evaluation script. # This makes a call to MATLAB and passes in the arguments to the evaluation script. # This is only done if it has also been deployed to validation and test datasets. if deployValidation & deployTest: # Make the MATLAB call.
# GPU resources don't seem to be properly released, so this call 'blocks' the GPU memory... need to fix this somehow. os.system('matlab -r "addpath(genpath(\'../evaluation\')); wrap_synapse_pr(\'./' + fileOutputName +'\' ,\'syn\'); wrap_voxel_pr(\'./' + fileOutputName +'\' ,\'syn\'); exit"') ## Finish up. util.echo_to_file(reportLocation, "-- End of VESICLE-CNN-2 report. --")
mit
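The script above up-weights the rare positive (synapse) voxels with `tf.nn.weighted_cross_entropy_with_logits`. A minimal, self-contained sketch of that loss term, assuming TensorFlow 1.x (the API generation the script targets); the toy logits and targets are illustrative only.

import numpy as np
import tensorflow as tf

pos_weight = 10.0  # plays the same role as the --positive_weight argument above

logits = tf.placeholder(tf.float32, shape=[None, 2], name='toy_logits')
targets = tf.placeholder(tf.float32, shape=[None, 2], name='toy_targets')

# Positive targets contribute pos_weight times more to the loss than negatives,
# compensating for the heavy class imbalance in the synapse labels.
loss = tf.reduce_mean(
    tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=logits,
                                             pos_weight=pos_weight))

with tf.Session() as sess:
    toy_logits = np.random.randn(4, 2).astype(np.float32)
    toy_targets = np.eye(2, dtype=np.float32)[[0, 1, 0, 1]]  # one-hot rows
    print(sess.run(loss, feed_dict={logits: toy_logits, targets: toy_targets}))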
fyffyt/scikit-learn
examples/feature_stacker.py
246
1906
""" ================================================= Concatenating multiple feature extraction methods ================================================= In many real-world examples, there are many ways to extract features from a dataset. Often it is beneficial to combine several methods to obtain good performance. This example shows how to use ``FeatureUnion`` to combine features obtained by PCA and univariate selection. Combining features using this transformer has the benefit that it allows cross validation and grid searches over the whole process. The combination used in this example is not particularly helpful on this dataset and is only used to illustrate the usage of FeatureUnion. """ # Author: Andreas Mueller <amueller@ais.uni-bonn.de> # # License: BSD 3 clause from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.grid_search import GridSearchCV from sklearn.svm import SVC from sklearn.datasets import load_iris from sklearn.decomposition import PCA from sklearn.feature_selection import SelectKBest iris = load_iris() X, y = iris.data, iris.target # This dataset is way to high-dimensional. Better do PCA: pca = PCA(n_components=2) # Maybe some original features where good, too? selection = SelectKBest(k=1) # Build estimator from PCA and Univariate selection: combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)]) # Use combined features to transform dataset: X_features = combined_features.fit(X, y).transform(X) svm = SVC(kernel="linear") # Do grid search over k, n_components and C: pipeline = Pipeline([("features", combined_features), ("svm", svm)]) param_grid = dict(features__pca__n_components=[1, 2, 3], features__univ_select__k=[1, 2], svm__C=[0.1, 1, 10]) grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10) grid_search.fit(X, y) print(grid_search.best_estimator_)
bsd-3-clause
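The grid above addresses nested parameters through scikit-learn's double-underscore convention (e.g. ``features__pca__n_components``). A small hedged sketch showing how the legal keys can be listed directly from the pipeline, built from the same estimators as the example:

from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.svm import SVC

combined_features = FeatureUnion([("pca", PCA(n_components=2)),
                                  ("univ_select", SelectKBest(k=1))])
pipeline = Pipeline([("features", combined_features),
                     ("svm", SVC(kernel="linear"))])

# Every name printed here is a valid param_grid key for GridSearchCV.
print(sorted(pipeline.get_params().keys()))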
BryanCutler/spark
python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby.py
1
23729
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import pandas as pd from pyspark import pandas as ps from pyspark.pandas.config import set_option, reset_option from pyspark.pandas.testing.utils import ReusedSQLTestCase, SQLTestUtils class OpsOnDiffFramesGroupByTest(ReusedSQLTestCase, SQLTestUtils): @classmethod def setUpClass(cls): super().setUpClass() set_option("compute.ops_on_diff_frames", True) @classmethod def tearDownClass(cls): reset_option("compute.ops_on_diff_frames") super().tearDownClass() def test_groupby_different_lengths(self): pdfs1 = [ pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2], "d": list("abcdefght")}), pd.DataFrame({"c": [4, 2, 7, None, 1, 1, 2], "d": list("abcdefg")}), pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2, 2], "d": list("abcdefghti")}), ] pdfs2 = [ pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}), pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 7], "b": [4, 2, 7, 3, 3, 1, 1, 2]}), pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}), ] for pdf1, pdf2 in zip(pdfs1, pdfs2): kdf1 = ps.from_pandas(pdf1) kdf2 = ps.from_pandas(pdf2) for as_index in [True, False]: if as_index: sort = lambda df: df.sort_index() else: sort = lambda df: df.sort_values("c").reset_index(drop=True) self.assert_eq( sort(kdf1.groupby(kdf2.a, as_index=as_index).sum()), sort(pdf1.groupby(pdf2.a, as_index=as_index).sum()), almost=as_index, ) self.assert_eq( sort(kdf1.groupby(kdf2.a, as_index=as_index).c.sum()), sort(pdf1.groupby(pdf2.a, as_index=as_index).c.sum()), almost=as_index, ) self.assert_eq( sort(kdf1.groupby(kdf2.a, as_index=as_index)["c"].sum()), sort(pdf1.groupby(pdf2.a, as_index=as_index)["c"].sum()), almost=as_index, ) def test_groupby_multiindex_columns(self): pdf1 = pd.DataFrame( {("y", "c"): [4, 2, 7, 3, None, 1, 1, 1, 2], ("z", "d"): list("abcdefght")} ) pdf2 = pd.DataFrame( {("x", "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7], ("x", "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2]} ) kdf1 = ps.from_pandas(pdf1) kdf2 = ps.from_pandas(pdf2) self.assert_eq( kdf1.groupby(kdf2[("x", "a")]).sum().sort_index(), pdf1.groupby(pdf2[("x", "a")]).sum().sort_index(), ) self.assert_eq( kdf1.groupby(kdf2[("x", "a")], as_index=False) .sum() .sort_values(("y", "c")) .reset_index(drop=True), pdf1.groupby(pdf2[("x", "a")], as_index=False) .sum() .sort_values(("y", "c")) .reset_index(drop=True), ) self.assert_eq( kdf1.groupby(kdf2[("x", "a")])[[("y", "c")]].sum().sort_index(), pdf1.groupby(pdf2[("x", "a")])[[("y", "c")]].sum().sort_index(), ) def test_split_apply_combine_on_series(self): pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]}) pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]}) kdf1 = ps.from_pandas(pdf1) kdf2 = ps.from_pandas(pdf2) for as_index in [True, False]: if as_index: sort = lambda df: 
df.sort_index() else: sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True) with self.subTest(as_index=as_index): self.assert_eq( sort(kdf1.groupby(kdf2.A, as_index=as_index).sum()), sort(pdf1.groupby(pdf2.A, as_index=as_index).sum()), ) self.assert_eq( sort(kdf1.groupby(kdf2.A, as_index=as_index).B.sum()), sort(pdf1.groupby(pdf2.A, as_index=as_index).B.sum()), ) self.assert_eq( sort(kdf1.groupby([kdf1.C, kdf2.A], as_index=as_index).sum()), sort(pdf1.groupby([pdf1.C, pdf2.A], as_index=as_index).sum()), ) self.assert_eq( sort(kdf1.groupby([kdf1.C + 1, kdf2.A], as_index=as_index).sum()), sort(pdf1.groupby([pdf1.C + 1, pdf2.A], as_index=as_index).sum()), ) self.assert_eq( kdf1.B.groupby(kdf2.A).sum().sort_index(), pdf1.B.groupby(pdf2.A).sum().sort_index(), ) self.assert_eq( (kdf1.B + 1).groupby(kdf2.A).sum().sort_index(), (pdf1.B + 1).groupby(pdf2.A).sum().sort_index(), ) self.assert_eq( kdf1.B.groupby(kdf2.A.rename()).sum().sort_index(), pdf1.B.groupby(pdf2.A.rename()).sum().sort_index(), ) self.assert_eq( kdf1.B.rename().groupby(kdf2.A).sum().sort_index(), pdf1.B.rename().groupby(pdf2.A).sum().sort_index(), ) self.assert_eq( kdf1.B.rename().groupby(kdf2.A.rename()).sum().sort_index(), pdf1.B.rename().groupby(pdf2.A.rename()).sum().sort_index(), ) def test_aggregate(self): pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]}) pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]}) kdf1 = ps.from_pandas(pdf1) kdf2 = ps.from_pandas(pdf2) for as_index in [True, False]: if as_index: sort = lambda df: df.sort_index() else: sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True) with self.subTest(as_index=as_index): self.assert_eq( sort(kdf1.groupby(kdf2.A, as_index=as_index).agg("sum")), sort(pdf1.groupby(pdf2.A, as_index=as_index).agg("sum")), ) self.assert_eq( sort(kdf1.groupby(kdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})), sort(pdf1.groupby(pdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})), ) self.assert_eq( sort( kdf1.groupby(kdf2.A, as_index=as_index).agg( {"B": ["min", "max"], "C": "sum"} ) ), sort( pdf1.groupby(pdf2.A, as_index=as_index).agg( {"B": ["min", "max"], "C": "sum"} ) ), ) self.assert_eq( sort(kdf1.groupby([kdf1.C, kdf2.A], as_index=as_index).agg("sum")), sort(pdf1.groupby([pdf1.C, pdf2.A], as_index=as_index).agg("sum")), ) self.assert_eq( sort(kdf1.groupby([kdf1.C + 1, kdf2.A], as_index=as_index).agg("sum")), sort(pdf1.groupby([pdf1.C + 1, pdf2.A], as_index=as_index).agg("sum")), ) # multi-index columns columns = pd.MultiIndex.from_tuples([("Y", "C"), ("X", "B")]) pdf1.columns = columns kdf1.columns = columns columns = pd.MultiIndex.from_tuples([("X", "A")]) pdf2.columns = columns kdf2.columns = columns for as_index in [True, False]: stats_kdf = kdf1.groupby(kdf2[("X", "A")], as_index=as_index).agg( {("X", "B"): "min", ("Y", "C"): "sum"} ) stats_pdf = pdf1.groupby(pdf2[("X", "A")], as_index=as_index).agg( {("X", "B"): "min", ("Y", "C"): "sum"} ) self.assert_eq( stats_kdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True), stats_pdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True), ) stats_kdf = kdf1.groupby(kdf2[("X", "A")]).agg( {("X", "B"): ["min", "max"], ("Y", "C"): "sum"} ) stats_pdf = pdf1.groupby(pdf2[("X", "A")]).agg( {("X", "B"): ["min", "max"], ("Y", "C"): "sum"} ) self.assert_eq( stats_kdf.sort_values( by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")] ).reset_index(drop=True), stats_pdf.sort_values( by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")] 
).reset_index(drop=True), ) def test_duplicated_labels(self): pdf1 = pd.DataFrame({"A": [3, 2, 1]}) pdf2 = pd.DataFrame({"A": [1, 2, 3]}) kdf1 = ps.from_pandas(pdf1) kdf2 = ps.from_pandas(pdf2) self.assert_eq( kdf1.groupby(kdf2.A).sum().sort_index(), pdf1.groupby(pdf2.A).sum().sort_index() ) self.assert_eq( kdf1.groupby(kdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True), pdf1.groupby(pdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True), ) def test_apply(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]}, columns=["a", "b", "c"], ) pkey = pd.Series([1, 1, 2, 3, 5, 8]) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).apply(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey).apply(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(kkey)["a"].apply(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey)["a"].apply(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].apply(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey)[["a"]].apply(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(["a", kkey]).apply(lambda x: x + x.min()).sort_index(), pdf.groupby(["a", pkey]).apply(lambda x: x + x.min()).sort_index(), ) def test_transform(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]}, columns=["a", "b", "c"], ) pkey = pd.Series([1, 1, 2, 3, 5, 8]) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).transform(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey).transform(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(kkey)["a"].transform(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey)["a"].transform(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].transform(lambda x: x + x.min()).sort_index(), pdf.groupby(pkey)[["a"]].transform(lambda x: x + x.min()).sort_index(), ) self.assert_eq( kdf.groupby(["a", kkey]).transform(lambda x: x + x.min()).sort_index(), pdf.groupby(["a", pkey]).transform(lambda x: x + x.min()).sort_index(), ) def test_filter(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]}, columns=["a", "b", "c"], ) pkey = pd.Series([1, 1, 2, 3, 5, 8]) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).filter(lambda x: any(x.a == 2)).sort_index(), pdf.groupby(pkey).filter(lambda x: any(x.a == 2)).sort_index(), ) self.assert_eq( kdf.groupby(kkey)["a"].filter(lambda x: any(x == 2)).sort_index(), pdf.groupby(pkey)["a"].filter(lambda x: any(x == 2)).sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(), pdf.groupby(pkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(), ) self.assert_eq( kdf.groupby(["a", kkey]).filter(lambda x: any(x.a == 2)).sort_index(), pdf.groupby(["a", pkey]).filter(lambda x: any(x.a == 2)).sort_index(), ) def test_head(self): pdf = pd.DataFrame( { "a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3, "b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3, "c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3, }, ) pkey = pd.Series([1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( pdf.groupby(pkey).head(2).sort_index(), kdf.groupby(kkey).head(2).sort_index() ) self.assert_eq( pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index() ) self.assert_eq( 
pdf.groupby("a")[["b"]].head(2).sort_index(), kdf.groupby("a")[["b"]].head(2).sort_index(), ) self.assert_eq( pdf.groupby([pkey, "b"]).head(2).sort_index(), kdf.groupby([kkey, "b"]).head(2).sort_index(), ) def test_cumcount(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) for ascending in [True, False]: self.assert_eq( kdf.groupby(kkey).cumcount(ascending=ascending).sort_index(), pdf.groupby(pkey).cumcount(ascending=ascending).sort_index(), ) self.assert_eq( kdf.groupby(kkey)["a"].cumcount(ascending=ascending).sort_index(), pdf.groupby(pkey)["a"].cumcount(ascending=ascending).sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].cumcount(ascending=ascending).sort_index(), pdf.groupby(pkey)[["a"]].cumcount(ascending=ascending).sort_index(), ) def test_cummin(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).cummin().sort_index(), pdf.groupby(pkey).cummin().sort_index() ) self.assert_eq( kdf.groupby(kkey)["a"].cummin().sort_index(), pdf.groupby(pkey)["a"].cummin().sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].cummin().sort_index(), pdf.groupby(pkey)[["a"]].cummin().sort_index(), ) def test_cummax(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).cummax().sort_index(), pdf.groupby(pkey).cummax().sort_index() ) self.assert_eq( kdf.groupby(kkey)["a"].cummax().sort_index(), pdf.groupby(pkey)["a"].cummax().sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].cummax().sort_index(), pdf.groupby(pkey)[["a"]].cummax().sort_index(), ) def test_cumsum(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).cumsum().sort_index(), pdf.groupby(pkey).cumsum().sort_index() ) self.assert_eq( kdf.groupby(kkey)["a"].cumsum().sort_index(), pdf.groupby(pkey)["a"].cumsum().sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["a"]].cumsum().sort_index(), pdf.groupby(pkey)[["a"]].cumsum().sort_index(), ) def test_cumprod(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).cumprod().sort_index(), pdf.groupby(pkey).cumprod().sort_index(), almost=True, ) self.assert_eq( kdf.groupby(kkey)["a"].cumprod().sort_index(), pdf.groupby(pkey)["a"].cumprod().sort_index(), almost=True, ) self.assert_eq( kdf.groupby(kkey)[["a"]].cumprod().sort_index(), pdf.groupby(pkey)[["a"]].cumprod().sort_index(), almost=True, ) def test_diff(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, } ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq(kdf.groupby(kkey).diff().sort_index(), pdf.groupby(pkey).diff().sort_index()) self.assert_eq( 
kdf.groupby(kkey)["a"].diff().sort_index(), pdf.groupby(pkey)["a"].diff().sort_index() ) self.assert_eq( kdf.groupby(kkey)[["a"]].diff().sort_index(), pdf.groupby(pkey)[["a"]].diff().sort_index(), ) self.assert_eq(kdf.groupby(kkey).diff().sum(), pdf.groupby(pkey).diff().sum().astype(int)) self.assert_eq(kdf.groupby(kkey)["a"].diff().sum(), pdf.groupby(pkey)["a"].diff().sum()) def test_rank(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 3, "b": [1, 1, 2, 3, 5, 8] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq(kdf.groupby(kkey).rank().sort_index(), pdf.groupby(pkey).rank().sort_index()) self.assert_eq( kdf.groupby(kkey)["a"].rank().sort_index(), pdf.groupby(pkey)["a"].rank().sort_index() ) self.assert_eq( kdf.groupby(kkey)[["a"]].rank().sort_index(), pdf.groupby(pkey)[["a"]].rank().sort_index(), ) self.assert_eq(kdf.groupby(kkey).rank().sum(), pdf.groupby(pkey).rank().sum()) self.assert_eq(kdf.groupby(kkey)["a"].rank().sum(), pdf.groupby(pkey)["a"].rank().sum()) @unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0") def test_shift(self): pdf = pd.DataFrame( { "a": [1, 1, 2, 2, 3, 3] * 3, "b": [1, 1, 2, 2, 3, 4] * 3, "c": [1, 4, 9, 16, 25, 36] * 3, }, ) pkey = pd.Series([1, 1, 2, 2, 3, 4] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).shift().sort_index(), pdf.groupby(pkey).shift().sort_index() ) self.assert_eq( kdf.groupby(kkey)["a"].shift().sort_index(), pdf.groupby(pkey)["a"].shift().sort_index() ) self.assert_eq( kdf.groupby(kkey)[["a"]].shift().sort_index(), pdf.groupby(pkey)[["a"]].shift().sort_index(), ) self.assert_eq(kdf.groupby(kkey).shift().sum(), pdf.groupby(pkey).shift().sum().astype(int)) self.assert_eq(kdf.groupby(kkey)["a"].shift().sum(), pdf.groupby(pkey)["a"].shift().sum()) def test_fillna(self): pdf = pd.DataFrame( { "A": [1, 1, 2, 2] * 3, "B": [2, 4, None, 3] * 3, "C": [None, None, None, 1] * 3, "D": [0, 1, 5, 4] * 3, } ) pkey = pd.Series([1, 1, 2, 2] * 3) kdf = ps.from_pandas(pdf) kkey = ps.from_pandas(pkey) self.assert_eq( kdf.groupby(kkey).fillna(0).sort_index(), pdf.groupby(pkey).fillna(0).sort_index() ) self.assert_eq( kdf.groupby(kkey)["C"].fillna(0).sort_index(), pdf.groupby(pkey)["C"].fillna(0).sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["C"]].fillna(0).sort_index(), pdf.groupby(pkey)[["C"]].fillna(0).sort_index(), ) self.assert_eq( kdf.groupby(kkey).fillna(method="bfill").sort_index(), pdf.groupby(pkey).fillna(method="bfill").sort_index(), ) self.assert_eq( kdf.groupby(kkey)["C"].fillna(method="bfill").sort_index(), pdf.groupby(pkey)["C"].fillna(method="bfill").sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["C"]].fillna(method="bfill").sort_index(), pdf.groupby(pkey)[["C"]].fillna(method="bfill").sort_index(), ) self.assert_eq( kdf.groupby(kkey).fillna(method="ffill").sort_index(), pdf.groupby(pkey).fillna(method="ffill").sort_index(), ) self.assert_eq( kdf.groupby(kkey)["C"].fillna(method="ffill").sort_index(), pdf.groupby(pkey)["C"].fillna(method="ffill").sort_index(), ) self.assert_eq( kdf.groupby(kkey)[["C"]].fillna(method="ffill").sort_index(), pdf.groupby(pkey)[["C"]].fillna(method="ffill").sort_index(), ) if __name__ == "__main__": from pyspark.pandas.tests.test_ops_on_diff_frames_groupby import * # noqa: F401 try: import xmlrunner # type: ignore[import] testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2) except ImportError: testRunner 
= None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
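The tests above all hinge on the ``compute.ops_on_diff_frames`` option, without which pandas-on-Spark refuses to combine columns from different frames. A minimal sketch of the pattern being exercised, assuming a running SparkSession is available:

import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option

set_option("compute.ops_on_diff_frames", True)
try:
    kdf1 = ps.from_pandas(pd.DataFrame({"c": [4.0, 2.0, 7.0, 3.0]}))
    kdf2 = ps.from_pandas(pd.DataFrame({"a": [1, 1, 2, 2]}))
    # Group one frame by a column that lives in a different frame.
    print(kdf1.groupby(kdf2.a).sum().sort_index().to_pandas())
finally:
    reset_option("compute.ops_on_diff_frames")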
fspaolo/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
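The benchmark above times each fit with ``datetime`` plus ``sklearn.utils.bench.total_seconds``. A hedged sketch of the same timing pattern for a single Ridge fit using only ``time.perf_counter`` (Python 3); the problem size here is arbitrary, not the benchmark's schedule:

import time
import numpy as np
from sklearn import linear_model

X = np.random.randn(500, 500)   # arbitrary square problem, as in the benchmark
y = np.random.randn(500)

start = time.perf_counter()
linear_model.Ridge(alpha=1.).fit(X, y)
print('Ridge fit took %.4f s' % (time.perf_counter() - start))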
ContinuumIO/blaze
blaze/compute/bcolz.py
3
5577
from __future__ import absolute_import, division, print_function from weakref import WeakKeyDictionary from toolz import curry, concat, first, memoize from multipledispatch import MDNotImplementedError from ..expr import ( Distinct, ElemWise, Expr, Field, Head, Projection, Slice, Symbol, path, symbol, ) from ..expr.optimize import lean_projection, simple_selections from ..expr.split import split from ..partition import partitions from .core import compute from .pmap import get_default_pmap from collections import Iterator, Iterable import datashape import bcolz import numpy as np import pandas as pd from ..dispatch import dispatch from odo import into __all__ = ['bcolz'] COMFORTABLE_MEMORY_SIZE = 1e9 @memoize(cache=WeakKeyDictionary()) def box(type_): """Create a non-iterable box type for an object. Parameters ---------- type_ : type The type to create a box for. Returns ------- box : type A type to box values of type ``type_``. """ class c(object): __slots__ = 'value', def __init__(self, value): if not isinstance(value, type_): raise TypeError( "values must be of type '%s' (received '%s')" % ( type_.__name__, type(value).__name__, ), ) self.value = value c.__name__ = 'box(%s)' % type_.__name__ return c @dispatch(Expr, (box(bcolz.ctable), box(bcolz.carray))) def optimize(expr, _): return simple_selections(lean_projection(expr)) @dispatch(Expr, (bcolz.ctable, bcolz.carray)) def pre_compute(expr, data, scope=None, **kwargs): # box the data so that we don't need to deal with ambiguity of ctable # and carray being instances of the Iterator ABC. return box(type(data))(data) @dispatch(Expr, (box(bcolz.ctable), box(bcolz.carray))) def post_compute(expr, data, **kwargs): # Unbox the bcolz objects. return data.value @dispatch((box(bcolz.carray), box(bcolz.ctable))) def discover(data): val = data.value return datashape.from_numpy(val.shape, val.dtype) Cheap = (Head, ElemWise, Distinct, Symbol) @dispatch(Head, (box(bcolz.ctable), box(bcolz.carray))) def compute_down(expr, data, **kwargs): """ Cheap and simple computation in simple case If we're given a head and the entire expression is cheap to do (e.g. elemwises, selections, ...) 
then compute on data directly, without parallelism""" leaf = expr._leaves()[0] if all(isinstance(e, Cheap) for e in path(expr, leaf)): val = data.value return compute( expr, {leaf: into(Iterator, val)}, return_type='native', **kwargs ) else: raise MDNotImplementedError() @dispatch(Field, box(bcolz.ctable)) def compute_up(expr, data, **kwargs): return data.value[str(expr._name)] @dispatch(Projection, box(bcolz.ctable)) def compute_up(expr, data, **kwargs): return data.value[list(map(str, expr.fields))] @dispatch(Slice, (box(bcolz.carray), box(bcolz.ctable))) def compute_up(expr, data, **kwargs): return data.value[expr.index] def compute_chunk(source, chunk, chunk_expr, data_index): part = source[data_index] return compute(chunk_expr, {chunk: part}, return_type='native') def get_chunksize(data): if isinstance(data, bcolz.carray): return data.chunklen elif isinstance(data, bcolz.ctable): return min(data[c].chunklen for c in data.names) else: raise TypeError("Don't know how to compute chunksize for type %r" % type(data).__name__) @dispatch(Expr, (box(bcolz.carray), box(bcolz.ctable))) def compute_down(expr, data, chunksize=None, map=None, **kwargs): data = data.value if map is None: map = get_default_pmap() leaf = expr._leaves()[0] if chunksize is None: chunksize = max(2**16, get_chunksize(data)) # If the bottom expression is a projection or field then want to do # compute_up first children = { e for e in expr._traverse() if isinstance(e, Expr) and any(i is expr._leaves()[0] for i in e._inputs) } if len(children) == 1 and isinstance(first(children), (Field, Projection)): raise MDNotImplementedError() chunk = symbol('chunk', chunksize * leaf.schema) (chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr, chunk=chunk) data_parts = partitions(data, chunksize=(chunksize,)) parts = list(map(curry(compute_chunk, data, chunk, chunk_expr), data_parts)) if isinstance(parts[0], np.ndarray): intermediate = np.concatenate(parts) elif isinstance(parts[0], pd.DataFrame): intermediate = pd.concat(parts) elif isinstance(parts[0], Iterable): intermediate = list(concat(parts)) else: raise TypeError("Don't know how to concatenate objects of type %r" % type(parts[0]).__name__) return compute(agg_expr, {agg: intermediate}, return_type='native') def _asarray(a): if isinstance(a, (bcolz.carray, bcolz.ctable)): return a[:] return np.array(list(a)) @compute_down.register(Expr, (box(bcolz.carray), box(bcolz.ctable)), Iterable) @compute_down.register(Expr, Iterable, (box(bcolz.carray), box(bcolz.ctable))) def bcolz_mixed(expr, a, b, **kwargs): return compute( expr, dict(zip(expr._leaves(), map(_asarray, (a.value, b.value)))), return_type='native', )
bsd-3-clause
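The ``box``/``memoize`` pair above exists so that bcolz containers are wrapped in a non-iterable type and are therefore never matched by the generic ``Iterator`` dispatch rules. A standalone, simplified sketch of that pattern; the names here are illustrative, not part of blaze:

def make_box(type_, _cache={}):
    """Create, and cache, a dedicated non-iterable wrapper class for type_."""
    if type_ in _cache:
        return _cache[type_]          # memoized: one box class per wrapped type

    class Boxed(object):
        __slots__ = ('value',)

        def __init__(self, value):
            if not isinstance(value, type_):
                raise TypeError('expected %s, got %s'
                                % (type_.__name__, type(value).__name__))
            self.value = value

    Boxed.__name__ = 'box(%s)' % type_.__name__
    _cache[type_] = Boxed
    return Boxed


IntBox = make_box(int)
assert make_box(int) is IntBox        # same class returned on every call
assert IntBox(3).value == 3           # the wrapped value stays accessible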
tdaylan/tdgu
infl.py
1
11244
import sys, platform, os from matplotlib import pyplot as plt import numpy as np import camb from camb import model, initialpower import h5py, os pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) cambdata = camb.get_background(pars) h5f = h5py.File(os.environ['PCAT_DATA_PATH'] + '/data/inpt/adis.h5', 'w') redssour = np.linspace(0., 5., 100) redshost = np.linspace(0., 5., 100) adis = cambdata.angular_diameter_distance(redssour) adistdim = np.zeros((100, 100)) for k, z0 in enumerate(redssour): for l, z1 in enumerate(redshost): adistdim[k, l] = cambdata.angular_diameter_distance2(z0, z1) h5f.create_dataset('reds', data=redssour) h5f.create_dataset('adis', data=adis) h5f.create_dataset('adistdim', data=adistdim) h5f.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06) pars.InitPower.set_params(ns=0.965, r=0) pars.set_for_lmax(2500, lens_potential_accuracy=0); path = '/Users/tansu/Desktop/infl/' os.system('mkdir -p %s' % path) #os.environ["FERM_IGAL_DATA_PATH"] cambdata = camb.get_results(pars) psec = cambdata.get_cmb_power_spectra(pars) psectotl = psec['total'] pseculen = psec['unlensed_scalar'] ls = np.arange(psectotl.shape[0]) fig, ax = plt.subplots(2,2, figsize = (12,12)) ax[0,0].plot(ls,psectotl[:,0], color='k') ax[0,0].plot(ls,pseculen[:,0], color='r') ax[0,0].set_title('TT') ax[0,1].plot(ls[2:], 1-pseculen[2:,0]/psectotl[2:,0]); ax[0,1].set_title(r'$\Delta TT$') ax[1,0].plot(ls,psectotl[:,1], color='k') ax[1,0].plot(ls,pseculen[:,1], color='r') ax[1,0].set_title(r'$EE$') ax[1,1].plot(ls,psectotl[:,3], color='k') ax[1,1].plot(ls,pseculen[:,3], color='r') ax[1,1].set_title(r'$TE$'); for ax in ax.reshape(-1): ax.set_xlim([2,2500]) plt.savefig(path + 'psec.pdf') plt.close() pars.WantTensors = True cambdata = camb.get_transfer_functions(pars) lmax=2000 rs = np.linspace(0,0.2,6) for r in rs: inflation_params = initialpower.InitialPowerParams() inflation_params.set_params(ns=0.96, r=r) cambdata.power_spectra_from_transfer(inflation_params) cl = cambdata.get_total_cls(lmax) plt.loglog(np.arange(lmax+1),cl[:,2]) plt.xlim([2,lmax]) plt.legend(rs, loc='lower right'); plt.savefig(path + 'tens.pdf') plt.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) pars.set_dark_energy() #re-set defaults pars.InitPower.set_params(ns=0.965) pars.set_matter_power(redshifts=[0., 0.8], kmax=2.0) pars.NonLinear = model.NonLinear_none cambdata = camb.get_results(pars) kh, z, pk = cambdata.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints = 200) s8 = np.array(cambdata.get_sigma8()) pars.NonLinear = model.NonLinear_both cambdata.calc_power_spectra(pars) kh_nonlin, z_nonlin, pk_nonlin = cambdata.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints = 200) for i, (redshift, line) in enumerate(zip(z,['-','--'])): plt.loglog(kh, pk[i,:], color='k', ls = line) plt.loglog(kh_nonlin, pk_nonlin[i,:], color='r', ls = line) plt.xlabel('k/h Mpc'); plt.legend(['linear','non-linear'], loc='lower left'); plt.title('Matter power at z=%s and z= %s'%tuple(z)); plt.savefig(path + 'psecnonl.pdf') plt.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) pars.InitPower.set_params(As=2e-9, ns=0.965) pars.set_for_lmax(2000, lens_potential_accuracy=1) ws = np.linspace(-1.5, -0.6, 5) for w in ws: pars.set_dark_energy(w) cambdata = camb.get_results(pars) cl = cambdata.get_lens_potential_cls(lmax=2000) plt.loglog(np.arange(2001), cl[:,0]) plt.savefig(path + 'psecdene.pdf') 
plt.close() pars.set_dark_energy() plt.legend(ws) plt.ylabel('$[L(L+1)]^2C_L^{\phi\phi}/2\pi$') plt.xlabel('$L$') plt.xlim([2,2000]); plt.savefig(path + 'dene.pdf') plt.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) cambdata = camb.get_background(pars) z = np.linspace(0,5,100) DA = cambdata.angular_diameter_distance(z) plt.plot(z, DA) plt.xlabel('$z$') plt.ylabel(r'$D_A /\rm{Mpc}$') plt.title('Angular diameter distance') plt.ylim([0,2000]); plt.savefig(path + 'distangl.pdf') plt.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) data = camb.get_transfer_functions(pars) transfer = data.get_cmb_transfer_data() fig, axs = plt.subplots(2,2, figsize=(12,8), sharex = True) for ix, ax in zip([3, 20, 40, 60],axs.reshape(-1)): ax.plot(transfer.q,transfer.delta_p_l_k[0,ix,:]) ax.set_title(r'$\ell = %s$'%transfer.l[ix]) if ix>1: ax.set_xlabel(r'$k \rm{Mpc}$') plt.savefig(path + 'tran.pdf') plt.close() trans = transfer ix = 0 _, axs = plt.subplots(1,2, figsize=(12,6)) for source_ix, (name, ax) in enumerate(zip(['T', 'E'], axs)): ax.semilogx(trans.q,trans.delta_p_l_k[source_ix,ix,:]) ax.set_xlim([1e-5, 0.05]) ax.set_xlabel(r'$k \rm{Mpc}$') ax.set_title(r'%s transfer function for $\ell = %s$'%(name, trans.l[ix])) plt.savefig(path + 'trantran.pdf') plt.close() pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) pars.WantScalars = False pars.WantTensors = True pars.set_accuracy(AccuracyBoost=2) data = camb.get_transfer_functions(pars) transfer = data.get_cmb_transfer_data('tensor') plt.figure(figsize=(14,3)) ixs=[13,19,21] ls = [transfer.l[i] for i in ixs] cols=['b','r','c'] for ix,col in zip(ixs, cols): k_weight = transfer.delta_p_l_k[2,ix,:]**2 k_weight /= np.sum(k_weight) plt.semilogx(transfer.q,k_weight, color=col) plt.xlim([1e-3, 0.1]) plt.legend(ls) plt.xlabel(r'$k \rm{Mpc}$') plt.title(r'Contribution to B from primordial tensor power spectrum for various $\ell$') derived = data.get_derived_params() for l,col in zip(ls,cols): plt.axvline(l/(1000*derived['DAstar']), color=col, ls=':', lw=2) plt.savefig(path + 'bmod.pdf') plt.close() k=10**np.linspace(-5, 1, 50) pars.InitPower.set_params(ns=0.96, r=0.2) #this functions imposes inflation consistency relation by default scalar_pk= pars.scalar_power(k) tensor_pk= pars.tensor_power(k) plt.semilogx(k,scalar_pk); plt.semilogx(k,tensor_pk); plt.xlabel(r'$k \rm{Mpc}$') plt.ylabel(r'${\cal P}(k)$') plt.legend(['scalar', 'tensor']); plt.savefig(path + 'scaltens.pdf') plt.close() pars = camb.set_params(H0=67.5, ombh2=0.022, omch2=0.122, As=2e-9, ns=0.95) data= camb.get_background(pars) eta = 10**(np.linspace(1, 4,300)) back_ev = data.get_background_time_evolution(eta, ['x_e', 'visibility']) fig, axs= plt.subplots(1,2, figsize=(12,5)) axs[0].semilogx(eta, back_ev['x_e']) axs[1].loglog(eta, back_ev['visibility']) axs[0].set_xlabel(r'$\eta/\rm{Mpc}$') axs[0].set_ylabel('$x_e$') axs[1].set_xlabel(r'$\eta/\rm{Mpc}$') axs[1].set_ylabel('Visibility'); fig.suptitle('Ionization history, including both hydrogen and helium recombination and reionization'); plt.savefig(path + 'ionz.pdf') plt.close() z = 10**np.linspace(2, 4, 300) back_ev = data.get_background_redshift_evolution(z, ['x_e', 'visibility'], format='array') fig, axs= plt.subplots(1,2, figsize=(12,5)) for i, (ax, label), in enumerate(zip(axs, ['$x_e$','Visibility'])): ax.semilogx(z, back_ev[:,i]) ax.set_xlabel('$z$') ax.set_ylabel(label) ax.set_xlim([500,1e4]) plt.savefig(path + 'visi.pdf') plt.close() eta = 
np.linspace(1, 400, 300) ks = [0.02,0.1] ev = data.get_time_evolution(ks, eta, ['delta_baryon','delta_photon']) _, axs= plt.subplots(1,2, figsize=(12,5)) for i, ax in enumerate(axs): ax.plot(eta,ev[i,:, 0]) ax.plot(eta,ev[i,:, 1]) ax.set_title('$k= %s$'%ks[i]) ax.set_xlabel(r'$\eta/\rm{Mpc}$'); plt.legend([r'$\Delta_b$', r'$\Delta_\gamma$'], loc = 'upper left'); plt.savefig(path + 'bary.pdf') plt.close() z = np.linspace(500,5000,300) ks = [0.02,0.1] ev = data.get_redshift_evolution(ks, z, ['delta_baryon','delta_cdm', 'delta_photon']) _, axs= plt.subplots(1,2, figsize=(12,5)) for i, ax in enumerate(axs): ax.plot(z,ev[i,:, 0]) ax.plot(z,ev[i,:, 1]) ax.plot(z,ev[i,:, 2]) ax.set_title(r'$k= %s/\rm{Mpc}$'%ks[i]) ax.set_xlabel('$z$'); plt.legend([r'$\Delta_b$', r'$\Delta_c$', r'$\Delta_\gamma$'], loc = 'upper right'); plt.savefig(path + 'barybary.pdf') plt.close() eta = 10**(np.linspace(0, 3, 500)) def plot_ev(ev, k): plt.figure(figsize=(8,6)) plt.loglog(eta,ev[:,0]) plt.loglog(eta,np.abs(ev[:,1])) plt.loglog(eta,-ev[:,2]) plt.title(r'$k= %s/\rm{Mpc}$'%k) plt.xlabel(r'$\eta/\rm{Mpc}$'); plt.legend([r'$\Delta_c$', r'$|\Delta_\gamma|$', r'$-(\Phi+\Psi)/2$'], loc = 'upper left'); plt.savefig(path + 'etaa%d.pdf' % k) plt.close() k=0.3 plot_ev(data.get_time_evolution(k, eta, ['delta_cdm','delta_photon', 'Weyl']),k) plot_ev(data.get_time_evolution(k, eta, ['delta_cdm','delta_photon', 'Weyl'],lAccuracyBoost=1),k) plot_ev(data.get_time_evolution(k, eta, ['delta_cdm','delta_photon', 'Weyl'],lAccuracyBoost=10),k) nz = 100 #number of steps to use for the radial/redshift integration kmax=10 #kmax to use pars = camb.CAMBparams() pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122) pars.InitPower.set_params(ns=0.965) cambdata= camb.get_background(pars) chistar = cambdata.conformal_time(0)- model.tau_maxvis.value chis = np.linspace(0,chistar,nz) zs=cambdata.redshift_at_comoving_radial_distance(chis) dchis = (chis[2:]-chis[:-2])/2 chis = chis[1:-1] zs = zs[1:-1] PK = camb.get_matter_power_interpolator(pars, nonlinear=True, hubble_units=False, k_hunit=False, kmax=kmax, var1=model.Transfer_Weyl,var2=model.Transfer_Weyl, zmax=zs[-1]) plt.figure(figsize=(8,5)) k=np.exp(np.log(10)*np.linspace(-4,2,200)) zplot = [0, 0.5, 1, 4 ,20] for z in zplot: plt.loglog(k, PK.P(z,k)) plt.xlim([1e-4,kmax]) plt.xlabel('k Mpc') plt.ylabel('$P_\Psi\, Mpc^{-3}$') plt.legend(['z=%s'%z for z in zplot]); plt.xlabel('$L$'); plt.savefig(path + 'psii.pdf') win = ((chistar-chis)/(chis**2*chistar))**2 ls = np.arange(2,2500+1, dtype=np.float64) cl_kappa=np.zeros(ls.shape) w = np.ones(chis.shape) #this is just used to set to zero k values out of range of interpolation for i, l in enumerate(ls): k=(l+0.5)/chis w[:]=1 w[k<1e-4]=0 w[k>=kmax]=0 cl_kappa[i] = np.dot(dchis, w*PK.P(zs, k, grid=False)*win/k**4) cl_kappa*= (ls*(ls+1))**2 pars.set_for_lmax(2500,lens_potential_accuracy=2) cambdata = camb.get_results(pars) cl_camb=cambdata.get_lens_potential_cls(2500) cl_limber= 4*cl_kappa/2/np.pi #convert kappa power to [l(l+1)]^2C_phi/2pi (what cl_camb is) plt.loglog(ls,cl_limber, color='b') plt.loglog(np.arange(2,cl_camb[:,0].size),cl_camb[2:,0], color='r') plt.xlim([1,2000]) plt.legend(['Limber','CAMB hybrid']) plt.ylabel('$[L(L+1)]^2C_L^{\phi}/2\pi$') plt.xlabel('$L$'); plt.savefig(path + 'kapp.pdf') plt.close() camb.set_halofit_version('takahashi') kh_nonlin, _, pk_takahashi = cambdata.get_nonlinear_matter_power_spectrum(params=pars) camb.set_halofit_version('mead') kh_nonlin, _, pk_mead = cambdata.get_nonlinear_matter_power_spectrum(params=pars) 
fig, axs=plt.subplots(2,1, sharex=True, figsize=(8,8)) axs[0].loglog(kh_nonlin, pk_takahashi[0]) axs[0].loglog(kh_nonlin, pk_mead[0]) axs[1].semilogx(kh_nonlin, pk_mead[0]/pk_takahashi[0]-1) axs[1].set_xlabel(r'$k/h\, \rm{Mpc}$') axs[1].legend(['Mead/Takahashi-1'], loc='upper left') plt.savefig(path + 'halo.pdf') plt.close()
mit
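A note on the Limber loop near the end of the CAMB script above (this is a reading of the code, not text from the CAMB documentation): `win = ((chistar-chis)/(chis**2*chistar))**2` is the squared lensing kernel, the `/k**4` undoes the k^4 weighting carried by the Weyl-potential power spectrum, and `w` only masks k values outside the interpolation range. The sum therefore evaluates

\[
\hat C_L \;=\; \sum_i \Delta\chi_i\,
\left[\frac{\chi_* - \chi_i}{\chi_i^{2}\,\chi_*}\right]^{2}
\frac{P_{\rm Weyl}\!\left(z_i,\,k_i\right)}{k_i^{4}},
\qquad k_i = \frac{L + 1/2}{\chi_i},
\]

and the curve labelled 'Limber' is \([L(L+1)]^{2}\,\hat C_L \cdot 4/(2\pi)\), which the script compares against CAMB's own \([L(L+1)]^{2}C_L^{\phi}/2\pi\) from `get_lens_potential_cls`.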
gcarq/freqtrade
tests/edge/test_edge.py
1
20379
# pragma pylint: disable=missing-docstring, C0103, C0330 # pragma pylint: disable=protected-access, too-many-lines, invalid-name, too-many-arguments import logging import math from unittest.mock import MagicMock import arrow import numpy as np import pytest from pandas import DataFrame, to_datetime from freqtrade.data.converter import ohlcv_to_dataframe from freqtrade.edge import Edge, PairInfo from freqtrade.exceptions import OperationalException from freqtrade.strategy.interface import SellType from tests.conftest import get_patched_freqtradebot, log_has from tests.optimize import (BTContainer, BTrade, _build_backtest_dataframe, _get_frame_time_from_offset) # Cases to be tested: # 1) Open trade should be removed from the end # 2) Two complete trades within dataframe (with sell hit for all) # 3) Entered, sl 1%, candle drops 8% => Trade closed, 1% loss # 4) Entered, sl 3%, candle drops 4%, recovers to 1% => Trade closed, 3% loss # 5) Stoploss and sell are hit. should sell on stoploss #################################################################### tests_start_time = arrow.get(2018, 10, 3) timeframe_in_minute = 60 _ohlc = {'date': 0, 'buy': 1, 'open': 2, 'high': 3, 'low': 4, 'close': 5, 'sell': 6, 'volume': 7} # Helpers for this test file def _validate_ohlc(buy_ohlc_sell_matrice): for index, ohlc in enumerate(buy_ohlc_sell_matrice): # if not high < open < low or not high < close < low if not ohlc[3] >= ohlc[2] >= ohlc[4] or not ohlc[3] >= ohlc[5] >= ohlc[4]: raise Exception('Line ' + str(index + 1) + ' of ohlc has invalid values!') return True def _build_dataframe(buy_ohlc_sell_matrice): _validate_ohlc(buy_ohlc_sell_matrice) data = [] for ohlc in buy_ohlc_sell_matrice: d = { 'date': tests_start_time.shift( minutes=( ohlc[0] * timeframe_in_minute)).int_timestamp * 1000, 'buy': ohlc[1], 'open': ohlc[2], 'high': ohlc[3], 'low': ohlc[4], 'close': ohlc[5], 'sell': ohlc[6]} data.append(d) frame = DataFrame(data) frame['date'] = to_datetime(frame['date'], unit='ms', utc=True, infer_datetime_format=True) return frame def _time_on_candle(number): return np.datetime64(tests_start_time.shift( minutes=(number * timeframe_in_minute)).int_timestamp * 1000, 'ms') # End helper functions # Open trade should be removed from the end tc0 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], [1, 5000, 5025, 4975, 4987, 6172, 0, 1]], # enter trade (signal on last candle) stop_loss=-0.99, roi={"0": float('inf')}, profit_perc=0.00, trades=[] ) # Two complete trades within dataframe(with sell hit for all) tc1 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], [1, 5000, 5025, 4975, 4987, 6172, 0, 1], # enter trade (signal on last candle) [2, 5000, 5025, 4975, 4987, 6172, 0, 0], # exit at open [3, 5000, 5025, 4975, 4987, 6172, 1, 0], # no action [4, 5000, 5025, 4975, 4987, 6172, 0, 0], # should enter the trade [5, 5000, 5025, 4975, 4987, 6172, 0, 1], # no action [6, 5000, 5025, 4975, 4987, 6172, 0, 0], # should sell ], stop_loss=-0.99, roi={"0": float('inf')}, profit_perc=0.00, trades=[BTrade(sell_reason=SellType.SELL_SIGNAL, open_tick=1, close_tick=2), BTrade(sell_reason=SellType.SELL_SIGNAL, open_tick=4, close_tick=6)] ) # 3) Entered, sl 1%, candle drops 8% => Trade closed, 1% loss tc2 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], [1, 5000, 5025, 4600, 4987, 6172, 0, 0], # enter trade, stoploss hit [2, 5000, 5025, 4975, 4987, 6172, 0, 0], ], stop_loss=-0.01, roi={"0": float('inf')}, profit_perc=-0.01, 
trades=[BTrade(sell_reason=SellType.STOP_LOSS, open_tick=1, close_tick=1)] ) # 4) Entered, sl 3 %, candle drops 4%, recovers to 1 % = > Trade closed, 3 % loss tc3 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], [1, 5000, 5025, 4800, 4987, 6172, 0, 0], # enter trade, stoploss hit [2, 5000, 5025, 4975, 4987, 6172, 0, 0], ], stop_loss=-0.03, roi={"0": float('inf')}, profit_perc=-0.03, trades=[BTrade(sell_reason=SellType.STOP_LOSS, open_tick=1, close_tick=1)] ) # 5) Stoploss and sell are hit. should sell on stoploss tc4 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], [1, 5000, 5025, 4800, 4987, 6172, 0, 1], # enter trade, stoploss hit, sell signal [2, 5000, 5025, 4975, 4987, 6172, 0, 0], ], stop_loss=-0.03, roi={"0": float('inf')}, profit_perc=-0.03, trades=[BTrade(sell_reason=SellType.STOP_LOSS, open_tick=1, close_tick=1)] ) TESTS = [ tc0, tc1, tc2, tc3, tc4 ] @pytest.mark.parametrize("data", TESTS) def test_edge_results(edge_conf, mocker, caplog, data) -> None: """ run functional tests """ freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) frame = _build_backtest_dataframe(data.data) caplog.set_level(logging.DEBUG) edge.fee = 0 trades = edge._find_trades_for_stoploss_range(frame, 'TEST/BTC', [data.stop_loss]) results = edge._fill_calculable_fields(DataFrame(trades)) if trades else DataFrame() assert len(trades) == len(data.trades) if not results.empty: assert round(results["profit_ratio"].sum(), 3) == round(data.profit_perc, 3) for c, trade in enumerate(data.trades): res = results.iloc[c] assert res.exit_type == trade.sell_reason assert res.open_date == _get_frame_time_from_offset(trade.open_tick).replace(tzinfo=None) assert res.close_date == _get_frame_time_from_offset(trade.close_tick).replace(tzinfo=None) def test_adjust(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), 'C/D': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), 'N/O': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60) } )) pairs = ['A/B', 'C/D', 'E/F', 'G/H'] assert(edge.adjust(pairs) == ['E/F', 'C/D']) def test_stoploss(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), 'C/D': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), 'N/O': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60) } )) assert edge.stoploss('E/F') == -0.01 def test_nonexisting_stoploss(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), } )) assert edge.stoploss('N/O') == -0.1 def test_stake_amount(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.02, 0.66, 3.71, 0.50, 1.71, 10, 60), } )) free = 100 total = 100 in_trade = 25 assert edge.stake_amount('E/F', free, 
total, in_trade) == 31.25 free = 20 total = 100 in_trade = 25 assert edge.stake_amount('E/F', free, total, in_trade) == 20 free = 0 total = 100 in_trade = 25 assert edge.stake_amount('E/F', free, total, in_trade) == 0 def test_nonexisting_stake_amount(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.11, 0.66, 3.71, 0.50, 1.71, 10, 60), } )) # should use strategy stoploss assert edge.stake_amount('N/O', 1, 2, 1) == 0.15 def test_edge_heartbeat_calculate(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) heartbeat = edge_conf['edge']['process_throttle_secs'] # should not recalculate if heartbeat not reached edge._last_updated = arrow.utcnow().int_timestamp - heartbeat + 1 assert edge.calculate() is False def mocked_load_data(datadir, pairs=[], timeframe='0m', timerange=None, *args, **kwargs): hz = 0.1 base = 0.001 NEOBTC = [ [ tests_start_time.shift(minutes=(x * timeframe_in_minute)).int_timestamp * 1000, math.sin(x * hz) / 1000 + base, math.sin(x * hz) / 1000 + base + 0.0001, math.sin(x * hz) / 1000 + base - 0.0001, math.sin(x * hz) / 1000 + base, 123.45 ] for x in range(0, 500)] hz = 0.2 base = 0.002 LTCBTC = [ [ tests_start_time.shift(minutes=(x * timeframe_in_minute)).int_timestamp * 1000, math.sin(x * hz) / 1000 + base, math.sin(x * hz) / 1000 + base + 0.0001, math.sin(x * hz) / 1000 + base - 0.0001, math.sin(x * hz) / 1000 + base, 123.45 ] for x in range(0, 500)] pairdata = {'NEO/BTC': ohlcv_to_dataframe(NEOBTC, '1h', pair="NEO/BTC", fill_missing=True), 'LTC/BTC': ohlcv_to_dataframe(LTCBTC, '1h', pair="LTC/BTC", fill_missing=True)} return pairdata def test_edge_process_downloaded_data(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.001)) mocker.patch('freqtrade.edge.edge_positioning.refresh_data', MagicMock()) mocker.patch('freqtrade.edge.edge_positioning.load_data', mocked_load_data) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) assert edge.calculate() assert len(edge._cached_pairs) == 2 assert edge._last_updated <= arrow.utcnow().int_timestamp + 2 def test_edge_process_no_data(mocker, edge_conf, caplog): freqtrade = get_patched_freqtradebot(mocker, edge_conf) mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.001)) mocker.patch('freqtrade.edge.edge_positioning.refresh_data', MagicMock()) mocker.patch('freqtrade.edge.edge_positioning.load_data', MagicMock(return_value={})) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) assert not edge.calculate() assert len(edge._cached_pairs) == 0 assert log_has("No data found. 
Edge is stopped ...", caplog) assert edge._last_updated == 0 def test_edge_process_no_trades(mocker, edge_conf, caplog): freqtrade = get_patched_freqtradebot(mocker, edge_conf) mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.001)) mocker.patch('freqtrade.edge.edge_positioning.refresh_data', MagicMock()) mocker.patch('freqtrade.edge.edge_positioning.load_data', mocked_load_data) # Return empty mocker.patch('freqtrade.edge.Edge._find_trades_for_stoploss_range', MagicMock(return_value=[])) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) assert not edge.calculate() assert len(edge._cached_pairs) == 0 assert log_has("No trades found.", caplog) def test_edge_init_error(mocker, edge_conf,): edge_conf['stake_amount'] = 0.5 mocker.patch('freqtrade.exchange.Exchange.get_fee', MagicMock(return_value=0.001)) with pytest.raises(OperationalException, match='Edge works only with unlimited stake amount'): get_patched_freqtradebot(mocker, edge_conf) @pytest.mark.parametrize("fee,risk_reward_ratio,expectancy", [ (0.0005, 306.5384615384, 101.5128205128), (0.001, 152.6923076923, 50.2307692308), ]) def test_process_expectancy(mocker, edge_conf, fee, risk_reward_ratio, expectancy): edge_conf['edge']['min_trade_number'] = 2 freqtrade = get_patched_freqtradebot(mocker, edge_conf) def get_fee(*args, **kwargs): return fee freqtrade.exchange.get_fee = get_fee edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:10:00.000000000'), 'trade_duration': '', 'open_rate': 17, 'close_rate': 17, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'trade_duration': '', 'open_rate': 20, 'close_rate': 20, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), 'trade_duration': '', 'open_rate': 26, 'close_rate': 34, 'exit_type': 'sell_signal'} ] trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) assert len(final) == 1 assert 'TEST/BTC' in final assert final['TEST/BTC'].stoploss == -0.9 assert round(final['TEST/BTC'].winrate, 10) == 0.3333333333 assert round(final['TEST/BTC'].risk_reward_ratio, 10) == risk_reward_ratio assert round(final['TEST/BTC'].required_risk_reward, 10) == 2.0 assert round(final['TEST/BTC'].expectancy, 10) == expectancy # Pop last item so no trade is profitable trades.pop() trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) assert len(final) == 0 assert isinstance(final, dict) def test_process_expectancy_remove_pumps(mocker, edge_conf, fee,): edge_conf['edge']['min_trade_number'] = 2 edge_conf['edge']['remove_pumps'] = True freqtrade = get_patched_freqtradebot(mocker, edge_conf) freqtrade.exchange.get_fee = fee edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), 'close_date': 
np.datetime64('2018-10-03T00:10:00.000000000'), 'open_index': 1, 'close_index': 1, 'trade_duration': '', 'open_rate': 17, 'close_rate': 15, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'open_index': 4, 'close_index': 4, 'trade_duration': '', 'open_rate': 20, 'close_rate': 10, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'open_index': 4, 'close_index': 4, 'trade_duration': '', 'open_rate': 20, 'close_rate': 10, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'open_index': 4, 'close_index': 4, 'trade_duration': '', 'open_rate': 20, 'close_rate': 10, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'open_index': 4, 'close_index': 4, 'trade_duration': '', 'open_rate': 20, 'close_rate': 10, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), 'open_index': 6, 'close_index': 7, 'trade_duration': '', 'open_rate': 26, 'close_rate': 134, 'exit_type': 'sell_signal'} ] trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) assert 'TEST/BTC' in final assert final['TEST/BTC'].stoploss == -0.9 assert final['TEST/BTC'].nb_trades == len(trades_df) - 1 assert round(final['TEST/BTC'].winrate, 10) == 0.0 def test_process_expectancy_only_wins(mocker, edge_conf, fee,): edge_conf['edge']['min_trade_number'] = 2 freqtrade = get_patched_freqtradebot(mocker, edge_conf) freqtrade.exchange.get_fee = fee edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:10:00.000000000'), 'open_index': 1, 'close_index': 1, 'trade_duration': '', 'open_rate': 15, 'close_rate': 17, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), 'open_index': 4, 'close_index': 4, 'trade_duration': '', 'open_rate': 10, 'close_rate': 20, 'exit_type': 'sell_signal'}, {'pair': 'TEST/BTC', 'stoploss': -0.9, 'profit_percent': '', 'profit_abs': '', 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), 'open_index': 6, 'close_index': 7, 'trade_duration': '', 'open_rate': 26, 'close_rate': 134, 'exit_type': 'sell_signal'} ] trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) assert 'TEST/BTC' in final assert final['TEST/BTC'].stoploss == -0.9 assert 
final['TEST/BTC'].nb_trades == len(trades_df) assert round(final['TEST/BTC'].winrate, 10) == 1.0 assert round(final['TEST/BTC'].risk_reward_ratio, 10) == float('inf') assert round(final['TEST/BTC'].expectancy, 10) == float('inf')
gpl-3.0
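The parametrized values in `test_process_expectancy` above are consistent with the usual trading-expectancy arithmetic. The following standalone sketch is not freqtrade code; it assumes the fee is charged on both legs of each trade, which reproduces the asserted magnitudes for the three sample trades (open/close rates 17/17, 20/20, 26/34):

# Standalone check of the numbers parametrized above (assumption: fee on both legs).
fee = 0.0005                                   # first parametrize case
opens, closes = [17, 20, 26], [17, 20, 34]

# Profit ratio per trade after fees; the two flat trades become small losses.
ratios = [c * (1 - fee) / (o * (1 + fee)) - 1 for o, c in zip(opens, closes)]
wins = [r for r in ratios if r > 0]
losses = [r for r in ratios if r <= 0]

winrate = len(wins) / len(ratios)                              # 1/3
risk_reward = (sum(wins) / len(wins)) / abs(sum(losses) / len(losses))
required_risk_reward = 1 / winrate - 1                         # 2.0
expectancy = winrate * risk_reward - (1 - winrate)

print(round(winrate, 4), round(risk_reward, 2), round(expectancy, 2))
# -> 0.3333 306.54 101.51, matching the (0.0005, 306.538..., 101.512...) row;
# fee = 0.001 yields the second parametrize row the same way.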
skamkar/harvard-yard
visuals.py
19
5012
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################

import matplotlib.pyplot as pl
import numpy as np
import sklearn.learning_curve as curves
from sklearn.tree import DecisionTreeRegressor
from sklearn.cross_validation import ShuffleSplit, train_test_split


def ModelLearning(X, y):
    """ Calculates the performance of several models with varying sizes of training data.
        The learning and testing scores for each model are then plotted. """

    # Create 10 cross-validation sets for training and testing
    cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)

    # Generate the training set sizes increasing by 50
    train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int)

    # Create the figure window
    fig = pl.figure(figsize=(10,7))

    # Create three different models based on max_depth
    for k, depth in enumerate([1,3,6,10]):

        # Create a Decision tree regressor at max_depth = depth
        regressor = DecisionTreeRegressor(max_depth = depth)

        # Calculate the training and testing scores
        sizes, train_scores, test_scores = curves.learning_curve(regressor, X, y, \
            cv = cv, train_sizes = train_sizes, scoring = 'r2')

        # Find the mean and standard deviation for smoothing
        train_std = np.std(train_scores, axis = 1)
        train_mean = np.mean(train_scores, axis = 1)
        test_std = np.std(test_scores, axis = 1)
        test_mean = np.mean(test_scores, axis = 1)

        # Subplot the learning curve
        ax = fig.add_subplot(2, 2, k+1)
        ax.plot(sizes, train_mean, 'o-', color = 'r', label = 'Training Score')
        ax.plot(sizes, test_mean, 'o-', color = 'g', label = 'Testing Score')
        ax.fill_between(sizes, train_mean - train_std, \
            train_mean + train_std, alpha = 0.15, color = 'r')
        ax.fill_between(sizes, test_mean - test_std, \
            test_mean + test_std, alpha = 0.15, color = 'g')

        # Labels
        ax.set_title('max_depth = %s'%(depth))
        ax.set_xlabel('Number of Training Points')
        ax.set_ylabel('Score')
        ax.set_xlim([0, X.shape[0]*0.8])
        ax.set_ylim([-0.05, 1.05])

    # Visual aesthetics
    ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad = 0.)
    fig.suptitle('Decision Tree Regressor Learning Performances', fontsize = 16, y = 1.03)
    fig.tight_layout()
    fig.show()


def ModelComplexity(X, y):
    """ Calculates the performance of the model as model complexity increases.
        The learning and testing error rates are then plotted. """

    # Create 10 cross-validation sets for training and testing
    cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)

    # Vary the max_depth parameter from 1 to 10
    max_depth = np.arange(1,11)

    # Calculate the training and testing scores
    train_scores, test_scores = curves.validation_curve(DecisionTreeRegressor(), X, y, \
        param_name = "max_depth", param_range = max_depth, cv = cv, scoring = 'r2')

    # Find the mean and standard deviation for smoothing
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    # Plot the validation curve
    pl.figure(figsize=(7, 5))
    pl.title('Decision Tree Regressor Complexity Performance')
    pl.plot(max_depth, train_mean, 'o-', color = 'r', label = 'Training Score')
    pl.plot(max_depth, test_mean, 'o-', color = 'g', label = 'Validation Score')
    pl.fill_between(max_depth, train_mean - train_std, \
        train_mean + train_std, alpha = 0.15, color = 'r')
    pl.fill_between(max_depth, test_mean - test_std, \
        test_mean + test_std, alpha = 0.15, color = 'g')

    # Visual aesthetics
    pl.legend(loc = 'lower right')
    pl.xlabel('Maximum Depth')
    pl.ylabel('Score')
    pl.ylim([-0.05,1.05])
    pl.show()


def PredictTrials(X, y, fitter, data):
    """ Performs trials of fitting and predicting data. """

    # Store the predicted prices
    prices = []

    for k in range(10):
        # Split the data
        X_train, X_test, y_train, y_test = train_test_split(X, y, \
            test_size = 0.2, random_state = k)

        # Fit the data
        reg = fitter(X_train, y_train)

        # Make a prediction
        pred = reg.predict([data[0]])[0]
        prices.append(pred)

        # Result
        print("Trial {}: ${:,.2f}".format(k+1, pred))

    # Display price range
    print("\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)))
mit
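visuals.py above targets the pre-0.18 scikit-learn API (`sklearn.cross_validation`, `sklearn.learning_curve`), which later releases removed. A minimal sketch of the modern equivalents, using made-up data rather than the project's, in case someone wants to produce the same curves under a current scikit-learn:

# Modern equivalents (scikit-learn >= 0.18) of the deprecated modules used above.
# Note: ShuffleSplit now takes n_splits instead of n_iter and no longer takes
# the sample count as its first argument.
import numpy as np
from sklearn.model_selection import ShuffleSplit, learning_curve, validation_curve
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 3)                     # illustrative data, not the project's
y = X[:, 0] + 0.1 * rng.randn(200)

cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)

# Learning curve over increasing training-set fractions
sizes, train_scores, test_scores = learning_curve(
    DecisionTreeRegressor(max_depth=3), X, y, cv=cv,
    train_sizes=np.linspace(0.1, 1.0, 5), scoring='r2')

# Validation curve over max_depth, as in ModelComplexity above
train_scores2, test_scores2 = validation_curve(
    DecisionTreeRegressor(), X, y, param_name='max_depth',
    param_range=np.arange(1, 11), cv=cv, scoring='r2')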
ningchi/scikit-learn
sklearn/datasets/tests/test_samples_generator.py
67
14842
from __future__ import division from collections import defaultdict from functools import partial import numpy as np from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.datasets import make_classification from sklearn.datasets import make_multilabel_classification from sklearn.datasets import make_hastie_10_2 from sklearn.datasets import make_regression from sklearn.datasets import make_blobs from sklearn.datasets import make_friedman1 from sklearn.datasets import make_friedman2 from sklearn.datasets import make_friedman3 from sklearn.datasets import make_low_rank_matrix from sklearn.datasets import make_sparse_coded_signal from sklearn.datasets import make_sparse_uncorrelated from sklearn.datasets import make_spd_matrix from sklearn.datasets import make_swiss_roll from sklearn.datasets import make_s_curve from sklearn.datasets import make_biclusters from sklearn.datasets import make_checkerboard from sklearn.utils.validation import assert_all_finite def test_make_classification(): X, y = make_classification(n_samples=100, n_features=20, n_informative=5, n_redundant=1, n_repeated=1, n_classes=3, n_clusters_per_class=1, hypercube=False, shift=None, scale=None, weights=[0.1, 0.25], random_state=0) assert_equal(X.shape, (100, 20), "X shape mismatch") assert_equal(y.shape, (100,), "y shape mismatch") assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes") assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0") assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1") assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2") def test_make_classification_informative_features(): """Test the construction of informative features in make_classification Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and fully-specified `weights`. 
""" # Create very separate clusters; check that vertices are unique and # correspond to classes class_sep = 1e6 make = partial(make_classification, class_sep=class_sep, n_redundant=0, n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False) for n_informative, weights, n_clusters_per_class in [(2, [1], 1), (2, [1/3] * 3, 1), (2, [1/4] * 4, 1), (2, [1/2] * 2, 2), (2, [3/4, 1/4], 2), (10, [1/3] * 3, 10) ]: n_classes = len(weights) n_clusters = n_classes * n_clusters_per_class n_samples = n_clusters * 50 for hypercube in (False, True): X, y = make(n_samples=n_samples, n_classes=n_classes, weights=weights, n_features=n_informative, n_informative=n_informative, n_clusters_per_class=n_clusters_per_class, hypercube=hypercube, random_state=0) assert_equal(X.shape, (n_samples, n_informative)) assert_equal(y.shape, (n_samples,)) # Cluster by sign, viewed as strings to allow uniquing signs = np.sign(X) signs = signs.view(dtype='|S{0}'.format(signs.strides[0])) unique_signs, cluster_index = np.unique(signs, return_inverse=True) assert_equal(len(unique_signs), n_clusters, "Wrong number of clusters, or not in distinct " "quadrants") clusters_by_class = defaultdict(set) for cluster, cls in zip(cluster_index, y): clusters_by_class[cls].add(cluster) for clusters in clusters_by_class.values(): assert_equal(len(clusters), n_clusters_per_class, "Wrong number of clusters per class") assert_equal(len(clusters_by_class), n_classes, "Wrong number of classes") assert_array_almost_equal(np.bincount(y) / len(y) // weights, [1] * n_classes, err_msg="Wrong number of samples " "per class") # Ensure on vertices of hypercube for cluster in range(len(unique_signs)): centroid = X[cluster_index == cluster].mean(axis=0) if hypercube: assert_array_almost_equal(np.abs(centroid), [class_sep] * n_informative, decimal=0, err_msg="Clusters are not " "centered on hypercube " "vertices") else: assert_raises(AssertionError, assert_array_almost_equal, np.abs(centroid), [class_sep] * n_informative, decimal=0, err_msg="Clusters should not be cenetered " "on hypercube vertices") assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1) assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2) def test_make_multilabel_classification_return_sequences(): for allow_unlabeled, min_length in zip((True, False), (0, 1)): X, Y = assert_warns(DeprecationWarning, make_multilabel_classification, n_samples=100, n_features=20, n_classes=3, random_state=0, allow_unlabeled=allow_unlabeled) assert_equal(X.shape, (100, 20), "X shape mismatch") if not allow_unlabeled: assert_equal(max([max(y) for y in Y]), 2) assert_equal(min([len(y) for y in Y]), min_length) assert_true(max([len(y) for y in Y]) <= 3) def test_make_multilabel_classification_return_indicator(): for allow_unlabeled, min_length in zip((True, False), (0, 1)): X, Y = make_multilabel_classification(n_samples=25, n_features=20, n_classes=3, random_state=0, return_indicator=True, allow_unlabeled=allow_unlabeled) assert_equal(X.shape, (25, 20), "X shape mismatch") assert_equal(Y.shape, (25, 3), "Y shape mismatch") assert_true(np.all(np.sum(Y, axis=0) > min_length)) # Also test return_distributions X2, Y2, p_c, p_w_c = make_multilabel_classification( n_samples=25, n_features=20, n_classes=3, random_state=0, return_indicator=True, allow_unlabeled=allow_unlabeled, return_distributions=True) assert_array_equal(X, X2) assert_array_equal(Y, Y2) assert_equal(p_c.shape, (3,)) assert_almost_equal(p_c.sum(), 1) 
assert_equal(p_w_c.shape, (20, 3)) assert_almost_equal(p_w_c.sum(axis=0), [1] * 3) def test_make_hastie_10_2(): X, y = make_hastie_10_2(n_samples=100, random_state=0) assert_equal(X.shape, (100, 10), "X shape mismatch") assert_equal(y.shape, (100,), "y shape mismatch") assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes") def test_make_regression(): X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3, effective_rank=5, coef=True, bias=0.0, noise=1.0, random_state=0) assert_equal(X.shape, (100, 10), "X shape mismatch") assert_equal(y.shape, (100,), "y shape mismatch") assert_equal(c.shape, (10,), "coef shape mismatch") assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features") # Test that y ~= np.dot(X, c) + bias + N(0, 1.0). assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) # Test with small number of features. X, y = make_regression(n_samples=100, n_features=1) # n_informative=3 assert_equal(X.shape, (100, 1)) def test_make_regression_multitarget(): X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3, n_targets=3, coef=True, noise=1., random_state=0) assert_equal(X.shape, (100, 10), "X shape mismatch") assert_equal(y.shape, (100, 3), "y shape mismatch") assert_equal(c.shape, (10, 3), "coef shape mismatch") assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features") # Test that y ~= np.dot(X, c) + bias + N(0, 1.0) assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) def test_make_blobs(): X, y = make_blobs(n_samples=50, n_features=2, centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], random_state=0) assert_equal(X.shape, (50, 2), "X shape mismatch") assert_equal(y.shape, (50,), "y shape mismatch") assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs") def test_make_friedman1(): X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0, random_state=0) assert_equal(X.shape, (5, 10), "X shape mismatch") assert_equal(y.shape, (5,), "y shape mismatch") assert_array_almost_equal(y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4]) def test_make_friedman2(): X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0) assert_equal(X.shape, (5, 4), "X shape mismatch") assert_equal(y.shape, (5,), "y shape mismatch") assert_array_almost_equal(y, (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5) def test_make_friedman3(): X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0) assert_equal(X.shape, (5, 4), "X shape mismatch") assert_equal(y.shape, (5,), "y shape mismatch") assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0])) def test_make_low_rank_matrix(): X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5, tail_strength=0.01, random_state=0) assert_equal(X.shape, (50, 25), "X shape mismatch") from numpy.linalg import svd u, s, v = svd(X) assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5") def test_make_sparse_coded_signal(): Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8, n_features=10, n_nonzero_coefs=3, random_state=0) assert_equal(Y.shape, (10, 5), "Y shape mismatch") assert_equal(D.shape, (10, 8), "D shape mismatch") assert_equal(X.shape, (8, 5), "X shape mismatch") for col in X.T: assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch') assert_array_almost_equal(np.dot(D, X), Y) assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)), np.ones(D.shape[1])) def 
test_make_sparse_uncorrelated(): X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0) assert_equal(X.shape, (5, 10), "X shape mismatch") assert_equal(y.shape, (5,), "y shape mismatch") def test_make_spd_matrix(): X = make_spd_matrix(n_dim=5, random_state=0) assert_equal(X.shape, (5, 5), "X shape mismatch") assert_array_almost_equal(X, X.T) from numpy.linalg import eig eigenvalues, _ = eig(X) assert_array_equal(eigenvalues > 0, np.array([True] * 5), "X is not positive-definite") def test_make_swiss_roll(): X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0) assert_equal(X.shape, (5, 3), "X shape mismatch") assert_equal(t.shape, (5,), "t shape mismatch") assert_array_almost_equal(X[:, 0], t * np.cos(t)) assert_array_almost_equal(X[:, 2], t * np.sin(t)) def test_make_s_curve(): X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0) assert_equal(X.shape, (5, 3), "X shape mismatch") assert_equal(t.shape, (5,), "t shape mismatch") assert_array_almost_equal(X[:, 0], np.sin(t)) assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1)) def test_make_biclusters(): X, rows, cols = make_biclusters( shape=(100, 100), n_clusters=4, shuffle=True, random_state=0) assert_equal(X.shape, (100, 100), "X shape mismatch") assert_equal(rows.shape, (4, 100), "rows shape mismatch") assert_equal(cols.shape, (4, 100,), "columns shape mismatch") assert_all_finite(X) assert_all_finite(rows) assert_all_finite(cols) X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4, shuffle=True, random_state=0) assert_array_almost_equal(X, X2) def test_make_checkerboard(): X, rows, cols = make_checkerboard( shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0) assert_equal(X.shape, (100, 100), "X shape mismatch") assert_equal(rows.shape, (100, 100), "rows shape mismatch") assert_equal(cols.shape, (100, 100,), "columns shape mismatch") X, rows, cols = make_checkerboard( shape=(100, 100), n_clusters=2, shuffle=True, random_state=0) assert_all_finite(X) assert_all_finite(rows) assert_all_finite(cols) X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2, shuffle=True, random_state=0) X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2, shuffle=True, random_state=0) assert_array_equal(X1, X2)
bsd-3-clause
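The informative-features test above packs several generator options into one loop. Here is a small standalone illustration of the same `make_classification` knobs it exercises (per-class weights, one cluster per class, no label noise); the numbers are illustrative, not taken from the test file:

import numpy as np
from sklearn.datasets import make_classification

# One Gaussian cluster per class, centred on hypercube vertices separated by
# class_sep; weights control the per-class sample counts.
X, y = make_classification(n_samples=200, n_features=5, n_informative=5,
                           n_redundant=0, n_repeated=0, n_classes=2,
                           n_clusters_per_class=1, weights=[0.25, 0.75],
                           class_sep=2.0, flip_y=0, random_state=0)

print(X.shape)          # (200, 5)
print(np.bincount(y))   # [ 50 150] -- exact because flip_y=0 disables label noise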
berkeley-stat159/project-delta
code/scripts/smoothing.py
1
2548
""" Purpose ------- This script applies smoothing with a Gaussian kernel in bulk to both the raw and the filtered BOLD signal data. The kernel used has a full-width-at-half-maximum measurement of 5 millimeters, which corresponds to a standard deviation of approximately 2.355 millimeters. It should output four files per run: two each for the raw and the filtered data, one of which is the image before smoothing and the other the image after smoothing. """ from __future__ import absolute_import, division, print_function import matplotlib.pyplot as plt import numpy as np import os, sys sys.path.append("code/utils") from make_class import * from plot_tool import * # Create a collection of all subject IDs and all run IDs run_IDs = [str(i).zfill(3) for i in range(1, 4)] subject_IDs = [str(i).zfill(3) for i in range(1, 17)] IDs = list(zip([run_ID for _ in range(16) for run_ID in run_IDs], [subject_ID for _ in range(3) for subject_ID in subject_IDs])) IDs.sort() # We perform the procedure outlined in this script for each run of each subject: for ID in IDs: run, subject = ID # Extract the data of interest obj = ds005(subject, run) # Define results directories to which to save the figures produced path_result = "results/run%s/smoothing/sub%s/" % ID try: os.makedirs(path_result) except OSError: if not os.path.isdir(path_result): raise # Each figure will be plotted with the help of the plot_volume() utility # contained in the plot_tool module. It produces a two-dimensional grid-like # canvas on which each horizontal slice of the brain is shown as a tile. raw_original = plot_volume(obj.raw.data, 50) plt.imshow(raw_original) plt.colorbar() plt.title("Raw Data: Before Smoothing") plt.savefig(path_result + "raw_original.png") plt.close() raw_smoothed = plot_volume(obj.raw.smooth(), 50) plt.imshow(raw_smoothed) plt.colorbar() plt.title("Raw Data: After Smoothing") plt.savefig(path_result + "raw_smoothed.png") plt.close() filtered_original = plot_volume(obj.filtered.data, 50) plt.imshow(filtered_original) plt.colorbar() plt.title("Filtered Data: Before Smoothing") plt.savefig(path_result + "filtered_original.png") plt.close() filtered_smoothed = plot_volume(obj.filtered.smooth(), 50) plt.imshow(filtered_smoothed) plt.colorbar() plt.title("Filtered Data: After Smoothing") plt.savefig(path_result + "filtered_smoothed.png") plt.close()
bsd-3-clause
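For reference, the FWHM-to-sigma conversion behind the Gaussian kernel described in the smoothing script's docstring:

\[
\mathrm{FWHM} \;=\; 2\sqrt{2\ln 2}\,\sigma \;\approx\; 2.355\,\sigma
\qquad\Longrightarrow\qquad
\sigma \;=\; \frac{5\ \mathrm{mm}}{2\sqrt{2\ln 2}} \;\approx\; 2.12\ \mathrm{mm}.
\]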
mlyundin/scikit-learn
sklearn/cluster/birch.py
207
22706
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from math import sqrt from ..metrics.pairwise import euclidean_distances from ..base import TransformerMixin, ClusterMixin, BaseEstimator from ..externals.six.moves import xrange from ..utils import check_array from ..utils.extmath import row_norms, safe_sparse_dot from ..utils.validation import NotFittedError, check_is_fitted from .hierarchical import AgglomerativeClustering def _iterate_sparse_X(X): """This little hack returns a densified row when iterating over a sparse matrix, insted of constructing a sparse matrix for every row that is expensive. """ n_samples = X.shape[0] X_indices = X.indices X_data = X.data X_indptr = X.indptr for i in xrange(n_samples): row = np.zeros(X.shape[1]) startptr, endptr = X_indptr[i], X_indptr[i + 1] nonzero_indices = X_indices[startptr:endptr] row[nonzero_indices] = X_data[startptr:endptr] yield row def _split_node(node, threshold, branching_factor): """The node has to be split if there is no place for a new subcluster in the node. 1. Two empty nodes and two empty subclusters are initialized. 2. The pair of distant subclusters are found. 3. The properties of the empty subclusters and nodes are updated according to the nearest distance between the subclusters to the pair of distant subclusters. 4. The two nodes are set as children to the two subclusters. """ new_subcluster1 = _CFSubcluster() new_subcluster2 = _CFSubcluster() new_node1 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_node2 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_subcluster1.child_ = new_node1 new_subcluster2.child_ = new_node2 if node.is_leaf: if node.prev_leaf_ is not None: node.prev_leaf_.next_leaf_ = new_node1 new_node1.prev_leaf_ = node.prev_leaf_ new_node1.next_leaf_ = new_node2 new_node2.prev_leaf_ = new_node1 new_node2.next_leaf_ = node.next_leaf_ if node.next_leaf_ is not None: node.next_leaf_.prev_leaf_ = new_node2 dist = euclidean_distances( node.centroids_, Y_norm_squared=node.squared_norm_, squared=True) n_clusters = dist.shape[0] farthest_idx = np.unravel_index( dist.argmax(), (n_clusters, n_clusters)) node1_dist, node2_dist = dist[[farthest_idx]] node1_closer = node1_dist < node2_dist for idx, subcluster in enumerate(node.subclusters_): if node1_closer[idx]: new_node1.append_subcluster(subcluster) new_subcluster1.update(subcluster) else: new_node2.append_subcluster(subcluster) new_subcluster2.update(subcluster) return new_subcluster1, new_subcluster2 class _CFNode(object): """Each node in a CFTree is called a CFNode. The CFNode can have a maximum of branching_factor number of CFSubclusters. Parameters ---------- threshold : float Threshold needed for a new subcluster to enter a CFSubcluster. branching_factor : int Maximum number of CF subclusters in each node. is_leaf : bool We need to know if the CFNode is a leaf or not, in order to retrieve the final subclusters. n_features : int The number of features. Attributes ---------- subclusters_ : array-like list of subclusters for a particular CFNode. prev_leaf_ : _CFNode prev_leaf. Useful only if is_leaf is True. next_leaf_ : _CFNode next_leaf. Useful only if is_leaf is True. the final subclusters. 
init_centroids_ : ndarray, shape (branching_factor + 1, n_features) manipulate ``init_centroids_`` throughout rather than centroids_ since the centroids are just a view of the ``init_centroids_`` . init_sq_norm_ : ndarray, shape (branching_factor + 1,) manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. centroids_ : ndarray view of ``init_centroids_``. squared_norm_ : ndarray view of ``init_sq_norm_``. """ def __init__(self, threshold, branching_factor, is_leaf, n_features): self.threshold = threshold self.branching_factor = branching_factor self.is_leaf = is_leaf self.n_features = n_features # The list of subclusters, centroids and squared norms # to manipulate throughout. self.subclusters_ = [] self.init_centroids_ = np.zeros((branching_factor + 1, n_features)) self.init_sq_norm_ = np.zeros((branching_factor + 1)) self.squared_norm_ = [] self.prev_leaf_ = None self.next_leaf_ = None def append_subcluster(self, subcluster): n_samples = len(self.subclusters_) self.subclusters_.append(subcluster) self.init_centroids_[n_samples] = subcluster.centroid_ self.init_sq_norm_[n_samples] = subcluster.sq_norm_ # Keep centroids and squared norm as views. In this way # if we change init_centroids and init_sq_norm_, it is # sufficient, self.centroids_ = self.init_centroids_[:n_samples + 1, :] self.squared_norm_ = self.init_sq_norm_[:n_samples + 1] def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): """Remove a subcluster from a node and update it with the split subclusters. """ ind = self.subclusters_.index(subcluster) self.subclusters_[ind] = new_subcluster1 self.init_centroids_[ind] = new_subcluster1.centroid_ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ self.append_subcluster(new_subcluster2) def insert_cf_subcluster(self, subcluster): """Insert a new subcluster into the node.""" if not self.subclusters_: self.append_subcluster(subcluster) return False threshold = self.threshold branching_factor = self.branching_factor # We need to find the closest subcluster among all the # subclusters so that we can insert our new subcluster. dist_matrix = np.dot(self.centroids_, subcluster.centroid_) dist_matrix *= -2. dist_matrix += self.squared_norm_ closest_index = np.argmin(dist_matrix) closest_subcluster = self.subclusters_[closest_index] # If the subcluster has a child, we need a recursive strategy. if closest_subcluster.child_ is not None: split_child = closest_subcluster.child_.insert_cf_subcluster( subcluster) if not split_child: # If it is determined that the child need not be split, we # can just update the closest_subcluster closest_subcluster.update(subcluster) self.init_centroids_[closest_index] = \ self.subclusters_[closest_index].centroid_ self.init_sq_norm_[closest_index] = \ self.subclusters_[closest_index].sq_norm_ return False # things not too good. we need to redistribute the subclusters in # our child node, and add a new subcluster in the parent # subcluster to accomodate the new child. else: new_subcluster1, new_subcluster2 = _split_node( closest_subcluster.child_, threshold, branching_factor) self.update_split_subclusters( closest_subcluster, new_subcluster1, new_subcluster2) if len(self.subclusters_) > self.branching_factor: return True return False # good to go! 
else: merged = closest_subcluster.merge_subcluster( subcluster, self.threshold) if merged: self.init_centroids_[closest_index] = \ closest_subcluster.centroid_ self.init_sq_norm_[closest_index] = \ closest_subcluster.sq_norm_ return False # not close to any other subclusters, and we still # have space, so add. elif len(self.subclusters_) < self.branching_factor: self.append_subcluster(subcluster) return False # We do not have enough space nor is it closer to an # other subcluster. We need to split. else: self.append_subcluster(subcluster) return True class _CFSubcluster(object): """Each subcluster in a CFNode is called a CFSubcluster. A CFSubcluster can have a CFNode has its child. Parameters ---------- linear_sum : ndarray, shape (n_features,), optional Sample. This is kept optional to allow initialization of empty subclusters. Attributes ---------- n_samples_ : int Number of samples that belong to each subcluster. linear_sum_ : ndarray Linear sum of all the samples in a subcluster. Prevents holding all sample data in memory. squared_sum_ : float Sum of the squared l2 norms of all samples belonging to a subcluster. centroid_ : ndarray Centroid of the subcluster. Prevent recomputing of centroids when ``CFNode.centroids_`` is called. child_ : _CFNode Child Node of the subcluster. Once a given _CFNode is set as the child of the _CFNode, it is set to ``self.child_``. sq_norm_ : ndarray Squared norm of the subcluster. Used to prevent recomputing when pairwise minimum distances are computed. """ def __init__(self, linear_sum=None): if linear_sum is None: self.n_samples_ = 0 self.squared_sum_ = 0.0 self.linear_sum_ = 0 else: self.n_samples_ = 1 self.centroid_ = self.linear_sum_ = linear_sum self.squared_sum_ = self.sq_norm_ = np.dot( self.linear_sum_, self.linear_sum_) self.child_ = None def update(self, subcluster): self.n_samples_ += subcluster.n_samples_ self.linear_sum_ += subcluster.linear_sum_ self.squared_sum_ += subcluster.squared_sum_ self.centroid_ = self.linear_sum_ / self.n_samples_ self.sq_norm_ = np.dot(self.centroid_, self.centroid_) def merge_subcluster(self, nominee_cluster, threshold): """Check if a cluster is worthy enough to be merged. If yes then merge. """ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_norm = np.dot(new_centroid, new_centroid) dot_product = (-2 * new_n) * new_norm sq_radius = (new_ss + dot_product) / new_n + new_norm if sq_radius <= threshold ** 2: (self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_) = \ new_n, new_ls, new_ss, new_centroid, new_norm return True return False @property def radius(self): """Return radius of the subcluster""" dot_product = -2 * np.dot(self.linear_sum_, self.centroid_) return sqrt( ((self.squared_sum_ + dot_product) / self.n_samples_) + self.sq_norm_) class Birch(BaseEstimator, TransformerMixin, ClusterMixin): """Implements the Birch clustering algorithm. Every new sample is inserted into the root of the Clustering Feature Tree. It is then clubbed together with the subcluster that has the centroid closest to the new sample. This is done recursively till it ends up at the subcluster of the leaf of the tree has the closest centroid. Read more in the :ref:`User Guide <birch>`. 
Parameters ---------- threshold : float, default 0.5 The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold. Otherwise a new subcluster is started. branching_factor : int, default 50 Maximum number of CF subclusters in each node. If a new samples enters such that the number of subclusters exceed the branching_factor then the node has to be split. The corresponding parent also has to be split and if the number of subclusters in the parent is greater than the branching factor, then it has to be split recursively. n_clusters : int, instance of sklearn.cluster model, default None Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples. By default, this final clustering step is not performed and the subclusters are returned as they are. If a model is provided, the model is fit treating the subclusters as new samples and the initial data is mapped to the label of the closest subcluster. If an int is provided, the model fit is AgglomerativeClustering with n_clusters set to the int. compute_labels : bool, default True Whether or not to compute labels for each fit. copy : bool, default True Whether or not to make a copy of the given data. If set to False, the initial data will be overwritten. Attributes ---------- root_ : _CFNode Root of the CFTree. dummy_leaf_ : _CFNode Start pointer to all the leaves. subcluster_centers_ : ndarray, Centroids of all subclusters read directly from the leaves. subcluster_labels_ : ndarray, Labels assigned to the centroids of the subclusters after they are clustered globally. labels_ : ndarray, shape (n_samples,) Array of labels assigned to the input data. if partial_fit is used instead of fit, they are assigned to the last batch of data. Examples -------- >>> from sklearn.cluster import Birch >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5, ... compute_labels=True) >>> brc.fit(X) Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None, threshold=0.5) >>> brc.predict(X) array([0, 0, 0, 1, 1, 1]) References ---------- * Tian Zhang, Raghu Ramakrishnan, Maron Livny BIRCH: An efficient data clustering method for large databases. http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf * Roberto Perdisci JBirch - Java implementation of BIRCH clustering algorithm https://code.google.com/p/jbirch/ """ def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True): self.threshold = threshold self.branching_factor = branching_factor self.n_clusters = n_clusters self.compute_labels = compute_labels self.copy = copy def fit(self, X, y=None): """ Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. """ self.fit_, self.partial_fit_ = True, False return self._fit(X) def _fit(self, X): X = check_array(X, accept_sparse='csr', copy=self.copy) threshold = self.threshold branching_factor = self.branching_factor if branching_factor <= 1: raise ValueError("Branching_factor should be greater than one.") n_samples, n_features = X.shape # If partial_fit is called for the first time or fit is called, we # start a new tree. partial_fit = getattr(self, 'partial_fit_') has_root = getattr(self, 'root_', None) if getattr(self, 'fit_') or (partial_fit and not has_root): # The first root is the leaf. Manipulate this object throughout. 
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) # To enable getting back subclusters. self.dummy_leaf_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) self.dummy_leaf_.next_leaf_ = self.root_ self.root_.prev_leaf_ = self.dummy_leaf_ # Cannot vectorize. Enough to convince to use cython. if not sparse.issparse(X): iter_func = iter else: iter_func = _iterate_sparse_X for sample in iter_func(X): subcluster = _CFSubcluster(linear_sum=sample) split = self.root_.insert_cf_subcluster(subcluster) if split: new_subcluster1, new_subcluster2 = _split_node( self.root_, threshold, branching_factor) del self.root_ self.root_ = _CFNode(threshold, branching_factor, is_leaf=False, n_features=n_features) self.root_.append_subcluster(new_subcluster1) self.root_.append_subcluster(new_subcluster2) centroids = np.concatenate([ leaf.centroids_ for leaf in self._get_leaves()]) self.subcluster_centers_ = centroids self._global_clustering(X) return self def _get_leaves(self): """ Retrieve the leaves of the CF Node. Returns ------- leaves: array-like List of the leaf nodes. """ leaf_ptr = self.dummy_leaf_.next_leaf_ leaves = [] while leaf_ptr is not None: leaves.append(leaf_ptr) leaf_ptr = leaf_ptr.next_leaf_ return leaves def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features), None Input data. If X is not provided, only the global clustering step is done. """ self.partial_fit_, self.fit_ = True, False if X is None: # Perform just the final global clustering step. self._global_clustering() return self else: self._check_fit(X) return self._fit(X) def _check_fit(self, X): is_fitted = hasattr(self, 'subcluster_centers_') # Called by partial_fit, before fitting. has_partial_fit = hasattr(self, 'partial_fit_') # Should raise an error if one does not fit before predicting. if not (is_fitted or has_partial_fit): raise NotFittedError("Fit training data before predicting") if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]: raise ValueError( "Training data and predicted data do " "not have same number of features.") def predict(self, X): """ Predict data using the ``centroids_`` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- labels: ndarray, shape(n_samples) Labelled data. """ X = check_array(X, accept_sparse='csr') self._check_fit(X) reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T) reduced_distance *= -2 reduced_distance += self._subcluster_norms return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)] def transform(self, X, y=None): """ Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters) Transformed data. """ check_is_fitted(self, 'subcluster_centers_') return euclidean_distances(X, self.subcluster_centers_) def _global_clustering(self, X=None): """ Global clustering for the subclusters obtained after fitting """ clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. 
not_enough_centroids = False if isinstance(clusterer, int): clusterer = AgglomerativeClustering( n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True elif (clusterer is not None and not hasattr(clusterer, 'fit_predict')): raise ValueError("n_clusters should be an instance of " "ClusterMixin or an int") # To use in predict to avoid recalculation. self._subcluster_norms = row_norms( self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by Birch is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters)) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict( self.subcluster_centers_) if compute_labels: self.labels_ = self.predict(X)
bsd-3-clause
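A note on the algebra inside `_CFSubcluster.merge_subcluster` above (a reading of the code, not text from the scikit-learn docs): each clustering feature keeps \(n\) = `n_samples_`, \(\mathbf{LS}\) = `linear_sum_` and \(SS\) = `squared_sum_`. With the merged centroid \(\mathbf{c} = \mathbf{LS}/n\), the quantity `sq_radius` simplifies to

\[
R^{2} \;=\; \frac{SS}{n} - \lVert \mathbf{c} \rVert^{2}
\;=\; \frac{1}{n}\sum_i \lVert \mathbf{x}_i - \mathbf{c} \rVert^{2},
\]

i.e. the mean squared distance of the merged samples from their centroid, and the merge is accepted only when \(R^{2} \le \text{threshold}^{2}\).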
shangwuhencc/scikit-learn
examples/cluster/plot_digits_linkage.py
369
2959
""" ============================================================================= Various Agglomerative Clustering on a 2D embedding of digits ============================================================================= An illustration of various linkage option for agglomerative clustering on a 2D embedding of the digits dataset. The goal of this example is to show intuitively how the metrics behave, and not to find good clusters for the digits. This is why the example works on a 2D embedding. What this example shows us is the behavior "rich getting richer" of agglomerative clustering that tends to create uneven cluster sizes. This behavior is especially pronounced for the average linkage strategy, that ends up with a couple of singleton clusters. """ # Authors: Gael Varoquaux # License: BSD 3 clause (C) INRIA 2014 print(__doc__) from time import time import numpy as np from scipy import ndimage from matplotlib import pyplot as plt from sklearn import manifold, datasets digits = datasets.load_digits(n_class=10) X = digits.data y = digits.target n_samples, n_features = X.shape np.random.seed(0) def nudge_images(X, y): # Having a larger dataset shows more clearly the behavior of the # methods, but we multiply the size of the dataset only by 2, as the # cost of the hierarchical clustering methods are strongly # super-linear in n_samples shift = lambda x: ndimage.shift(x.reshape((8, 8)), .3 * np.random.normal(size=2), mode='constant', ).ravel() X = np.concatenate([X, np.apply_along_axis(shift, 1, X)]) Y = np.concatenate([y, y], axis=0) return X, Y X, y = nudge_images(X, y) #---------------------------------------------------------------------- # Visualize the clustering def plot_clustering(X_red, X, labels, title=None): x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0) X_red = (X_red - x_min) / (x_max - x_min) plt.figure(figsize=(6, 4)) for i in range(X_red.shape[0]): plt.text(X_red[i, 0], X_red[i, 1], str(y[i]), color=plt.cm.spectral(labels[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) plt.xticks([]) plt.yticks([]) if title is not None: plt.title(title, size=17) plt.axis('off') plt.tight_layout() #---------------------------------------------------------------------- # 2D embedding of the digits dataset print("Computing embedding") X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X) print("Done.") from sklearn.cluster import AgglomerativeClustering for linkage in ('ward', 'average', 'complete'): clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10) t0 = time() clustering.fit(X_red) print("%s : %.2fs" % (linkage, time() - t0)) plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage) plt.show()
bsd-3-clause
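The example above argues that average linkage shows a "rich get richer" behaviour with uneven cluster sizes. The following hedged sketch makes the same point numerically on synthetic blobs, with no digits data or spectral embedding required; the dataset parameters are arbitrary choices, not taken from the example.

# Hedged sketch: compare cluster-size distributions per linkage on toy data.
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs

X_toy, _ = make_blobs(n_samples=600, centers=10, cluster_std=3.0, random_state=0)
for linkage in ('ward', 'average', 'complete'):
    labels = AgglomerativeClustering(linkage=linkage,
                                     n_clusters=10).fit_predict(X_toy)
    # Uneven sizes (a few huge clusters plus singletons) signal "rich get richer".
    print(linkage, sorted(np.bincount(labels), reverse=True))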
cbertinato/pandas
pandas/tests/tseries/frequencies/test_inference.py
1
13069
from datetime import datetime, timedelta import numpy as np import pytest from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG from pandas.compat import is_platform_windows from pandas import ( DatetimeIndex, Index, Series, Timestamp, date_range, period_range) from pandas.core.tools.datetimes import to_datetime import pandas.util.testing as tm import pandas.tseries.frequencies as frequencies import pandas.tseries.offsets as offsets def _check_generated_range(start, periods, freq): """ Check the range generated from a given start, frequency, and period count. Parameters ---------- start : str The start date. periods : int The number of periods. freq : str The frequency of the range. """ freq = freq.upper() gen = date_range(start, periods=periods, freq=freq) index = DatetimeIndex(gen.values) if not freq.startswith("Q-"): assert frequencies.infer_freq(index) == gen.freqstr else: inf_freq = frequencies.infer_freq(index) is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in ( "Q", "Q-DEC", "Q-SEP", "Q-JUN", "Q-MAR") is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in ( "Q-NOV", "Q-AUG", "Q-MAY", "Q-FEB") is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in ( "Q-OCT", "Q-JUL", "Q-APR", "Q-JAN") assert is_dec_range or is_nov_range or is_oct_range @pytest.fixture(params=[(timedelta(1), "D"), (timedelta(hours=1), "H"), (timedelta(minutes=1), "T"), (timedelta(seconds=1), "S"), (np.timedelta64(1, "ns"), "N"), (timedelta(microseconds=1), "U"), (timedelta(microseconds=1000), "L")]) def base_delta_code_pair(request): return request.param @pytest.fixture(params=[1, 2, 3, 4]) def count(request): return request.param @pytest.fixture(params=DAYS) def day(request): return request.param @pytest.fixture(params=MONTHS) def month(request): return request.param @pytest.fixture(params=[5, 7]) def periods(request): return request.param def test_raise_if_period_index(): index = period_range(start="1/1/1990", periods=20, freq="M") msg = "Check the `freq` attribute instead of using infer_freq" with pytest.raises(TypeError, match=msg): frequencies.infer_freq(index) def test_raise_if_too_few(): index = DatetimeIndex(["12/31/1998", "1/3/1999"]) msg = "Need at least 3 dates to infer frequency" with pytest.raises(ValueError, match=msg): frequencies.infer_freq(index) def test_business_daily(): index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"]) assert frequencies.infer_freq(index) == "B" def test_business_daily_look_alike(): # see gh-16624 # # Do not infer "B when "weekend" (2-day gap) in wrong place. index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"]) assert frequencies.infer_freq(index) is None def test_day_corner(): index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"]) assert frequencies.infer_freq(index) == "D" def test_non_datetime_index(): dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"]) assert frequencies.infer_freq(dates) == "D" def test_fifth_week_of_month_infer(): # see gh-9425 # # Only attempt to infer up to WOM-4. index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"]) assert frequencies.infer_freq(index) is None def test_week_of_month_fake(): # All of these dates are on same day # of week and are 4 or 5 weeks apart. index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", "2013-11-26"]) assert frequencies.infer_freq(index) != "WOM-4TUE" def test_fifth_week_of_month(): # see gh-9425 # # Only supports freq up to WOM-4. 
msg = ("Of the four parameters: start, end, periods, " "and freq, exactly three must be specified") with pytest.raises(ValueError, match=msg): date_range("2014-01-01", freq="WOM-5MON") def test_monthly_ambiguous(): rng = DatetimeIndex(["1/31/2000", "2/29/2000", "3/31/2000"]) assert rng.inferred_freq == "M" def test_annual_ambiguous(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) assert rng.inferred_freq == "A-JAN" def test_infer_freq_delta(base_delta_code_pair, count): b = Timestamp(datetime.now()) base_delta, code = base_delta_code_pair inc = base_delta * count index = DatetimeIndex([b + inc * j for j in range(3)]) exp_freq = "%d%s" % (count, code) if count > 1 else code assert frequencies.infer_freq(index) == exp_freq @pytest.mark.parametrize("constructor", [ lambda now, delta: DatetimeIndex([now + delta * 7] + [now + delta * j for j in range(3)]), lambda now, delta: DatetimeIndex([now + delta * j for j in range(3)] + [now + delta * 7]) ]) def test_infer_freq_custom(base_delta_code_pair, constructor): b = Timestamp(datetime.now()) base_delta, _ = base_delta_code_pair index = constructor(b, base_delta) assert frequencies.infer_freq(index) is None def test_weekly_infer(periods, day): _check_generated_range("1/1/2000", periods, "W-{day}".format(day=day)) def test_week_of_month_infer(periods, day, count): _check_generated_range("1/1/2000", periods, "WOM-{count}{day}".format(count=count, day=day)) @pytest.mark.parametrize("freq", ["M", "BM", "BMS"]) def test_monthly_infer(periods, freq): _check_generated_range("1/1/2000", periods, "M") def test_quarterly_infer(month, periods): _check_generated_range("1/1/2000", periods, "Q-{month}".format(month=month)) @pytest.mark.parametrize("annual", ["A", "BA"]) def test_annually_infer(month, periods, annual): _check_generated_range("1/1/2000", periods, "{annual}-{month}".format(annual=annual, month=month)) @pytest.mark.parametrize("freq,expected", [ ("Q", "Q-DEC"), ("Q-NOV", "Q-NOV"), ("Q-OCT", "Q-OCT") ]) def test_infer_freq_index(freq, expected): rng = period_range("1959Q2", "2009Q3", freq=freq) rng = Index(rng.to_timestamp("D", how="e").astype(object)) assert rng.inferred_freq == expected @pytest.mark.parametrize( "expected,dates", list( {"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"], "Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"], "M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"], "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"], "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], "H": ["2011-12-31 22:00", "2011-12-31 23:00", "2012-01-01 00:00", "2012-01-01 01:00"]}.items()) ) def test_infer_freq_tz(tz_naive_fixture, expected, dates): # see gh-7310 tz = tz_naive_fixture idx = DatetimeIndex(dates, tz=tz) assert idx.inferred_freq == expected @pytest.mark.parametrize("date_pair", [ ["2013-11-02", "2013-11-5"], # Fall DST ["2014-03-08", "2014-03-11"], # Spring DST ["2014-01-01", "2014-01-03"] # Regular Time ]) @pytest.mark.parametrize("freq", [ "3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N" ]) def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq): # see gh-8772 tz = tz_naive_fixture idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz) assert idx.inferred_freq == freq def test_infer_freq_tz_transition_custom(): index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago") assert index.inferred_freq is None @pytest.mark.parametrize("data,expected", [ # Hourly freq in a day must result in "H" 
(["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00", "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00"], "H"), (["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00", "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00", "2014-07-01 15:00", "2014-07-01 16:00", "2014-07-02 09:00", "2014-07-02 10:00", "2014-07-02 11:00"], "BH"), (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00", "2014-07-07 10:00", "2014-07-07 11:00"], "BH"), (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00", "2014-07-07 10:00", "2014-07-07 11:00", "2014-07-07 12:00", "2014-07-07 13:00", "2014-07-07 14:00", "2014-07-07 15:00", "2014-07-07 16:00", "2014-07-08 09:00", "2014-07-08 10:00", "2014-07-08 11:00", "2014-07-08 12:00", "2014-07-08 13:00", "2014-07-08 14:00", "2014-07-08 15:00", "2014-07-08 16:00"], "BH"), ]) def test_infer_freq_business_hour(data, expected): # see gh-7905 idx = DatetimeIndex(data) assert idx.inferred_freq == expected def test_not_monotonic(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) rng = rng[::-1] assert rng.inferred_freq == "-1A-JAN" def test_non_datetime_index2(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) vals = rng.to_pydatetime() result = frequencies.infer_freq(vals) assert result == rng.inferred_freq @pytest.mark.parametrize("idx", [ tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10) ]) def test_invalid_index_types(idx): msg = ("(cannot infer freq from a non-convertible)|" "(Check the `freq` attribute instead of using infer_freq)") with pytest.raises(TypeError, match=msg): frequencies.infer_freq(idx) @pytest.mark.skipif(is_platform_windows(), reason="see gh-10822: Windows issue") @pytest.mark.parametrize("idx", [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]) def test_invalid_index_types_unicode(idx): # see gh-10822 # # Odd error message on conversions to datetime for unicode. 
msg = "Unknown string format" with pytest.raises(ValueError, match=msg): frequencies.infer_freq(idx) def test_string_datetime_like_compat(): # see gh-6463 data = ["2004-01", "2004-02", "2004-03", "2004-04"] expected = frequencies.infer_freq(data) result = frequencies.infer_freq(Index(data)) assert result == expected def test_series(): # see gh-6407 s = Series(date_range("20130101", "20130110")) inferred = frequencies.infer_freq(s) assert inferred == "D" @pytest.mark.parametrize("end", [10, 10.]) def test_series_invalid_type(end): # see gh-6407 msg = "cannot infer freq from a non-convertible dtype on a Series" s = Series(np.arange(end)) with pytest.raises(TypeError, match=msg): frequencies.infer_freq(s) def test_series_inconvertible_string(): # see gh-6407 msg = "Unknown string format" with pytest.raises(ValueError, match=msg): frequencies.infer_freq(Series(["foo", "bar"])) @pytest.mark.parametrize("freq", [None, "L"]) def test_series_period_index(freq): # see gh-6407 # # Cannot infer on PeriodIndex msg = "cannot infer freq from a non-convertible dtype on a Series" s = Series(period_range("2013", periods=10, freq=freq)) with pytest.raises(TypeError, match=msg): frequencies.infer_freq(s) @pytest.mark.parametrize("freq", ["M", "L", "S"]) def test_series_datetime_index(freq): s = Series(date_range("20130101", periods=10, freq=freq)) inferred = frequencies.infer_freq(s) assert inferred == freq @pytest.mark.parametrize("offset_func", [ frequencies.get_offset, lambda freq: date_range("2011-01-01", periods=5, freq=freq) ]) @pytest.mark.parametrize("freq", [ "WEEKDAY", "EOM", "W@MON", "W@TUE", "W@WED", "W@THU", "W@FRI", "W@SAT", "W@SUN", "Q@JAN", "Q@FEB", "Q@MAR", "A@JAN", "A@FEB", "A@MAR", "A@APR", "A@MAY", "A@JUN", "A@JUL", "A@AUG", "A@SEP", "A@OCT", "A@NOV", "A@DEC", "Y@JAN", "WOM@1MON", "WOM@2MON", "WOM@3MON", "WOM@4MON", "WOM@1TUE", "WOM@2TUE", "WOM@3TUE", "WOM@4TUE", "WOM@1WED", "WOM@2WED", "WOM@3WED", "WOM@4WED", "WOM@1THU", "WOM@2THU", "WOM@3THU", "WOM@4THU", "WOM@1FRI", "WOM@2FRI", "WOM@3FRI", "WOM@4FRI" ]) def test_legacy_offset_warnings(offset_func, freq): with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): offset_func(freq) def test_ms_vs_capital_ms(): left = frequencies.get_offset("ms") right = frequencies.get_offset("MS") assert left == offsets.Milli() assert right == offsets.MonthBegin()
bsd-3-clause
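The pandas tests above revolve around frequencies.infer_freq. Here is a small, hedged usage sketch of the public pandas entry point, mirroring two behaviours the tests assert: a daily range infers to "D", and fewer than three dates raises.

# Hedged sketch of the public API the tests target.
import pandas as pd

idx = pd.date_range("2000-01-01", periods=5, freq="D")
print(pd.infer_freq(pd.DatetimeIndex(idx.values)))   # -> 'D'

try:
    pd.infer_freq(pd.DatetimeIndex(["1998-12-31", "1999-01-03"]))
except ValueError as err:
    print(err)   # needs at least 3 dates to infer a frequency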
PG-TUe/tpot
tpot/config/classifier_mdr.py
4
1756
# -*- coding: utf-8 -*-

"""This file is part of the TPOT library.

TPOT was primarily developed at the University of Pennsylvania by:
    - Randal S. Olson (rso@randalolson.com)
    - Weixuan Fu (weixuanf@upenn.edu)
    - Daniel Angell (dpa34@drexel.edu)
    - and many more generous open source contributors

TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.

TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.

"""

# Check the TPOT documentation for information on the structure of config dicts

tpot_mdr_classifier_config_dict = {

    # Classifiers

    'sklearn.linear_model.LogisticRegression': {
        'penalty': ["l1", "l2"],
        'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
        'dual': [True, False]
    },

    # Feature constructors

    'mdr.MDR': {
        'tie_break': [0, 1],
        'default_label': [0, 1]
    },

    # Feature Selectors

    'skrebate.ReliefF': {
        'n_features_to_select': range(1, 6),
        'n_neighbors': [2, 10, 50, 100, 250, 500]
    },

    'skrebate.SURF': {
        'n_features_to_select': range(1, 6)
    },

    'skrebate.SURFstar': {
        'n_features_to_select': range(1, 6)
    },

    'skrebate.MultiSURF': {
        'n_features_to_select': range(1, 6)
    }

}
lgpl-3.0
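A hedged sketch of how a configuration dictionary such as tpot_mdr_classifier_config_dict is typically consumed: it is passed to TPOTClassifier through the config_dict argument. The generation and population sizes below are illustrative, and the fit call is left commented out because X_train and y_train are placeholders.

# Hedged sketch (assumes tpot, mdr and skrebate are installed).
from tpot import TPOTClassifier
from tpot.config.classifier_mdr import tpot_mdr_classifier_config_dict

tpot = TPOTClassifier(generations=5, population_size=20,
                      config_dict=tpot_mdr_classifier_config_dict,
                      random_state=42, verbosity=2)
# tpot.fit(X_train, y_train)   # X_train / y_train are placeholders for real data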
corpusmusic/bb-cluster
kmean_cluster.py
1
6478
from __future__ import print_function, division, absolute_import, unicode_literals import csv import numpy as np import os import scipy as sp from sklearn.cluster import KMeans # For visualization import itertools from scipy.spatial import distance import math import matplotlib.pyplot as plt # general parameters for project maxClusters = 15 # algorithm will run from 1 cluster to this cardinality outputFolder = 'cluster_output_data/' # where the output data will be stored visualizationFolder = 'cluster_output_viz/' # where the output data will be stored inertiaFile = 'inertia.csv' # where the inertia for each test is recorded # custom functions needed for project def read_data(filename): with open(filename, 'r') as csvf: return [row for row in csv.reader(csvf)] def get_cluster(title): return clusterIdOfSong[title] # create the array and song list needed for KMeans.fit X = read_data("songbysongtransprob.csv") # array of first-order conditional probabilities of chord occurrence Y = [] # list of song titles for indexing for song in X: Y.append(song[0]) song.pop(0) Y_init = Y X_mat = np.array(X, dtype = np.float) print(X_mat.shape) # create an empty dictionary in which to put inertia data for each clustering solution inertiaData = {} # Run the cluster analysis (km.fit) K = 1 # number of clusters while K <= maxClusters: km = KMeans(n_clusters = K, max_iter = 10000, n_init = 10000, n_jobs = -1, random_state = 42) # define the algorithm parameters km.fit(X_mat) # run cluster algorithm clusterName = 'cluster' + str(K) inertiaData[clusterName] = km.inertia_ # add within-cluster sum-of-squares for the solution to dictionary # assemble a dictionary containing the cluster membership # for each song in the corpus # write to file clusterIdOfSong = {} for title, cluster in zip(Y, km.labels_): clusterIdOfSong[title] = cluster csvfile = outputFolder + clusterName + '.csv' # filename identifies the number of clusters in the test with open(csvfile, 'w') as fout: writer = csv.writer(fout, lineterminator='\n') for title in clusterIdOfSong: writer.writerow((title, clusterIdOfSong[title])) # reset for next loop print(clusterName, 'completed.') K += 1 # write inertia data to file csvfile = outputFolder + inertiaFile with open(csvfile, 'w') as fout: writer = csv.writer(fout, lineterminator='\n') for clusterName in inertiaData: writer.writerow((clusterName, inertiaData[clusterName])) # VIZUALIZATIONS # Add center clusters to X, Y so that we can visualize them on the graph clusterNameList = [] for i in range(0,K): clusterNameList.append('CLUSTER' + str(i)) Y = Y_init + clusterNameList X = np.concatenate((X_mat,km.cluster_centers_), axis = 0) # Define dictionary of song names and song indices (for graphs) song_dict = dict() for i in range(0, len(Y)): song_dict[Y[i]] = X[i] song_index = dict() for i in range(0, len(Y)): song_index[Y[i]] = i for j in range(0,K): song_index['CLUSTER' + str(j)] = i + j + 1 # Compute distances between every two songs pairs_cosine = dict() pairs_euclidean = dict() counter = 0 for song_pair in itertools.combinations(Y, 2): pairs_cosine[song_pair] = distance.cosine(song_dict[song_pair[0]],song_dict[song_pair[1]]) pairs_euclidean[song_pair] = distance.euclidean(song_dict[song_pair[0]],song_dict[song_pair[1]]) counter += 1 print('Number of pairs: ' + str(counter)) # Normalize euclidean distances to 0-1 range max_dist = max(pairs_euclidean.values()) min_dist = min(pairs_euclidean.values()) def min_max_scaler(x, min_, max_): return (x-min_)/(max_-min_) # Filter out NaN distances and scale to 
range 0-1 (only needed with Euclidean distance) graph_cosine = pairs_cosine.items() graph_cosine = [i for i in graph_cosine if math.isnan(i[1]) == False] graph_euclidean = pairs_euclidean.items() graph_euclidean = [(i[0], min_max_scaler(i[1], min_dist, max_dist)) for i in graph_euclidean if math.isnan(i[1]) == False] # Plot distribution of distances, save to png files cosine_visualization_filename = visualizationFolder + '_cosine.png' euclidean_visualization_filename = visualizationFolder + '_euclidean.png' plt.figure(figsize=(10,5)) plt.ylabel('Number of distance pairs') plt.xlabel('Distance') plt.title('Histogram of cosine distance pairs') plt.hist([i[1] for i in graph_cosine], bins = 50, color = 'green') # plt.show() # If this line is un-commented, savefig() below will result in a blank file plt.savefig(cosine_visualization_filename) print('Cosine histogram saved to', cosine_visualization_filename) plt.figure(figsize=(10,5)) plt.ylabel('Number of distance pairs') plt.xlabel('Distance') plt.title('Histogram of euclidean distance pairs') plt.hist([i[1] for i in graph_euclidean], bins = 50, color = 'green') # plt.show() # If this line is un-commented, savefig() below will result in a blank file plt.savefig(euclidean_visualization_filename) print('Euclidean histogram saved to', euclidean_visualization_filename) # Manually create JSON file json_nodes = '''"nodes": [''' + '\n' for title, cluster in zip(Y, km.labels_): json_nodes += '''{"id": "''' + title + '''", "group": ''' + str(cluster) + '''},''' + '\n' json_nodes = json_nodes[:-2] + '\n'+ ''']''' # Links using cosine distance json_links = '''"links": [''' + '\n' for song_pair, dist in graph_cosine: if dist < 0.9: json_links += '\t' + '''{"source": ''' + str(song_index[song_pair[0]]) + ''', "target": ''' + str(song_index[song_pair[1]]) + ''', "value": ''' + str(dist) + '''},''' + '\n' json_links = json_links[:-2] + '\n'+ ''']''' json_string = '''{''' + json_nodes + ''',''' + json_links + '''}''' cosine_filename = visualizationFolder + '_cosine.json' text_file = open(cosine_filename, "w") text_file.write(json_string) text_file.close() # Links using euclidean distance json_links = '''"links": [''' + '\n' for song_pair, dist in graph_euclidean: if dist < 0.9: json_links += '\t' + '''{"source": ''' + str(song_index[song_pair[0]]) + ''', "target": ''' + str(song_index[song_pair[1]]) + ''', "value": ''' + str(dist) + '''},''' + '\n' json_links = json_links[:-2] + '\n'+ ''']''' json_string = '''{''' + json_nodes + ''',''' + json_links + '''}''' euclidean_filename = visualizationFolder + '_euclidean.json' text_file = open(euclidean_filename, "w") text_file.write(json_string) text_file.close()
gpl-3.0
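The clustering script above sweeps K and records km.inertia_ for an elbow-style comparison. A condensed, hedged sketch of that loop on random stand-in data follows (the real script uses the chord-transition matrix read from CSV):

# Hedged sketch of the inertia sweep, on random stand-in data.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(42)
X_toy = rng.rand(200, 24)   # stand-in for songbysongtransprob.csv rows
for k in range(1, 8):
    km = KMeans(n_clusters=k, n_init=10, random_state=42).fit(X_toy)
    print(k, round(km.inertia_, 3))   # within-cluster sum of squares per K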
MechCoder/scikit-learn
sklearn/feature_extraction/tests/test_image.py
17
10937
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # License: BSD 3 clause import numpy as np import scipy as sp from scipy import ndimage from scipy.sparse.csgraph import connected_components from numpy.testing import assert_raises from sklearn.feature_extraction.image import ( img_to_graph, grid_to_graph, extract_patches_2d, reconstruct_from_patches_2d, PatchExtractor, extract_patches) from sklearn.utils.testing import assert_equal, assert_true def test_img_to_graph(): x, y = np.mgrid[:4, :4] - 10 grad_x = img_to_graph(x) grad_y = img_to_graph(y) assert_equal(grad_x.nnz, grad_y.nnz) # Negative elements are the diagonal: the elements of the original # image. Positive elements are the values of the gradient, they # should all be equal on grad_x and grad_y np.testing.assert_array_equal(grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0]) def test_grid_to_graph(): # Checking that the function works with graphs containing no edges size = 2 roi_size = 1 # Generating two convex parts with one vertex # Thus, edges will be empty in _to_graph mask = np.zeros((size, size), dtype=np.bool) mask[0:roi_size, 0:roi_size] = True mask[-roi_size:, -roi_size:] = True mask = mask.reshape(size ** 2) A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) assert_true(connected_components(A)[0] == 2) # Checking that the function works whatever the type of mask is mask = np.ones((size, size), dtype=np.int16) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask) assert_true(connected_components(A)[0] == 1) # Checking dtype of the graph mask = np.ones((size, size)) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool) assert_true(A.dtype == np.bool) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int) assert_true(A.dtype == np.int) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64) assert_true(A.dtype == np.float64) def test_connect_regions(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) for thr in (50, 150): mask = face > thr graph = img_to_graph(face, mask) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) def test_connect_regions_with_grid(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) mask = face > 50 graph = grid_to_graph(*face.shape, mask=mask) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) mask = face > 150 graph = grid_to_graph(*face.shape, mask=mask, dtype=None) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) def _downsampled_face(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) face = face.astype(np.float32) face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]) face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]) face = face.astype(np.float32) face /= 16.0 return face def _orange_face(face=None): face = _downsampled_face() if face is None else face face_color = np.zeros(face.shape + (3,)) face_color[:, :, 0] = 256 - face face_color[:, :, 1] = 256 - face / 2 face_color[:, :, 2] = 256 - face / 4 return face_color def _make_images(face=None): face = _downsampled_face() if face is None else face # make a collection of faces images = 
np.zeros((3,) + face.shape) images[0] = face images[1] = face + 1 images[2] = face + 2 return images downsampled_face = _downsampled_face() orange_face = _orange_face(downsampled_face) face_collection = _make_images(downsampled_face) def test_extract_patches_all(): face = downsampled_face i_h, i_w = face.shape p_h, p_w = 16, 16 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) def test_extract_patches_all_color(): face = orange_face i_h, i_w = face.shape[:2] p_h, p_w = 16, 16 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3)) def test_extract_patches_all_rect(): face = downsampled_face face = face[:, 32:97] i_h, i_w = face.shape p_h, p_w = 16, 12 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) def test_extract_patches_max_patches(): face = downsampled_face i_h, i_w = face.shape p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w), max_patches=100) assert_equal(patches.shape, (100, p_h, p_w)) expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1)) patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w), max_patches=2.0) assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w), max_patches=-1.0) def test_reconstruct_patches_perfect(): face = downsampled_face p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w)) face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) np.testing.assert_array_almost_equal(face, face_reconstructed) def test_reconstruct_patches_perfect_color(): face = orange_face p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w)) face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) np.testing.assert_array_almost_equal(face, face_reconstructed) def test_patch_extractor_fit(): faces = face_collection extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0) assert_true(extr == extr.fit(faces)) def test_patch_extractor_max_patches(): faces = face_collection i_h, i_w = faces.shape[1:3] p_h, p_w = 8, 8 max_patches = 100 expected_n_patches = len(faces) * max_patches extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) max_patches = 0.5 expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1) * max_patches) extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) def test_patch_extractor_max_patches_default(): faces = face_collection extr = PatchExtractor(max_patches=100, random_state=0) patches = extr.transform(faces) assert_equal(patches.shape, (len(faces) * 100, 19, 25)) def test_patch_extractor_all_patches(): faces = face_collection i_h, i_w = faces.shape[1:3] p_h, p_w = 8, 8 expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) def test_patch_extractor_color(): faces = _make_images(orange_face) i_h, 
i_w = faces.shape[1:3] p_h, p_w = 8, 8 expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3)) def test_extract_patches_strided(): image_shapes_1D = [(10,), (10,), (11,), (10,)] patch_sizes_1D = [(1,), (2,), (3,), (8,)] patch_steps_1D = [(1,), (1,), (4,), (2,)] expected_views_1D = [(10,), (9,), (3,), (2,)] last_patch_1D = [(10,), (8,), (8,), (2,)] image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)] patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)] patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)] expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)] last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)] image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)] patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)] patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)] expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)] last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)] image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D expected_views = expected_views_1D + expected_views_2D + expected_views_3D last_patches = last_patch_1D + last_patch_2D + last_patch_3D for (image_shape, patch_size, patch_step, expected_view, last_patch) in zip(image_shapes, patch_sizes, patch_steps, expected_views, last_patches): image = np.arange(np.prod(image_shape)).reshape(image_shape) patches = extract_patches(image, patch_shape=patch_size, extraction_step=patch_step) ndim = len(image_shape) assert_true(patches.shape[:ndim] == expected_view) last_patch_slices = [slice(i, i + j, None) for i, j in zip(last_patch, patch_size)] assert_true((patches[[slice(-1, None, None)] * ndim] == image[last_patch_slices].squeeze()).all()) def test_extract_patches_square(): # test same patch size for all dimensions face = downsampled_face i_h, i_w = face.shape p = 8 expected_n_patches = ((i_h - p + 1), (i_w - p + 1)) patches = extract_patches(face, patch_shape=p) assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p)) def test_width_patch(): # width and height of the patch should be less than the image x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert_raises(ValueError, extract_patches_2d, x, (4, 1)) assert_raises(ValueError, extract_patches_2d, x, (1, 4))
bsd-3-clause
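The image tests above repeatedly check that an (i_h, i_w) image yields (i_h - p_h + 1) * (i_w - p_w + 1) patches and that reconstruction is lossless. A small, hedged sketch of both facts; the 64x64 toy image replaces the downsampled face used in the tests.

# Hedged sketch of the patch-count arithmetic and lossless reconstruction.
import numpy as np
from sklearn.feature_extraction.image import (extract_patches_2d,
                                               reconstruct_from_patches_2d)

image = np.arange(64 * 64, dtype=float).reshape(64, 64)   # toy 64x64 "image"
p_h = p_w = 16
patches = extract_patches_2d(image, (p_h, p_w))
assert patches.shape == ((64 - p_h + 1) * (64 - p_w + 1), p_h, p_w)

# Overlapping patches are averaged back into the original image.
reconstructed = reconstruct_from_patches_2d(patches, image.shape)
print(np.allclose(image, reconstructed))   # True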
Mazecreator/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py
111
7865
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for pandas_io.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.learn.python.learn.learn_io import pandas_io from tensorflow.python.framework import errors from tensorflow.python.platform import test from tensorflow.python.training import coordinator from tensorflow.python.training import queue_runner_impl # pylint: disable=g-import-not-at-top try: import pandas as pd HAS_PANDAS = True except ImportError: HAS_PANDAS = False class PandasIoTest(test.TestCase): def makeTestDataFrame(self): index = np.arange(100, 104) a = np.arange(4) b = np.arange(32, 36) x = pd.DataFrame({'a': a, 'b': b}, index=index) y = pd.Series(np.arange(-32, -28), index=index) return x, y def callInputFnOnce(self, input_fn, session): results = input_fn() coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(session, coord=coord) result_values = session.run(results) coord.request_stop() coord.join(threads) return result_values def testPandasInputFn_IndexMismatch(self): if not HAS_PANDAS: return x, _ = self.makeTestDataFrame() y_noindex = pd.Series(np.arange(-32, -28)) with self.assertRaises(ValueError): pandas_io.pandas_input_fn( x, y_noindex, batch_size=2, shuffle=False, num_epochs=1) def testPandasInputFn_ProducesExpectedOutputs(self): if not HAS_PANDAS: return with self.test_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features, target = self.callInputFnOnce(input_fn, session) self.assertAllEqual(features['a'], [0, 1]) self.assertAllEqual(features['b'], [32, 33]) self.assertAllEqual(target, [-32, -31]) def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self): if not HAS_PANDAS: return with self.test_session() as session: index = np.arange(100, 102) a = np.arange(2) b = np.arange(32, 34) x = pd.DataFrame({'a': a, 'b': b}, index=index) y = pd.Series(np.arange(-32, -30), index=index) input_fn = pandas_io.pandas_input_fn( x, y, batch_size=128, shuffle=False, num_epochs=2) results = input_fn() coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(session, coord=coord) features, target = session.run(results) self.assertAllEqual(features['a'], [0, 1, 0, 1]) self.assertAllEqual(features['b'], [32, 33, 32, 33]) self.assertAllEqual(target, [-32, -31, -32, -31]) with self.assertRaises(errors.OutOfRangeError): session.run(results) coord.request_stop() coord.join(threads) def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self): if not HAS_PANDAS: return with self.test_session() as session: index = np.arange(100, 105) a = np.arange(5) b = np.arange(32, 37) x = pd.DataFrame({'a': a, 'b': b}, index=index) y = pd.Series(np.arange(-32, -27), index=index) input_fn = 
pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) results = input_fn() coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(session, coord=coord) features, target = session.run(results) self.assertAllEqual(features['a'], [0, 1]) self.assertAllEqual(features['b'], [32, 33]) self.assertAllEqual(target, [-32, -31]) features, target = session.run(results) self.assertAllEqual(features['a'], [2, 3]) self.assertAllEqual(features['b'], [34, 35]) self.assertAllEqual(target, [-30, -29]) features, target = session.run(results) self.assertAllEqual(features['a'], [4]) self.assertAllEqual(features['b'], [36]) self.assertAllEqual(target, [-28]) with self.assertRaises(errors.OutOfRangeError): session.run(results) coord.request_stop() coord.join(threads) def testPandasInputFn_OnlyX(self): if not HAS_PANDAS: return with self.test_session() as session: x, _ = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y=None, batch_size=2, shuffle=False, num_epochs=1) features = self.callInputFnOnce(input_fn, session) self.assertAllEqual(features['a'], [0, 1]) self.assertAllEqual(features['b'], [32, 33]) def testPandasInputFn_ExcludesIndex(self): if not HAS_PANDAS: return with self.test_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features, _ = self.callInputFnOnce(input_fn, session) self.assertFalse('index' in features) def assertInputsCallableNTimes(self, input_fn, session, n): inputs = input_fn() coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(session, coord=coord) for _ in range(n): session.run(inputs) with self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() coord.join(threads) def testPandasInputFn_RespectsEpoch_NoShuffle(self): if not HAS_PANDAS: return with self.test_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=4, shuffle=False, num_epochs=1) self.assertInputsCallableNTimes(input_fn, session, 1) def testPandasInputFn_RespectsEpoch_WithShuffle(self): if not HAS_PANDAS: return with self.test_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=4, shuffle=True, num_epochs=1) self.assertInputsCallableNTimes(input_fn, session, 1) def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self): if not HAS_PANDAS: return with self.test_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2) self.assertInputsCallableNTimes(input_fn, session, 4) def testPandasInputFn_RespectsEpochUnevenBatches(self): if not HAS_PANDAS: return x, y = self.makeTestDataFrame() with self.test_session() as session: input_fn = pandas_io.pandas_input_fn( x, y, batch_size=3, shuffle=False, num_epochs=1) # Before the last batch, only one element of the epoch should remain. self.assertInputsCallableNTimes(input_fn, session, 2) def testPandasInputFn_Idempotent(self): if not HAS_PANDAS: return x, y = self.makeTestDataFrame() for _ in range(2): pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1)() for _ in range(2): pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=True, num_epochs=1)() if __name__ == '__main__': test.main()
apache-2.0
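A hedged sketch of the call pattern the TensorFlow tests above exercise. It assumes the old TensorFlow 1.x contrib API that this test file targets (plus pandas); the returned features and target are graph tensors that still need queue runners and a session, exactly as the helper callInputFnOnce does.

# Hedged sketch, TensorFlow 1.x contrib API only.
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io

index = np.arange(100, 104)
x = pd.DataFrame({'a': np.arange(4), 'b': np.arange(32, 36)}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)

input_fn = pandas_io.pandas_input_fn(
    x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()   # graph tensors; run them under queue runners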
rahuldhote/scikit-learn
sklearn/semi_supervised/label_propagation.py
128
15312
# coding=utf8 """ Label propagation in the context of this module refers to a set of semisupervised classification algorithms. In the high level, these algorithms work by forming a fully-connected graph between all points given and solving for the steady-state distribution of labels at each point. These algorithms perform very well in practice. The cost of running can be very expensive, at approximately O(N^3) where N is the number of (labeled and unlabeled) points. The theory (why they perform so well) is motivated by intuitions from random walk algorithms and geometric relationships in the data. For more information see the references below. Model Features -------------- Label clamping: The algorithm tries to learn distributions of labels over the dataset. In the "Hard Clamp" mode, the true ground labels are never allowed to change. They are clamped into position. In the "Soft Clamp" mode, they are allowed some wiggle room, but some alpha of their original value will always be retained. Hard clamp is the same as soft clamping with alpha set to 1. Kernel: A function which projects a vector into some higher dimensional space. This implementation supprots RBF and KNN kernels. Using the RBF kernel generates a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of size O(k*N) which will run much faster. See the documentation for SVMs for more info on kernels. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelPropagation >>> label_prop_model = LabelPropagation() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelPropagation(...) Notes ----- References: [1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised Learning (2006), pp. 193-216 [2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005 """ # Authors: Clay Woolam <clay@woolam.org> # Licence: BSD from abc import ABCMeta, abstractmethod from scipy import sparse import numpy as np from ..base import BaseEstimator, ClassifierMixin from ..metrics.pairwise import rbf_kernel from ..utils.graph import graph_laplacian from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_X_y, check_is_fitted from ..externals import six from ..neighbors.unsupervised import NearestNeighbors ### Helper functions def _not_converged(y_truth, y_prediction, tol=1e-3): """basic convergence check""" return np.abs(y_truth - y_prediction).sum() > tol class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Base class for label propagation module. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported.. 
gamma : float Parameter for rbf kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state n_neighbors : integer > 0 Parameter for knn kernel """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=1, max_iter=30, tol=1e-3): self.max_iter = max_iter self.tol = tol # kernel parameters self.kernel = kernel self.gamma = gamma self.n_neighbors = n_neighbors # clamping factor self.alpha = alpha def _get_kernel(self, X, y=None): if self.kernel == "rbf": if y is None: return rbf_kernel(X, X, gamma=self.gamma) else: return rbf_kernel(X, y, gamma=self.gamma) elif self.kernel == "knn": if self.nn_fit is None: self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X) if y is None: return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X, self.n_neighbors, mode='connectivity') else: return self.nn_fit.kneighbors(y, return_distance=False) else: raise ValueError("%s is not a valid kernel. Only rbf and knn" " are supported at this time" % self.kernel) @abstractmethod def _build_graph(self): raise NotImplementedError("Graph construction must be implemented" " to fit a label propagation model.") def predict(self, X): """Performs inductive inference across the model. Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- y : array_like, shape = [n_samples] Predictions for input data """ probas = self.predict_proba(X) return self.classes_[np.argmax(probas, axis=1)].ravel() def predict_proba(self, X): """Predict probability for each possible outcome. Compute the probability estimates for each single sample in X and each possible outcome seen during training (categorical distribution). Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- probabilities : array, shape = [n_samples, n_classes] Normalized probability distributions across class labels """ check_is_fitted(self, 'X_') if sparse.isspmatrix(X): X_2d = X else: X_2d = np.atleast_2d(X) weight_matrices = self._get_kernel(self.X_, X_2d) if self.kernel == 'knn': probabilities = [] for weight_matrix in weight_matrices: ine = np.sum(self.label_distributions_[weight_matrix], axis=0) probabilities.append(ine) probabilities = np.array(probabilities) else: weight_matrices = weight_matrices.T probabilities = np.dot(weight_matrices, self.label_distributions_) normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T probabilities /= normalizer return probabilities def fit(self, X, y): """Fit a semi-supervised label propagation model based All the input data is provided matrix X (labeled and unlabeled) and corresponding label matrix y with a dedicated marker value for unlabeled samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] A {n_samples by n_samples} size matrix will be created from this y : array_like, shape = [n_samples] n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels Returns ------- self : returns an instance of self. 
""" X, y = check_X_y(X, y) self.X_ = X # actual graph construction (implementations should override this) graph_matrix = self._build_graph() # label construction # construct a categorical distribution for classification only classes = np.unique(y) classes = (classes[classes != -1]) self.classes_ = classes n_samples, n_classes = len(y), len(classes) y = np.asarray(y) unlabeled = y == -1 clamp_weights = np.ones((n_samples, 1)) clamp_weights[unlabeled, 0] = self.alpha # initialize distributions self.label_distributions_ = np.zeros((n_samples, n_classes)) for label in classes: self.label_distributions_[y == label, classes == label] = 1 y_static = np.copy(self.label_distributions_) if self.alpha > 0.: y_static *= 1 - self.alpha y_static[unlabeled] = 0 l_previous = np.zeros((self.X_.shape[0], n_classes)) remaining_iter = self.max_iter if sparse.isspmatrix(graph_matrix): graph_matrix = graph_matrix.tocsr() while (_not_converged(self.label_distributions_, l_previous, self.tol) and remaining_iter > 1): l_previous = self.label_distributions_ self.label_distributions_ = safe_sparse_dot( graph_matrix, self.label_distributions_) # clamp self.label_distributions_ = np.multiply( clamp_weights, self.label_distributions_) + y_static remaining_iter -= 1 normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] self.label_distributions_ /= normalizer # set the transduction item transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)] self.transduction_ = transduction.ravel() self.n_iter_ = self.max_iter - remaining_iter return self class LabelPropagation(BaseLabelPropagation): """Label Propagation classifier Read more in the :ref:`User Guide <label_propagation>`. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported.. gamma : float Parameter for rbf kernel n_neighbors : integer > 0 Parameter for knn kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelPropagation >>> label_prop_model = LabelPropagation() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelPropagation(...) References ---------- Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data with label propagation. 
Technical Report CMU-CALD-02-107, Carnegie Mellon University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf See Also -------- LabelSpreading : Alternate label propagation strategy more robust to noise """ def _build_graph(self): """Matrix representing a fully connected graph between each sample This basic implementation creates a non-stochastic affinity matrix, so class distributions will exceed 1 (normalization may be desired). """ if self.kernel == 'knn': self.nn_fit = None affinity_matrix = self._get_kernel(self.X_) normalizer = affinity_matrix.sum(axis=0) if sparse.isspmatrix(affinity_matrix): affinity_matrix.data /= np.diag(np.array(normalizer)) else: affinity_matrix /= normalizer[:, np.newaxis] return affinity_matrix class LabelSpreading(BaseLabelPropagation): """LabelSpreading model for semi-supervised learning This model is similar to the basic Label Propgation algorithm, but uses affinity matrix based on the normalized graph Laplacian and soft clamping across the labels. Read more in the :ref:`User Guide <label_propagation>`. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported. gamma : float parameter for rbf kernel n_neighbors : integer > 0 parameter for knn kernel alpha : float clamping factor max_iter : float maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelSpreading >>> label_prop_model = LabelSpreading() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelSpreading(...) References ---------- Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, Bernhard Schoelkopf. Learning with local and global consistency (2004) http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219 See Also -------- LabelPropagation : Unregularized graph based semi-supervised learning """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2, max_iter=30, tol=1e-3): # this one has different base parameters super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol) def _build_graph(self): """Graph matrix for Label Spreading computes the graph laplacian""" # compute affinity matrix (or gram matrix) if self.kernel == 'knn': self.nn_fit = None n_samples = self.X_.shape[0] affinity_matrix = self._get_kernel(self.X_) laplacian = graph_laplacian(affinity_matrix, normed=True) laplacian = -laplacian if sparse.isspmatrix(laplacian): diag_mask = (laplacian.row == laplacian.col) laplacian.data[diag_mask] = 0.0 else: laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0 return laplacian
bsd-3-clause
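The label propagation module above describes an iterative diffusion with clamping. The following is a hedged, NumPy-only sketch of the standard soft-clamp recipe, not a line-for-line copy of the scikit-learn implementation; the function name, its defaults and the row normalisation are illustrative choices.

# Hedged NumPy-only sketch of soft-clamped label propagation (illustrative).
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

def propagate(X, y, alpha=0.2, gamma=20, max_iter=1000, tol=1e-3):
    """y uses -1 for unlabeled samples; returns a predicted class per sample."""
    classes = np.unique(y[y != -1])
    W = rbf_kernel(X, X, gamma=gamma)
    W /= W.sum(axis=1, keepdims=True)              # row-normalised affinities
    Y0 = np.zeros((len(y), len(classes)))
    for k, c in enumerate(classes):
        Y0[y == c, k] = 1.0                        # one-hot rows for labeled points
    labeled = (y != -1)[:, None]
    Y = Y0.copy()
    for _ in range(max_iter):
        Y_new = W.dot(Y)                           # diffuse along the graph
        # Soft clamp: labeled rows keep (1 - alpha) of their original distribution.
        Y_new = np.where(labeled, alpha * Y_new + (1 - alpha) * Y0, Y_new)
        if np.abs(Y_new - Y).sum() <= tol:
            Y = Y_new
            break
        Y = Y_new
    return classes[Y.argmax(axis=1)]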
Dekken/tick
examples/plot_hawkes_time_func_simu.py
2
1220
""" ===================================== Hawkes simulation with exotic kernels ===================================== Simulation of Hawkes processes with usage of custom kernels """ import matplotlib.pyplot as plt import numpy as np from tick.base import TimeFunction from tick.hawkes import SimuHawkes, HawkesKernelExp, HawkesKernelTimeFunc from tick.plot import plot_point_process t_values = np.array([0, 1, 1.5], dtype=float) y_values = np.array([0, .2, 0], dtype=float) tf1 = TimeFunction([t_values, y_values], inter_mode=TimeFunction.InterConstRight, dt=0.1) kernel_1 = HawkesKernelTimeFunc(tf1) t_values = np.array([0, .1, 2], dtype=float) y_values = np.array([0, .4, -0.2], dtype=float) tf2 = TimeFunction([t_values, y_values], inter_mode=TimeFunction.InterLinear, dt=0.1) kernel_2 = HawkesKernelTimeFunc(tf2) hawkes = SimuHawkes( kernels=[[kernel_1, kernel_1], [HawkesKernelExp(.07, 4), kernel_2]], baseline=[1.5, 1.5], verbose=False, seed=23983) run_time = 40 dt = 0.01 hawkes.track_intensity(dt) hawkes.end_time = run_time hawkes.simulate() fig, ax = plt.subplots(hawkes.n_nodes, 1, figsize=(14, 8)) plot_point_process(hawkes, t_max=20, ax=ax) plt.show()
bsd-3-clause
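As a complement to the tick example above, here is a hedged, NumPy-only sketch of the conditional intensity of a one-dimensional Hawkes process with an exponential kernel. To our understanding HawkesKernelExp(intensity, decay) corresponds to intensity * decay * exp(-decay * t), but treat that reading as an assumption; the event times below are made up.

# Hedged NumPy sketch; the kernel parameterisation is an assumption, see note above.
import numpy as np

def hawkes_intensity(t, events, baseline=1.5, intensity=0.07, decay=4.0):
    past = events[events < t]
    # lambda(t) = mu + sum over past events of intensity * decay * exp(-decay * (t - t_i))
    return baseline + np.sum(intensity * decay * np.exp(-decay * (t - past)))

events = np.array([0.5, 1.2, 1.3, 2.8])   # made-up event times
print(hawkes_intensity(3.0, events))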
quheng/scikit-learn
sklearn/preprocessing/label.py
137
27165
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Joel Nothman <joel.nothman@gmail.com> # Hamzeh Alsalhi <ha258@cornell.edu> # License: BSD 3 clause from collections import defaultdict import itertools import array import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..utils.fixes import np_version from ..utils.fixes import sparse_min_max from ..utils.fixes import astype from ..utils.fixes import in1d from ..utils import column_or_1d from ..utils.validation import check_array from ..utils.validation import check_is_fitted from ..utils.validation import _num_samples from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..externals import six zip = six.moves.zip map = six.moves.map __all__ = [ 'label_binarize', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', ] def _check_numpy_unicode_bug(labels): """Check that user is not subject to an old numpy bug Fixed in master before 1.7.0: https://github.com/numpy/numpy/pull/243 """ if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U': raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted" " on unicode data correctly. Please upgrade" " NumPy to use LabelEncoder with unicode inputs.") class LabelEncoder(BaseEstimator, TransformerMixin): """Encode labels with value between 0 and n_classes-1. Read more in the :ref:`User Guide <preprocessing_targets>`. Attributes ---------- classes_ : array of shape (n_class,) Holds the label for each class. Examples -------- `LabelEncoder` can be used to normalize labels. >>> from sklearn import preprocessing >>> le = preprocessing.LabelEncoder() >>> le.fit([1, 2, 2, 6]) LabelEncoder() >>> le.classes_ array([1, 2, 6]) >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS array([0, 0, 1, 2]...) >>> le.inverse_transform([0, 0, 1, 2]) array([1, 1, 2, 6]) It can also be used to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels. >>> le = preprocessing.LabelEncoder() >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) LabelEncoder() >>> list(le.classes_) ['amsterdam', 'paris', 'tokyo'] >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS array([2, 2, 1]...) >>> list(le.inverse_transform([2, 2, 1])) ['tokyo', 'tokyo', 'paris'] """ def fit(self, y): """Fit label encoder Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. """ y = column_or_1d(y, warn=True) _check_numpy_unicode_bug(y) self.classes_ = np.unique(y) return self def fit_transform(self, y): """Fit label encoder and return encoded labels Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples] """ y = column_or_1d(y, warn=True) _check_numpy_unicode_bug(y) self.classes_, y = np.unique(y, return_inverse=True) return y def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape [n_samples] Target values. 
Returns ------- y : array-like of shape [n_samples] """ check_is_fitted(self, 'classes_') classes = np.unique(y) _check_numpy_unicode_bug(classes) if len(np.intersect1d(classes, self.classes_)) < len(classes): diff = np.setdiff1d(classes, self.classes_) raise ValueError("y contains new labels: %s" % str(diff)) return np.searchsorted(self.classes_, y) def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : numpy array of shape [n_samples] Target values. Returns ------- y : numpy array of shape [n_samples] """ check_is_fitted(self, 'classes_') diff = np.setdiff1d(y, np.arange(len(self.classes_))) if diff: raise ValueError("y contains new labels: %s" % str(diff)) y = np.asarray(y) return self.classes_[y] class LabelBinarizer(BaseEstimator, TransformerMixin): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in the scikit. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. At learning time, this simply consists in learning one regressor or binary classifier per class. In doing so, one needs to convert multi-class labels to binary labels (belong or does not belong to the class). LabelBinarizer makes this process easy with the transform method. At prediction time, one assigns the class for which the corresponding model gave the greatest confidence. LabelBinarizer makes this easy with the inverse_transform method. Read more in the :ref:`User Guide <preprocessing_targets>`. Parameters ---------- neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False) True if the returned array from transform is desired to be in sparse CSR format. Attributes ---------- classes_ : array of shape [n_class] Holds the label for each class. y_type_ : str, Represents the type of the target data as evaluated by utils.multiclass.type_of_target. Possible type are 'continuous', 'continuous-multioutput', 'binary', 'multiclass', 'mutliclass-multioutput', 'multilabel-indicator', and 'unknown'. multilabel_ : boolean True if the transformer was fitted on a multilabel rather than a multiclass set of labels. The ``multilabel_`` attribute is deprecated and will be removed in 0.18 sparse_input_ : boolean, True if the input data to transform is given as a sparse matrix, False otherwise. indicator_matrix_ : str 'sparse' when the input data to tansform is a multilable-indicator and is sparse, None otherwise. 
The ``indicator_matrix_`` attribute is deprecated as of version 0.16 and will be removed in 0.18 Examples -------- >>> from sklearn import preprocessing >>> lb = preprocessing.LabelBinarizer() >>> lb.fit([1, 2, 6, 4, 2]) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([1, 2, 4, 6]) >>> lb.transform([1, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) Binary targets transform to a column vector >>> lb = preprocessing.LabelBinarizer() >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) array([[1], [0], [0], [1]]) Passing a 2D matrix for multilabel classification >>> import numpy as np >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([0, 1, 2]) >>> lb.transform([0, 1, 2, 1]) array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) See also -------- label_binarize : function to perform the transform operation of LabelBinarizer with fixed classes. """ def __init__(self, neg_label=0, pos_label=1, sparse_output=False): if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if sparse_output and (pos_label == 0 or neg_label != 0): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) self.neg_label = neg_label self.pos_label = pos_label self.sparse_output = sparse_output def fit(self, y): """Fit label binarizer Parameters ---------- y : numpy array of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Returns ------- self : returns an instance of self. """ self.y_type_ = type_of_target(y) if 'multioutput' in self.y_type_: raise ValueError("Multioutput target data is not supported with " "label binarization") if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) self.sparse_input_ = sp.issparse(y) self.classes_ = unique_labels(y) return self def transform(self, y): """Transform multi-class labels to binary labels The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters ---------- y : numpy array or sparse matrix of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. """ check_is_fitted(self, 'classes_') y_is_multilabel = type_of_target(y).startswith('multilabel') if y_is_multilabel and not self.y_type_.startswith('multilabel'): raise ValueError("The object was not fitted with multilabel" " input.") return label_binarize(y, self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output) def inverse_transform(self, Y, threshold=None): """Transform binary labels back to multi-class labels Parameters ---------- Y : numpy array or sparse matrix with shape [n_samples, n_classes] Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float or None Threshold used in the binary and multi-label cases. Use 0 when: - Y contains the output of decision_function (classifier) Use 0.5 when: - Y contains the output of predict_proba If None, the threshold is assumed to be half way between neg_label and pos_label. 
Returns ------- y : numpy array or CSR matrix of shape [n_samples] Target values. Notes ----- In the case when the binary labels are fractional (probabilistic), inverse_transform chooses the class with the greatest value. Typically, this allows to use the output of a linear model's decision_function method directly as the input of inverse_transform. """ check_is_fitted(self, 'classes_') if threshold is None: threshold = (self.pos_label + self.neg_label) / 2. if self.y_type_ == "multiclass": y_inv = _inverse_binarize_multiclass(Y, self.classes_) else: y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold) if self.sparse_input_: y_inv = sp.csr_matrix(y_inv) elif sp.issparse(y_inv): y_inv = y_inv.toarray() return y_inv def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in the scikit. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. This function makes it possible to compute this transformation for a fixed set of class labels known ahead of time. Parameters ---------- y : array-like Sequence of integer labels or multilabel data to encode. classes : array-like of shape [n_classes] Uniquely holds the label for each class. neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False), Set to true if output binary array is desired in CSR sparse format Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. Examples -------- >>> from sklearn.preprocessing import label_binarize >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) The class ordering is preserved: >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) array([[1, 0, 0, 0], [0, 1, 0, 0]]) Binary targets transform to a column vector >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) array([[1], [0], [0], [1]]) See also -------- LabelBinarizer : class used to wrap the functionality of label_binarize and allow for fitting to classes independently of the transform operation """ if not isinstance(y, list): # XXX Workaround that will be removed when list of list format is # dropped y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None) else: if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if (sparse_output and (pos_label == 0 or neg_label != 0)): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) # To account for pos_label == 0 in the dense case pos_switch = pos_label == 0 if pos_switch: pos_label = -neg_label y_type = type_of_target(y) if 'multioutput' in y_type: raise ValueError("Multioutput target data is not supported with label " "binarization") if y_type == 'unknown': raise ValueError("The type of target data is not known") n_samples = y.shape[0] if sp.issparse(y) else len(y) n_classes = len(classes) classes = np.asarray(classes) if y_type == "binary": if len(classes) == 1: Y = np.zeros((len(y), 1), dtype=np.int) Y += neg_label return Y elif len(classes) >= 3: 
y_type = "multiclass" sorted_class = np.sort(classes) if (y_type == "multilabel-indicator" and classes.size != y.shape[1]): raise ValueError("classes {0} missmatch with the labels {1}" "found in the data".format(classes, unique_labels(y))) if y_type in ("binary", "multiclass"): y = column_or_1d(y) # pick out the known labels from y y_in_classes = in1d(y, classes) y_seen = y[y_in_classes] indices = np.searchsorted(sorted_class, y_seen) indptr = np.hstack((0, np.cumsum(y_in_classes))) data = np.empty_like(indices) data.fill(pos_label) Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) elif y_type == "multilabel-indicator": Y = sp.csr_matrix(y) if pos_label != 1: data = np.empty_like(Y.data) data.fill(pos_label) Y.data = data else: raise ValueError("%s target data is not supported with label " "binarization" % y_type) if not sparse_output: Y = Y.toarray() Y = astype(Y, int, copy=False) if neg_label != 0: Y[Y == 0] = neg_label if pos_switch: Y[Y == pos_label] = 0 else: Y.data = astype(Y.data, int, copy=False) # preserve label ordering if np.any(classes != sorted_class): indices = np.searchsorted(sorted_class, classes) Y = Y[:, indices] if y_type == "binary": if sparse_output: Y = Y.getcol(-1) else: Y = Y[:, -1].reshape((-1, 1)) return Y def _inverse_binarize_multiclass(y, classes): """Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold. """ classes = np.asarray(classes) if sp.issparse(y): # Find the argmax for each row in y where y is a CSR matrix y = y.tocsr() n_samples, n_outputs = y.shape outputs = np.arange(n_outputs) row_max = sparse_min_max(y, 1)[1] row_nnz = np.diff(y.indptr) y_data_repeated_max = np.repeat(row_max, row_nnz) # picks out all indices obtaining the maximum per row y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) # For corner case where last row has a max of 0 if row_max[-1] == 0: y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) # Gets the index of the first argmax in each row from y_i_all_argmax index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) # first argmax of each row y_ind_ext = np.append(y.indices, [0]) y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] # Handle rows of all 0 y_i_argmax[np.where(row_nnz == 0)[0]] = 0 # Handles rows with max of 0 that contain negative numbers samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] for i in samples: ind = y.indices[y.indptr[i]:y.indptr[i + 1]] y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] return classes[y_i_argmax] else: return classes.take(y.argmax(axis=1), mode="clip") def _inverse_binarize_thresholding(y, output_type, classes, threshold): """Inverse label binarization transformation using thresholding.""" if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: raise ValueError("output_type='binary', but y.shape = {0}". 
format(y.shape)) if output_type != "binary" and y.shape[1] != len(classes): raise ValueError("The number of class is not equal to the number of " "dimension of y.") classes = np.asarray(classes) # Perform thresholding if sp.issparse(y): if threshold > 0: if y.format not in ('csr', 'csc'): y = y.tocsr() y.data = np.array(y.data > threshold, dtype=np.int) y.eliminate_zeros() else: y = np.array(y.toarray() > threshold, dtype=np.int) else: y = np.array(y > threshold, dtype=np.int) # Inverse transform data if output_type == "binary": if sp.issparse(y): y = y.toarray() if y.ndim == 2 and y.shape[1] == 2: return classes[y[:, 1]] else: if len(classes) == 1: y = np.empty(len(y), dtype=classes.dtype) y.fill(classes[0]) return y else: return classes[y.ravel()] elif output_type == "multilabel-indicator": return y else: raise ValueError("{0} format is not supported".format(output_type)) class MultiLabelBinarizer(BaseEstimator, TransformerMixin): """Transform between iterable of iterables and a multilabel format Although a list of sets or tuples is a very intuitive format for multilabel data, it is unwieldy to process. This transformer converts between this intuitive format and the supported multilabel format: a (samples x classes) binary matrix indicating the presence of a class label. Parameters ---------- classes : array-like of shape [n_classes] (optional) Indicates an ordering for the class labels sparse_output : boolean (default: False), Set to true if output binary array is desired in CSR sparse format Attributes ---------- classes_ : array of labels A copy of the `classes` parameter where provided, or otherwise, the sorted set of classes found when fitting. Examples -------- >>> mlb = MultiLabelBinarizer() >>> mlb.fit_transform([(1, 2), (3,)]) array([[1, 1, 0], [0, 0, 1]]) >>> mlb.classes_ array([1, 2, 3]) >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])]) array([[0, 1, 1], [1, 0, 0]]) >>> list(mlb.classes_) ['comedy', 'sci-fi', 'thriller'] """ def __init__(self, classes=None, sparse_output=False): self.classes = classes self.sparse_output = sparse_output def fit(self, y): """Fit the label sets binarizer, storing `classes_` Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : returns this MultiLabelBinarizer instance """ if self.classes is None: classes = sorted(set(itertools.chain.from_iterable(y))) else: classes = self.classes dtype = np.int if all(isinstance(c, int) for c in classes) else object self.classes_ = np.empty(len(classes), dtype=dtype) self.classes_[:] = classes return self def fit_transform(self, y): """Fit the label sets binarizer and transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. 
""" if self.classes is not None: return self.fit(y).transform(y) # Automatically increment on new class class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) # sort classes and reorder columns tmp = sorted(class_mapping, key=class_mapping.get) # (make safe for tuples) dtype = np.int if all(isinstance(c, int) for c in tmp) else object class_mapping = np.empty(len(tmp), dtype=dtype) class_mapping[:] = tmp self.classes_, inverse = np.unique(class_mapping, return_inverse=True) yt.indices = np.take(inverse, yt.indices) if not self.sparse_output: yt = yt.toarray() return yt def transform(self, y): """Transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ class_to_index = dict(zip(self.classes_, range(len(self.classes_)))) yt = self._transform(y, class_to_index) if not self.sparse_output: yt = yt.toarray() return yt def _transform(self, y, class_mapping): """Transforms the label sets with a given mapping Parameters ---------- y : iterable of iterables class_mapping : Mapping Maps from label to column index in label indicator matrix Returns ------- y_indicator : sparse CSR matrix, shape (n_samples, n_classes) Label indicator matrix """ indices = array.array('i') indptr = array.array('i', [0]) for labels in y: indices.extend(set(class_mapping[label] for label in labels)) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) return sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping))) def inverse_transform(self, yt): """Transform the given indicator matrix into label sets Parameters ---------- yt : array or sparse matrix of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. Returns ------- y : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`. """ if yt.shape[1] != len(self.classes_): raise ValueError('Expected indicator for {0} classes, but got {1}' .format(len(self.classes_), yt.shape[1])) if sp.issparse(yt): yt = yt.tocsr() if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: raise ValueError('Expected only 0s and 1s in label indicator.') return [tuple(self.classes_.take(yt.indices[start:end])) for start, end in zip(yt.indptr[:-1], yt.indptr[1:])] else: unexpected = np.setdiff1d(yt, [0, 1]) if len(unexpected) > 0: raise ValueError('Expected only 0s and 1s in label indicator. ' 'Also got {0}'.format(unexpected)) return [tuple(self.classes_.compress(indicators)) for indicators in yt]
bsd-3-clause
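A minimal round-trip sketch for the binarizers defined in the file above, assuming they are used through the public sklearn.preprocessing namespace; the sample labels are invented:

import numpy as np
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer

# One-vs-all encoding and decoding of multiclass labels.
lb = LabelBinarizer()
Y = lb.fit_transform([1, 2, 6, 4, 2])       # shape (5, 4), one column per class
round_trip = lb.inverse_transform(Y)        # array([1, 2, 6, 4, 2])

# Iterable-of-iterables round trip for multilabel targets.
mlb = MultiLabelBinarizer()
Yt = mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
labels = mlb.inverse_transform(Yt)          # [('sci-fi', 'thriller'), ('comedy',)]
print(round_trip, labels)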
samgill844/waveletspec
waveletspec/coadd_spectra.py
1
7263
from __future__ import print_function import os,sys import numpy as np from astropy.io import fits import waveletspec,os,sys import matplotlib.pyplot as plt this_dir, this_filename = os.path.split(waveletspec.__file__) import glob import waveletspec,os,sys this_dir, this_filename = os.path.split(waveletspec.__file__) print('ISPEC_1: {}/iSpec_v20160930_py2'.format(this_dir)) # First search for the iSpec tar (either Python 2 or 3 version) try: if (sys.version_info < (3, 0)): os.stat(this_dir+'/iSpec_v20160930_py2') print('Found python 2 version of iSpec') ispec_dir = this_dir+'/iSpec_v20160930_py2' sys.path.insert(0, os.path.abspath((this_dir+'/iSpec_v20160930_py2'))) if (sys.version_info > (3, 0)): os.stat(this_dir+'/iSpec_v20160930_py3') print('Found python 3 version of iSpec') ispec_dir = this_dir+'/iSpec_v20160930_py3' sys.path.insert(0, os.path.abspath((this_dir+'/iSpec_v20160930_py3'))) except OSError as e: # export the tar print('I cannot find the iSpec directory, searching for the tar files...') if (sys.version_info > (3, 0)): os.system('tar -xf {}/iSpec_v20160930_py3.tar.gz -C {}'.format(this_dir,this_dir)) print('Extracted to {}/iSpec_v20160930_py3'.format(this_dir)) sys.path.insert(0, os.path.abspath((this_dir+'/iSpec_v20160930_py3'))) ispec_dir = this_dir+'/iSpec_v20160930_py3' elif (sys.version_info < (3, 0)): os.system('tar -xf {}/iSpec_v20160930_py2.tar.gz -C {}'.format(this_dir,this_dir)) print('Extracted to {}/iSpec_v20160930_py2'.format(this_dir)) sys.path.insert(0, os.path.abspath((this_dir+'/iSpec_v20160930_py2'))) ispec_dir = this_dir+'/iSpec_v20160930_py2' else: print('I CANNOT FIND ISPEC') import ispec from progress.bar import Bar def determine_radial_velocity_with_template(data,velocity_step): # - Read synthetic template #template = ispec.read_spectrum(ispec_dir + "/input/spectra/templates/Atlas.Arcturus.372_926nm/template.txt.gz") #template = ispec.read_spectrum(ispec_dir + "/input/spectra/templates/Atlas.Sun.372_926nm/template.txt.gz") template = ispec.read_spectrum(ispec_dir + "/input/spectra/templates/NARVAL.Sun.370_1048nm/template.txt.gz") #template = ispec.read_spectrum(ispec_dir + "/input/spectra/templates/Synth.Sun.300_1100nm/template.txt.gz") models, ccf = ispec.cross_correlate_with_template(data, template, \ lower_velocity_limit=-200, upper_velocity_limit=200, \ velocity_step=velocity_step, fourier=False) # The number of models represents the number of components components = len(models) print(components) # First component: rv = np.round(models[0].mu(), 2) # km/s rv_err = np.round(models[0].emu(), 2) # km/s return rv, rv_err def coadd_spectra(files=None,scale_method = 'nothing', combine_method='coadd', velocity_step=1.0): ######################## # First sort files out ######################## if files==None: files = glob.glob('COR*.fits') if len(files) == 1: print('Use the "load_spectra" command for a single file.') return if len(files) == 0: print('No files found.') return ############################## # Now cycle through the files ############################## spectra = [] wavelengths = [] bar = Bar('Correcting for RVs', max=len(files)) for i in range(len(files)): data = ispec.read_spectrum(files[i]) #data = ispec.
resample_spectrum(data, np.linspace(627.5, 680.5,2**14)) # FOR INT SPECTRA rv, rv_err = determine_radial_velocity_with_template(data,velocity_step) print('{}: {} +- {}'.format(files[i],rv,rv_err)) #if abs(rv)>100: # bar.next() # continue data = ispec.correct_velocity(data, rv) spectra.append(data['flux']) wavelengths.append(data['waveobs']) bar.next() bar.finish() if len(spectra)==0: print('RV correction produced no spectra to co-add.') return ################################################################## # Now find the max of the mins and mins of the max for wavelength ################################################################## mins,maxs =[],[] for i in range(len(wavelengths)): mins.append(min(wavelengths[i])) maxs.append(max(wavelengths[i])) minimum_wavelength = max(mins)+1 maximum_wavelength = min(maxs)-1 print('The minimum usable wavelength range is {} - {} nm'.format(minimum_wavelength,maximum_wavelength)) ################################# # Now re-interpolate the spectra ################################# wavelength_range = np.linspace(minimum_wavelength,maximum_wavelength,2**18) bar = Bar('Re-interpolating spectra', max=len(wavelengths)) for i in range(len(wavelengths)): spectra[i] = np.interp(wavelength_range,wavelengths[i], spectra[i]) bar.next() bar.finish() del wavelengths # keep usage low spectra = np.array(spectra) # for ease later ##################################### # Now use method to scale spectra ##################################### if scale_method == 'median': medians = np.median(spectra,axis=1) for i in range(len(spectra)): spectra[i] = spectra[i]/medians[i] print('Scale method: median') elif scale_method == 'mean': means = np.mean(spectra,axis=1) for i in range(len(spectra)): spectra[i] = spectra[i]/means[i] print('Scale method: mean') elif scale_method == 'nothing': print('Scale method: nothing') pass else: print('Scale method not understood') return ##################################################### # Now plot spectra to ensure the method is behaving ##################################################### fig1 = plt.figure() ax1 = fig1.add_subplot(111) number_of_plots = len(spectra) colormap = plt.cm.nipy_spectral # I suggest using nipy_spectral, Set1, Paired ax1.set_color_cycle([colormap(i) for i in np.linspace(0, 1,number_of_plots)]) for i in range(len(spectra)): plt.plot(wavelength_range,spectra[i],label=files[i]) plt.legend() plt.xlabel('Wavelength (nm)') plt.ylabel('Counts') #plt.xlim(683.9,739.5) plt.show(block=False) response = input('Are you happy? ') if (response.lower() == 'y') or (response.lower() == 'yes'): pass else: print('Breaking') return ##################################### # Now use method to combine spectra ##################################### if combine_method == 'median': spectrae = np.std(spectra,axis=0) spectra = np.median(spectra,axis=0) print('Combination method: median') elif combine_method == 'mean': spectrae = np.std(spectra,axis=0) spectra = np.mean(spectra,axis=0) print('Combination method: mean') elif combine_method == 'coadd': spectra = np.sum(spectra,axis=0) spectrae = [0.0]*len(spectra) print('Combination method: coadd') pass else: print('Combination method not understood') return plt.plot(wavelength_range,spectra,'r--',linewidth=1.5) response = input('Check the red line. Happy? 
') if (response.lower() == 'y') or (response.lower() == 'yes'): pass else: print('Breaking') return data = ispec.create_spectrum_structure(wavelength_range) data['waveobs'] = wavelength_range data['flux'] = spectra data['err'] = spectrae return data
agpl-3.0
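A hedged usage sketch for the coadd_spectra routine above; the import path and FITS filenames are assumptions, and the function prompts interactively before combining, so it is meant for a terminal session:

import glob
from waveletspec.coadd_spectra import coadd_spectra  # assumed import path

# Co-add a set of echelle spectra after template-based RV correction.
files = sorted(glob.glob('COR*.fits'))   # hypothetical input files
data = coadd_spectra(files, scale_method='median', combine_method='coadd')
if data is not None:
    print(data['waveobs'][:5], data['flux'][:5])   # iSpec-style spectrum structure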
nwjs/chromium.src
tools/perf/cli_tools/pinpoint_cli/histograms_df_test.py
5
4115
# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from cli_tools.pinpoint_cli import histograms_df from core.external_modules import pandas from tracing.value import histogram from tracing.value import histogram_set from tracing.value.diagnostics import date_range from tracing.value.diagnostics import generic_set def TestHistogram(name, units, values, **kwargs): def DiagnosticValue(value): if isinstance(value, (int, long)): return date_range.DateRange(value) elif isinstance(value, list): return generic_set.GenericSet(value) else: raise NotImplementedError(type(value)) hist = histogram.Histogram(name, units) hist.diagnostics.update( (key, DiagnosticValue(value)) for key, value in kwargs.iteritems()) for value in values: hist.AddSample(value) return hist @unittest.skipIf(pandas is None, 'pandas not available') class TestHistogramsDf(unittest.TestCase): def testIterRows(self): run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'], 'benchmarks': ['system_health'], 'deviceIds': ['device1']} # Second run on same device ten minutes later. run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'], 'benchmarks': ['system_health'], 'deviceIds': ['device1']} hists = histogram_set.HistogramSet([ TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'], traceUrls=['http://url/to/trace1'], **run1), TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'], traceUrls=['http://url/to/trace2'], **run1), TestHistogram('memory', 'sizeInBytes', [512], stories=['story2'], traceUrls=['http://url/to/trace3'], **run2), ]) expected = [ ('startup', 'ms', 10.0, 2.0, 3, 'run1', 'system_health', 'story1', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace1'), ('memory', 'sizeInBytes', 256.0, 0.0, 1, 'run1', 'system_health', 'story2', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace2'), ('memory', 'sizeInBytes', 512.0, 0.0, 1, 'run2', 'system_health', 'story2', '2009-02-13 23:41:30', 'device1', 'http://url/to/trace3'), ] self.assertItemsEqual(histograms_df.IterRows(hists.AsDicts()), expected) def testDataFrame(self): run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'], 'benchmarks': ['system_health'], 'deviceIds': ['device1']} # Second run on same device ten minutes later. run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'], 'benchmarks': ['system_health'], 'deviceIds': ['device1']} hists = histogram_set.HistogramSet([ TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'], traceUrls=['http://url/to/trace1'], **run1), TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'], traceUrls=['http://url/to/trace2'], **run1), TestHistogram('memory', 'sizeInBytes', [384], stories=['story2'], traceUrls=['http://url/to/trace3'], **run2), ]) df = histograms_df.DataFrame(hists.AsDicts()) # Poke at the data frame and check a few known facts about our fake data: # It has 3 histograms. self.assertEqual(len(df), 3) # The benchmark has two stories. self.assertItemsEqual(df['story'].unique(), ['story1', 'story2']) # We recorded three traces. self.assertEqual(len(df['trace_url'].unique()), 3) # All benchmarks ran on the same device. self.assertEqual(len(df['device_id'].unique()), 1) # There is a memory regression between runs 1 and 2. memory = df.set_index(['name', 'run_label']).loc['memory']['mean'] self.assertEqual(memory['run2'] - memory['run1'], 128.0) # Ten minutes passed between the two benchmark runs. 
self.assertEqual(df['benchmark_start'].max() - df['benchmark_start'].min(), pandas.Timedelta('10 minutes'))
bsd-3-clause
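The pandas indexing idiom the test above leans on, isolated with made-up numbers so the regression check is easy to follow:

import pandas as pd

# Compare a metric across two run labels by moving (name, run_label)
# into the index, as in testDataFrame above.
df = pd.DataFrame([
    {'name': 'memory', 'run_label': 'run1', 'mean': 256.0},
    {'name': 'memory', 'run_label': 'run2', 'mean': 384.0},
])
memory = df.set_index(['name', 'run_label']).loc['memory']['mean']
print(memory['run2'] - memory['run1'])  # 128.0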
openhumanoids/director
src/python/director/planplayback.py
2
7145
import os import vtkAll as vtk import math import time import re import numpy as np from director.timercallback import TimerCallback from director import objectmodel as om from director.simpletimer import SimpleTimer from director.utime import getUtime from director import robotstate import pickle import scipy.interpolate def asRobotPlan(msg): ''' If the given message is a robot_plan_with_supports_t then this function returns the plan message contained within it. For any other message type, this function just returns its input argument. ''' try: import drc as lcmdrc except ImportError: pass else: if isinstance(msg, lcmdrc.robot_plan_with_supports_t): return msg.plan return msg class PlanPlayback(object): def __init__(self): self.animationCallback = None self.animationTimer = None self.interpolationMethod = 'slinear' self.playbackSpeed = 1.0 self.jointNameRegex = '' @staticmethod def getPlanPoses(msgOrList): if isinstance(msgOrList, list): messages = msgOrList allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0]) for msg in messages[1:]: poseTimes, poses = PlanPlayback.getPlanPoses(msg) poseTimes += allPoseTimes[-1] allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:])) allPoses += poses[1:] return allPoseTimes, allPoses else: msg = asRobotPlan(msgOrList) poses = [] poseTimes = [] for plan in msg.plan: pose = robotstate.convertStateMessageToDrakePose(plan) poseTimes.append(plan.utime / 1e6) poses.append(pose) return np.array(poseTimes), poses @staticmethod def getPlanElapsedTime(msg): msg = asRobotPlan(msg) startTime = msg.plan[0].utime endTime = msg.plan[-1].utime return (endTime - startTime) / 1e6 def stopAnimation(self): if self.animationTimer: self.animationTimer.stop() def setInterpolationMethod(method): self.interpolationMethod = method def playPlan(self, msg, jointController): self.playPlans([msg], jointController) def playPlans(self, messages, jointController): assert len(messages) poseTimes, poses = self.getPlanPoses(messages) self.playPoses(poseTimes, poses, jointController) def getPoseInterpolatorFromPlan(self, message): poseTimes, poses = self.getPlanPoses(message) return self.getPoseInterpolator(poseTimes, poses) def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True): if unwrap_rpy: poses = np.array(poses, copy=True) poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0) if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']: f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod) elif self.interpolationMethod == 'pchip': f = scipy.interpolate.pchip(poseTimes, poses, axis=0) return f def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples): poseTimes, poses = self.getPlanPoses(messages) f = self.getPoseInterpolator(poseTimes, poses) sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples) meshes = [] for sampleTime in sampleTimes: pose = f(sampleTime) jointController.setPose('plan_playback', pose) polyData = vtk.vtkPolyData() robotModel.model.getModelMesh(polyData) meshes.append(polyData) return meshes def showPoseAtTime(self, time, jointController, poseInterpolator): pose = poseInterpolator(time) jointController.setPose('plan_playback', pose) def playPoses(self, poseTimes, poses, jointController): f = self.getPoseInterpolator(poseTimes, poses) timer = SimpleTimer() def updateAnimation(): tNow = timer.elapsed() * self.playbackSpeed if tNow > poseTimes[-1]: pose = poses[-1] jointController.setPose('plan_playback', pose) if self.animationCallback: self.animationCallback() return False 
pose = f(tNow) jointController.setPose('plan_playback', pose) if self.animationCallback: self.animationCallback() self.animationTimer = TimerCallback() self.animationTimer.targetFps = 60 self.animationTimer.callback = updateAnimation self.animationTimer.start() updateAnimation() def picklePlan(self, filename, msg): poseTimes, poses = self.getPlanPoses(msg) pickle.dump((poseTimes, poses), open(filename, 'w')) def getMovingJointNames(self, msg): poseTimes, poses = self.getPlanPoses(msg) diffs = np.diff(poses, axis=0) jointIds = np.unique(np.where(diffs != 0.0)[1]) jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds] return jointNames def plotPlan(self, msg): poseTimes, poses = self.getPlanPoses(msg) self.plotPoses(poseTimes, poses) def plotPoses(self, poseTimes, poses): import matplotlib.pyplot as plt poses = np.array(poses) if self.jointNameRegex: jointIds = range(poses.shape[1]) else: diffs = np.diff(poses, axis=0) jointIds = np.unique(np.where(diffs != 0.0)[1]) jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds] jointTrajectories = [poses[:,jointId] for jointId in jointIds] seriesNames = [] sampleResolutionInSeconds = 0.01 numberOfSamples = (poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples) fig = plt.figure() ax = fig.add_subplot(111) for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories): if self.jointNameRegex and not re.match(self.jointNameRegex, jointName): continue x = poseTimes y = jointTrajectory y = np.rad2deg(y) if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']: f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod) elif self.interpolationMethod == 'pchip': f = scipy.interpolate.pchip(x, y) ax.plot(x, y, 'ko') seriesNames.append(jointName + ' points') ax.plot(xnew, f(xnew), '-') seriesNames.append(jointName + ' ' + self.interpolationMethod) ax.legend(seriesNames, loc='upper right').draggable() ax.set_xlabel('time (s)') ax.set_ylabel('joint angle (deg)') ax.set_title('joint trajectories') plt.show()
bsd-3-clause
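A small standalone sketch of the two interpolation families PlanPlayback switches between, using toy pose data rather than LCM plan messages:

import numpy as np
import scipy.interpolate

# Rows are poses sampled at pose_times; interpolation runs along axis 0.
pose_times = np.array([0.0, 1.0, 2.0, 3.0])
poses = np.array([[0.0, 0.0],
                  [1.0, 0.5],
                  [0.5, 1.0],
                  [2.0, 1.5]])

f_slinear = scipy.interpolate.interp1d(pose_times, poses, axis=0, kind='slinear')
f_pchip = scipy.interpolate.pchip(pose_times, poses, axis=0)
print(f_slinear(1.5), f_pchip(1.5))  # interpolated pose at t = 1.5 s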
sys-bio/tellurium
docs/conf.py
1
10266
# -*- coding: utf-8 -*- # # tellurium documentation build configuration file, created by # sphinx-quickstart on Fri Jan 22 13:08:36 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx_rtd_theme from mock import Mock as MagicMock # Mock things for readthedoc build class Mock(MagicMock): @classmethod def __getattr__(cls, name): return Mock() MOCK_MODULES = ['roadrunner', 'roadrunner.testing', 'antimony', 'libsbml', 'libsedml', 'phrasedml', 'sbml2matlab', 'sedml2py', 'pygraphviz' 'numpy' 'matplotlib' 'ipython' 'ipywidgets'] # sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('../tellurium')) sys.path.append(os.path.join(os.path.dirname(__name__), '..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'tellurium' copyright = u'2014-2019, Kiri Choi, J Kyle Medley, Matthias König, Kaylene Stocking, Caroline Cannistra, Michal Galdzicki, and Herbert Sauro' author = u'Kiri Choi, J Kyle Medley, Matthias König, Kaylene Stocking, Caroline Cannistra, Michal Galdzicki, and Herbert Sauro' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = None with open(os.path.join(os.path.dirname(__file__), '../tellurium/VERSION.txt'), 'r') as f: version = str(f.read().rstrip()) # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = './images/tellurium_logo_50.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = './images/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
#html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'telluriumdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'tellurium.tex', u'tellurium Documentation', u'sys-bio', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'tellurium', u'tellurium Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'tellurium', u'tellurium Documentation', author, 'tellurium', 'Integrated dynamical modeling environment..', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
apache-2.0
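A self-contained sketch of the mock-import pattern the configuration above sets up for Read the Docs builds; the module names here are placeholders rather than the project's full dependency list, and each one is a separate, comma-separated list element:

import sys
from unittest.mock import MagicMock

class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()

# Stub out heavy compiled dependencies so Sphinx autodoc can import the
# package on a documentation-only build host.
MOCK_MODULES = ['roadrunner', 'libsbml', 'phrasedml', 'numpy', 'matplotlib']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)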
lucabaldini/ximpol
ximpol/irf/psf.py
1
8912
#!/usr/bin/env python # # Copyright (C) 2015, the ximpol team. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import numpy from astropy.io import fits from ximpol.utils.logging_ import logger from ximpol.irf.base import OGIP_HEADER_SPECS from ximpol.core.fitsio import xBinTableHDUBase from ximpol.core.spline import xInterpolatedUnivariateSpline from ximpol.core.rand import xUnivariateGenerator def gauss_king(r, W, sigma, N, r_c, eta): """Functional representation of the Gaussian plus King PSF profile described in `Fabiani et al., 2014 <http://arxiv.org/abs/1403.7200>`_, equation (2): .. math:: \\text{PSF}(r) = W \\exp^{-(\\frac{r^2}{2\\sigma^2})} + N\\left( 1 + \\left( \\frac{r}{r_c} \\right)^2 \\right)^{-\\eta} Arguments --------- r : float or array The radial distance from the true source position is arcsec. W : float Normalization of the Gaussian component. sigma : float Width of the Gaussian component. N : float Normalization of the King component. r_c : float Characteristic radius of the King component. eta : float Exponent of the King component. """ return W*numpy.exp(-(r**2/(2*sigma**2))) + N*(1 + (r/r_c)**2)**(-eta) def gauss_king_eef_at_infinity(W, sigma, N, r_c, eta): """Return the value of the Encircled Energy Fraction (EEF) at infinity, given the parameters of the functional representation, see equation (4) of `Fabiani et al., 2014 <http://arxiv.org/abs/1403.7200>`_. .. math:: \\text{EEF}(\\infty) = 2\\pi W\\sigma^2 + \\pi\\frac{r_c^2 N}{\\eta - 1} Arguments --------- r : float or array The radial distance from the true source position is arcsec. W : float Normalization of the Gaussian component. sigma : float Width of the Gaussian component. N : float Normalization of the King component. r_c : float Characteristic radius of the King component. eta : float Exponent of the King component. """ return 2*numpy.pi*W*sigma**2 + numpy.pi*r_c**2*N/(eta - 1.) class xBinTableHDUPSF(xBinTableHDUBase): """Binary table for the PSF extension of a psf file. """ NAME = 'PSF' DATA_SPECS = [ ('W' , 'E', '1/sr'), ('SIGMA', 'E', 'arcsec'), ('N' , 'E', '1/sr'), ('R_C' , 'E', 'arcsec'), ('ETA' , 'E') ] class xPointSpreadFunction(xInterpolatedUnivariateSpline): """Class describing a (simplified, energy independent) PSF. The effective area is essentially a linear spline, with built-in facilities for evaluation and plotting. Arguments --------- psf_file_path : str The path to the .psf FITS file containing the effective area table. rmax : float The maximum radial distance (in arcsec) for the PSF. 
Example ------- >>> from ximpol import XIMPOL_IRF >>> from ximpol.utils.matplotlib_ import pyplot as plt >>> >>> file_path = os.path.join(XIMPOL_IRF,'fits','xipe_baseline.psf') >>> psf = xPointSpreadFunction(file_path) >>> print(psf.rvs(10)) >>> ra, dec = 5.0, 12.3 >>> print(psf.smear_single(ra, dec, 10)) Note ---- The parametrization is taken from `Fabiani et al., 2014 <http://arxiv.org/abs/1403.7200>`_, table 2. The PSF is technically energy dependent, but the dependence is not wild and for the moment we stick with the values at 4.51 keV in the table. """ MAX_RADIUS = 150. PARAM_NAMES = ['W', 'sigma', 'N', 'r_c', 'eta'] def __init__(self, psf_file_path): """Constructor. """ logger.info('Reading PSF data from %s...' % psf_file_path) self.hdu_list = fits.open(psf_file_path) self.hdu_list.info() _data = self.hdu_list['PSF'].data W = _data['W'] sigma = _data['SIGMA'] N = _data['N'] r_c = _data['R_C'] eta = _data['ETA'] self.__params = (W, sigma, N, r_c, eta) # Tabulate the actual PSF values. _r = numpy.linspace(0, self.MAX_RADIUS, 250) _y = gauss_king(_r, *self.__params) fmt = dict(xname='r', xunits='arcsec', yname='PSF', yunits='sr$^{-1}$') xInterpolatedUnivariateSpline.__init__(self, _r, _y, k=2, **fmt) # Include the solid angle for the actual underlying random generator. _y *= 2*numpy.pi*_r fmt = dict(rvname='r', rvunits='arcsec', pdfname='$2 \\pi r \\times$ PSF', pdfunits='') self.generator = xUnivariateGenerator(_r, _y, k=1, **fmt) # Finally, calculate the self.eef, self.hew = self.build_eef() logger.info(self) def build_eef(self): """Build the Encircled Energy Fraction (EEF) as a function of r. And, while we're at it, we also calculate and cache the HEW. """ _r = self.x _y = numpy.array([self.generator.integral(_r[0], _rp) for _rp in _r]) _y /= gauss_king_eef_at_infinity(*self.__params) hew = 2*xInterpolatedUnivariateSpline(_y, _r, k=2)(0.5) fmt = dict(xname='r', xunits='arcsec', yname='EEF') return xInterpolatedUnivariateSpline(_r, _y, k=1, **fmt), hew def view(self, show=True): """Overloaded plot method (with default log scale on the y-axis). """ from ximpol.utils.matplotlib_ import pyplot as plt plt.figure('PSF') xInterpolatedUnivariateSpline.plot(self, logy=True, show=False) plt.figure('Solid-angle convolution') self.generator.plot(logy=True, show=False) plt.figure('EEF') self.eef.plot(show=False) if show: plt.show() def rvs(self, size): """Extract values of the radial distance according to the PSF shape. """ return self.generator.rvs(size) def delta(self, size=1): """Return an array of random offset (in ra, dec or L, B) due to the PSF. Note the output is converted in degrees. """ rho = self.rvs(size)/3600. phi = numpy.random.uniform(0, 2*numpy.pi, size) return rho*numpy.cos(phi), rho*numpy.sin(phi) def smear_single(self, ra, dec, num_times=1): """Smear a pair of coordinates for an arbitrary number of times. """ delta_ra, delta_dec = self.delta(size=num_times) return ra + delta_ra/numpy.cos(numpy.radians(dec)), dec + delta_dec def smear(self, ra, dec): """Smear a pair of arrays of coordinates. """ assert(ra.size == dec.size) delta_ra, delta_dec = self.delta(ra.size) return ra + delta_ra/numpy.cos(numpy.radians(dec)), dec + delta_dec def __str__(self): """String formatting. """ text = 'Gauss + King PSF, ' for i, name in enumerate(self.PARAM_NAMES): text += '%s = %.3e, ' % (name, self.__params[i]) text += 'HEW = %.1f arcsec' % self.hew return text def draw_psf_circle(self, image, x, y, text='PSF', color='white', lw=2, number=False): """Add the PSF circle to the image with labels. 
This function must be called after the (possible) image recenter. Note the x and y are coordinates relative to the figure axes (0.0 is left or bottom and 1.0 is right or top). """ psf_rad = self.hew/(2.*60.**2) #degrees xpix_low, xpix_high = image._ax1.get_xbound() ypix_low, ypix_high = image._ax1.get_ybound() xpix_circ = x*(xpix_high-xpix_low) + xpix_low ypix_circ = y*(ypix_high-ypix_low) + ypix_low ra_circ, dec_circ = image.pixel2world(xpix_circ, ypix_circ) dec_text_up = dec_circ+1.7*psf_rad dec_text_down = dec_circ-1.9*psf_rad image.add_label(ra_circ, dec_text_up, text, size='x-large', color=color, horizontalalignment='center') image.show_circles(ra_circ, dec_circ, psf_rad, lw=lw, color=color) if number: text_psf = '%d"' %round(self.hew) image.add_label(ra_circ, dec_text_down, text_psf, size='large', color=color, horizontalalignment='center')
gpl-3.0
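A numerical check of the normalisation used in build_eef above: integrating 2*pi*r times the Gauss + King profile out to a large radius should approach the closed-form EEF at infinity. The parameter values are illustrative, not the instrument calibration:

import numpy as np

def gauss_king(r, W, sigma, N, r_c, eta):
    # Gaussian + King radial profile, same functional form as in psf.py above.
    return W*np.exp(-(r**2/(2*sigma**2))) + N*(1 + (r/r_c)**2)**(-eta)

W, sigma, N, r_c, eta = 1.0e-3, 8.0, 5.0e-5, 20.0, 1.5   # made-up parameters

r = np.linspace(0., 5000., 200001)
dr = r[1] - r[0]
numeric = np.sum(2*np.pi*r*gauss_king(r, W, sigma, N, r_c, eta)) * dr
closed_form = 2*np.pi*W*sigma**2 + np.pi*r_c**2*N/(eta - 1.)
print(numeric, closed_form)   # the two values agree to well under a per cent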
hawkrobe/couzin_replication
analysis/experiment3/helpers/rectangular_world.py
1
3590
import numpy as np import pandas as pd import utils import copy import config class RectangularWorld(): def __init__(self, noise_location, round_length = 6, full_noise_file = True, center_radius = None, edge_goal = False): if noise_location is not None: if full_noise_file: self.noise_location = noise_location + '/' with open(self.noise_location + 't0.csv') as f: self.noise_line_width = len(f.readline()) assert not edge_goal else: self.centers = np.array(pd.read_csv(noise_location)) self.center_radius = center_radius self.edge_goal = edge_goal self.full_noise_file = full_noise_file self.world = config.WORLD self.shape = 'rectangle' self.tick_frequency = 125 self.ticks_per_sec = 1000/125 self.us_min_wage_per_tick = 7.25 / (60*60*(1000 / self.tick_frequency)) self.round_length = round_length self.max_bonus = 1.25*6/self.round_length self.game_length = int(self.round_length * 60 * self.ticks_per_sec) self.min_speed = 17 / float(self.ticks_per_sec) self.max_speed = 57 / float(self.ticks_per_sec) self.size = config.SIZE self.pos_limits = config.POS_LIMITS def get_random_position(self): """ >>> np.random.seed(1) >>> w = RectangularWorld('../test/') >>> np.round(np.max([w.get_random_position()[0] for i in range(10000)])) 482.0 >>> round(np.min([w.get_random_position()[0] for i in range(10000)])) 3.0 >>> round(np.min([w.get_random_position()[1] for i in range(10000)])) 3.0 >>> round(np.max([w.get_random_position()[1] for i in range(10000)])) 277.0 """ pos = None if self.edge_goal: while True: pos = np.array([np.random.uniform(self.pos_limits["x_min"], self.pos_limits["x_max"]), np.random.uniform(self.pos_limits["y_min"], self.pos_limits["y_max"])]) if utils.check_collision(pos, self.pos_limits, self.shape, update = False, extended = True): break else: pos = np.array([np.random.uniform(self.pos_limits["x_min"], self.pos_limits["x_max"]), np.random.uniform(self.pos_limits["y_min"], self.pos_limits["y_max"])]) return pos def get_random_angle(self): """ >>> np.random.seed(1) >>> w = RectangularWorld('../test/') >>> np.max([w.get_random_angle() for i in range(10000)]) 359.0 >>> np.min([w.get_random_angle() for i in range(10000)]) 0.0 """ return np.floor(np.random.random() * 360) def get_score(self, pos, time): """ >>> w = RectangularWorld('../test/') >>> w.get_score(np.array([2.1,2.9]), 0) 0.0 """ if self.full_noise_file: return utils.get_score(pos, time, self.noise_location, self.pos_limits, self.noise_line_width) else: if self.edge_goal: return utils.wall_score(pos, self.centers[time], self.center_radius, self.pos_limits, self.shape) else: return utils.calculate_score(pos, self.centers[time], self.center_radius, self.pos_limits, self.shape) if __name__ == "__main__": import doctest doctest.testmod()
mit
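The tick bookkeeping in RectangularWorld above, worked by hand for the default six-minute round (plain arithmetic, no game data needed):

# One tick every 125 ms => 8 ticks per second.
tick_frequency = 125
ticks_per_sec = 1000 / tick_frequency

round_length = 6                                        # minutes per round
game_length = int(round_length * 60 * ticks_per_sec)    # 2880 ticks per round

# Speeds are stored per tick, converted from pixels per second.
min_speed = 17 / float(ticks_per_sec)                   # 2.125 px per tick
max_speed = 57 / float(ticks_per_sec)                   # 7.125 px per tick
print(game_length, min_speed, max_speed)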
charliememory/AutonomousDriving
CarND-Vehicle-Detection/src/fast_multiscale_search.py
1
5024
import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import pickle import cv2 from feature import * from utils import * # dist_pickle = pickle.load( open("svc_pickle.p", "rb" ) ) # svc = dist_pickle["svc"] # x_scaler = dist_pickle["scaler"] # orient = dist_pickle["orient"] # pix_per_cell = dist_pickle["pix_per_cell"] # cell_per_block = dist_pickle["cell_per_block"] # spatial_size = dist_pickle["spatial_size"] # hist_bins = dist_pickle["hist_bins"] # img = mpimg.imread('test_image.jpg') # Define a single function that can extract features using hog sub-sampling and make predictions def find_cars(img, window, ystart, ystop, scale, svc, x_scaler, color_space, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, spatial_feat=True, hist_feat=True, hog_feat=True): # draw_img = np.copy(img) img = img.astype(np.float32)/255 img_tosearch = img[ystart:ystop,:,:] ctrans_tosearch = color_convert_from_RGB(img_tosearch, color_space) if scale != 1: imshape = ctrans_tosearch.shape ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale))) ch1 = ctrans_tosearch[:,:,0] ch2 = ctrans_tosearch[:,:,1] ch3 = ctrans_tosearch[:,:,2] # Define blocks and steps as above nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1 nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1 nfeat_per_block = orient*cell_per_block**2 # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell # window = 64 nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1 cells_per_step = 2 # Instead of overlap, define how many cells to step nxsteps = (nxblocks - nblocks_per_window) // cells_per_step nysteps = (nyblocks - nblocks_per_window) // cells_per_step # Compute individual channel HOG features for the entire image hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False) hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False) hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False) bbox_list = [] for xb in range(nxsteps): for yb in range(nysteps): ypos = yb*cells_per_step xpos = xb*cells_per_step img_features = [] # Extract HOG for this patch if hog_feat: hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3)) img_features.append(hog_features) xleft = xpos*pix_per_cell ytop = ypos*pix_per_cell # Extract the image patch subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64)) # Get color features if spatial_feat: spatial_features = bin_spatial(subimg, size=spatial_size) img_features.append(spatial_features) if hist_feat: hist_features = color_hist(subimg, nbins=hist_bins) img_features.append(hist_features) # Scale features and make a prediction test_features = x_scaler.transform(np.hstack(img_features).reshape(1, -1)) #test_features = x_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) test_prediction = svc.predict(test_features) if test_prediction == 1: xbox_left = np.int(xleft*scale) ytop_draw = np.int(ytop*scale) win_draw = np.int(window*scale) bbox = ((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)) bbox_list.append(bbox) # cv2.rectangle(draw_img,(xbox_left, 
ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6) return bbox_list def multiscale_search(img, svc, x_scaler, param): window = 64 # ystart = [400]*3 # # ystop = [int(720-window*2), int(720-window*1.5), int(720-window)] # ystop = [720-window]*3 # scale = [1, 1.5, 2] ystart = 400 ystop = [int(720-window*2.2), int(720-window*1.8), int(720-window*1.5), int(720-window*1.3)] scale = [1.3, 1.5, 1.8, 2.2] bbox_list = [] for i in range(len(scale)): bbox_list.extend(find_cars(img, window, ystart, ystop[i], scale[i], svc, x_scaler, param['color_space'], param['orient'], param['pix_per_cell'], param['cell_per_block'], param['spatial_size'], param['hist_bins'], param['spatial_feat'], param['hist_feat'], param['hog_feat'])) return bbox_list
gpl-3.0
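The window/block arithmetic behind find_cars above, worked for one scale so the number of evaluated windows is visible; the search-region size is an assumption, not taken from the project's test images:

# HOG sub-sampling geometry for a single scale.
pix_per_cell, cell_per_block, window, cells_per_step = 8, 2, 64, 2
search_width, search_height = 1280, 256      # assumed region below ystart

nxblocks = (search_width // pix_per_cell) - cell_per_block + 1      # 159
nyblocks = (search_height // pix_per_cell) - cell_per_block + 1     # 31
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1  # 7
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step         # 76
nysteps = (nyblocks - nblocks_per_window) // cells_per_step         # 12
print(nxsteps * nysteps)   # 912 windows scored from one HOG computation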
shenzebang/scikit-learn
examples/classification/plot_classifier_comparison.py
181
4699
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=====================
Classifier comparison
=====================

A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.

Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.

The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the
test set.
"""
print(__doc__)


# Code source: Gaël Varoquaux
#              Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA

h = .02  # step size in the mesh

names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA()]

X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]

figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)

        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
bsd-3-clause
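The example above imports from module paths that later scikit-learn releases removed (sklearn.cross_validation, sklearn.lda, sklearn.qda). Below is a minimal sketch of the same fit-and-score pattern against the current module paths; it assumes scikit-learn >= 0.20, and the particular dataset and classifier selection are illustrative rather than a drop-in replacement for the full comparison figure.

import numpy as np
from sklearn.model_selection import train_test_split        # was sklearn.cross_validation
from sklearn.discriminant_analysis import (                  # was sklearn.lda / sklearn.qda
    LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis)
from sklearn.datasets import make_moons
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# one synthetic dataset, standardized and split exactly as in the example
X, y = make_moons(noise=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
                                                    random_state=42)

for name, clf in [("RBF SVM", SVC(gamma=2, C=1)),
                  ("LDA", LinearDiscriminantAnalysis()),
                  ("QDA", QuadraticDiscriminantAnalysis())]:
    clf.fit(X_train, y_train)
    print(name, clf.score(X_test, y_test))

# Scoring the mesh for the contour plot works the same way as in the example:
# use decision_function when the estimator provides it, otherwise
# predict_proba(...)[:, 1].

The renamed estimators are behaviorally equivalent here; only the import locations changed.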
pbmanis/acq4
acq4/analysis/modules/IVCurve/IVCurve.py
3
89452
# -*- coding: utf-8 -*- from __future__ import print_function """ IVCurve: Analysis module that analyzes current-voltage and firing relationships from current clamp data. This is part of Acq4 Paul B. Manis, Ph.D. 2011-2013. Pep8 compliant (via pep8.py) 10/25/2013 Refactoring begun 3/21/2015 """ from collections import OrderedDict import os import os.path # import traceback import itertools import functools # import gc import numpy as np import scipy from acq4.util import Qt # from acq4.util import DataManager from acq4.analysis.AnalysisModule import AnalysisModule import acq4.pyqtgraph as pg # from acq4.pyqtgraph import configfile # from acq4.util.metaarray import MetaArray import acq4.util.matplotlibexporter as matplotlibexporter import acq4.analysis.tools.Utility as Utility # pbm's utilities... import acq4.analysis.tools.Fitting as Fitting # pbm's fitting stuff... import acq4.analysis.tools.ScriptProcessor as ScriptProcessor from . import ctrlTemplate import pprint import time # noinspection PyPep8 class IVCurve(AnalysisModule): """ IVCurve is an Analysis Module for Acq4. IVCurve performs analyses of current-voltage relationships in electrophysiology experiments. The module is interactive, and is primarily designed to allow a preliminary examination of data collected in current clamp and voltage clamp. Results analyzed include: Resting potential (average RMP through the episodes in the protocol). Input resistance (maximum slope if IV relationship below Vrest) Cell time constant (single exponential fit) Ih Sag amplitude and tau Spike rate as a function of injected current Interspike interval as a function of time for each current level RMP as a function of time through the protocol """ def __init__(self, host): AnalysisModule.__init__(self, host) self.Clamps = self.dataModel.GetClamps() # access the "GetClamps" class for reading data self.data_template = ( OrderedDict([('Species', (12, '{:>12s}')), ('Age', (5, '{:>5s}')), ('Sex', (3, '{:>3s}')), ('Weight', (6, '{:>6s}')), ('Temperature', (10, '{:>10s}')), ('ElapsedTime', (11, '{:>11.2f}')), ('RMP', (5, '{:>5.1f}')), ('Rin', (5, '{:>5.1f}')), ('Bridge', (5, '{:>5.1f}')), ('tau', (5, '{:>5.1f}')), ('AdaptRatio', (9, '{:>9.3f}')), ('tauh', (5, '{:>5.1f}')), ('Gh', (6, '{:>6.2f}')), ('FiringRate', (12, '{:>9.1f}')), ('AP1_HalfWidth', (13, '{:>13.2f}')), ('AP1_Latency', (11, '{:>11.1f}')), ('AP2_HalfWidth', (13, '{:>13.2f}')), ('AP2_Latency', (11, '{:>11.1f}')), ('AHP_Depth', (9, '{:9.2f}')), ('Description', (11, '{:s}')), ])) self.Script = ScriptProcessor.ScriptProcessor(host) self.Script.setAnalysis(analysis=self.updateAnalysis, fileloader = self.loadFileRequested, template = self.data_template, clamps = self.Clamps, printer=self.printAnalysis, dbupdate=self.dbStoreClicked) # specify the routines to be called and data sets to be used self.loaded = None self.filename = None self.dirsSet = None self.lrss_flag = True # show is default self.lrpk_flag = True self.rmp_flag = True self.bridgeCorrection = None # bridge correction in Mohm. 
self.showFISI = True # show FISI or ISI as a function of spike number (when False) self.lrtau_flag = False self.regions_exist = False self.tauh_fits = {} self.tauh_fitted = {} self.tau_fits = {} self.tau_fitted = {} self.regions_exist = False self.regions = {} self.analysis_summary = {} self.tx = None self.keep_analysis_count = 0 self.dataMarkers = [] self.doUpdates = True self.colors = ['w', 'g', 'b', 'r', 'y', 'c'] self.symbols = ['o', 's', 't', 'd', '+'] self.color_list = itertools.cycle(self.colors) self.symbol_list = itertools.cycle(self.symbols) self.script_header = False self.Clamps.data_mode = 'IC' # analysis depends on the type of data we have. self.clear_results() # --------------graphical elements----------------- self._sizeHint = (1280, 900) # try to establish size of window self.ctrlWidget = Qt.QWidget() self.ctrl = ctrlTemplate.Ui_Form() self.ctrl.setupUi(self.ctrlWidget) self.main_layout = pg.GraphicsView() # instead of GraphicsScene? # make fixed widget for the module output self.widget = Qt.QWidget() self.gridLayout = Qt.QGridLayout() self.widget.setLayout(self.gridLayout) self.gridLayout.setContentsMargins(4, 4, 4, 4) self.gridLayout.setSpacing(1) # Setup basic GUI self._elements_ = OrderedDict([ ('File Loader', {'type': 'fileInput', 'size': (170, 50), 'host': self}), ('Parameters', {'type': 'ctrl', 'object': self.ctrlWidget, 'host': self, 'size': (160, 700)}), ('Plots', {'type': 'ctrl', 'object': self.widget, 'pos': ('right',), 'size': (400, 700)}), ]) self.initializeElements() self.file_loader_instance = self.getElement('File Loader', create=True) # grab input form the "Ctrl" window self.ctrl.IVCurve_Update.clicked.connect(self.updateAnalysis) self.ctrl.IVCurve_PrintResults.clicked.connect( functools.partial(self.printAnalysis, printnow=True, script_header=True)) if not matplotlibexporter.HAVE_MPL: self.ctrl.IVCurve_MPLExport.setEnabled = False # make button inactive # self.ctrl.IVCurve_MPLExport.clicked.connect(self.matplotlibExport) else: self.ctrl.IVCurve_MPLExport.clicked.connect( functools.partial(matplotlibexporter.matplotlibExport, gridlayout=self.gridLayout, title=self.filename)) self.ctrl.IVCurve_KeepAnalysis.clicked.connect(self.resetKeepAnalysis) self.ctrl.IVCurve_getFileInfo.clicked.connect(self.get_file_information) [self.ctrl.IVCurve_RMPMode.currentIndexChanged.connect(x) for x in [self.update_rmpAnalysis, self.analyzeSpikes]] self.ctrl.IVCurve_FISI_ISI_button.clicked.connect(self.displayFISI_ISI) self.ctrl.dbStoreBtn.clicked.connect(self.dbStoreClicked) self.ctrl.IVCurve_OpenScript_Btn.clicked.connect(self.read_script) self.ctrl.IVCurve_RunScript_Btn.clicked.connect(self.rerun_script) self.ctrl.IVCurve_PrintScript_Btn.clicked.connect(self.Script.print_script_output) #self.scripts_form.PSPReversal_ScriptCopy_Btn.clicked.connect(self.copy_script_output) #self.scripts_form.PSPReversal_ScriptFormatted_Btn.clicked.connect(self.print_formatted_script_output) self.ctrl.IVCurve_ScriptName.setText('None') self.layout = self.getElement('Plots', create=True) # instantiate the graphs using a gridLayout (also facilitates matplotlib export; see export routine below) self.data_plot = pg.PlotWidget() self.gridLayout.addWidget(self.data_plot, 0, 0, 3, 1) self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data') self.cmd_plot = pg.PlotWidget() self.gridLayout.addWidget(self.cmd_plot, 3, 0, 1, 1) self.label_up(self.cmd_plot, 'T (s)', 'I (A)', 'Command') self.RMP_plot = pg.PlotWidget() self.gridLayout.addWidget(self.RMP_plot, 1, 1, 1, 1) self.label_up(self.RMP_plot, 'T (s)', 
'V (mV)', 'RMP') self.fiPlot = pg.PlotWidget() self.gridLayout.addWidget(self.fiPlot, 2, 1, 1, 1) self.label_up(self.fiPlot, 'I (pA)', 'Spikes (#)', 'F-I') self.fslPlot = pg.PlotWidget() self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1) self.label_up(self.fslPlot, 'I (pA)', 'Fsl/Fisi (ms)', 'FSL/FISI') self.IV_plot = pg.PlotWidget() self.gridLayout.addWidget(self.IV_plot, 0, 1, 1, 1) self.label_up(self.IV_plot, 'I (pA)', 'V (V)', 'I-V') for row, s in enumerate([20, 10, 10, 10]): self.gridLayout.setRowStretch(row, s) # self.tailPlot = pg.PlotWidget() # self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1) # self.label_up(self.tailPlot, 'V (V)', 'I (A)', 'Tail Current') # Add a color scale self.color_scale = pg.GradientLegend((20, 150), (-10, -10)) self.data_plot.scene().addItem(self.color_scale) self.ctrl.pushButton.clicked.connect(functools.partial(self.initialize_regions, reset=True)) def clear_results(self): """ Clear results resets variables. This is typically needed every time a new data set is loaded. """ self.filename = '' self.r_in = 0.0 self.tau = 0.0 self.adapt_ratio = 0.0 self.spikes_counted = False self.nospk = [] self.spk = [] self.Sequence = '' self.ivss = [] # steady-state IV (window 2) self.ivpk = [] # peak IV (window 1) self.fsl = [] # first spike latency self.fisi = [] # first isi self.rmp = [] # resting membrane potential during sequence self.analysis_summary = {} self.script_header = True def resetKeepAnalysis(self): self.keep_analysis_count = 0 # reset counter. def show_or_hide(self, lrregion='', forcestate=None): """ Show or hide specific regions in the display Parameters ---------- lrregion : str, default: '' name of the region('lrwin0', etc) forcestate : None or Boolean, default: None Set True to force the show status, False to Hide. If forcestate is None, then uses the region's 'shstate' value to set the state. Returns ------- nothing """ if lrregion == '': print('PSPReversal:show_or_hide:: lrregion is {:<s}'.format(lrregion)) return region = self.regions[lrregion] if forcestate is not None: if forcestate: region['region'].show() region['state'].setChecked(Qt.Qt.Checked) region['shstate'] = True else: region['region'].hide() region['state'].setChecked(Qt.Qt.Unchecked) region['shstate'] = False else: if not region['shstate']: region['region'].show() region['state'].setChecked(Qt.Qt.Checked) region['shstate'] = True else: region['region'].hide() region['state'].setChecked(Qt.Qt.Unchecked) region['shstate'] = False def displayFISI_ISI(self): """ Control display of first interspike interval/first spike latency versus ISI over time. """ if self.showFISI: # currently showin FISI/FSL; switch to ISI over time self.showFISI = False else: self.showFISI = True self.update_SpikePlots() def initialize_regions(self, reset=False): """ initialize_regions sets the linear regions on the displayed data Here we create the analysis regions in the plot. However, this should NOT happen until the plot has been created Note the the information about each region is held in a dictionary, which for each region has a dictionary that accesses the UI and class methods for that region. This later simplifies the code and reduces repetitive sections. 
""" # hold all the linear regions in a dictionary if not self.regions_exist: self.regions['lrleak'] = {'name': 'leak', # use a "leak" window 'region': pg.LinearRegionItem([0, 1], orientation=pg.LinearRegionItem.Horizontal, brush=pg.mkBrush(255, 255, 0, 50.)), 'plot': self.cmd_plot, 'state': self.ctrl.IVCurve_subLeak, 'shstate': False, # keep internal copy of the state 'mode': self.ctrl.IVCurve_subLeak.isChecked(), 'start': self.ctrl.IVCurve_LeakMin, 'stop': self.ctrl.IVCurve_LeakMax, 'updater': self.updateAnalysis, 'units': 'pA'} self.ctrl.IVCurve_subLeak.region = self.regions['lrleak']['region'] # save region with checkbox self.regions['lrwin0'] = {'name': 'win0', # peak window 'region': pg.LinearRegionItem([0, 1], brush=pg.mkBrush(128, 128, 128, 50.)), 'plot': self.data_plot, 'state': self.ctrl.IVCurve_showHide_lrpk, 'shstate': True, # keep internal copy of the state 'mode': None, 'start': self.ctrl.IVCurve_pkTStart, 'stop': self.ctrl.IVCurve_pkTStop, 'updater': self.updateAnalysis, 'units': 'ms'} self.ctrl.IVCurve_showHide_lrpk.region = self.regions['lrwin0']['region'] # save region with checkbox self.regions['lrwin1'] = {'name': 'win2', # ss window 'region': pg.LinearRegionItem([0, 1], brush=pg.mkBrush(0, 0, 255, 50.)), 'plot': self.data_plot, 'state': self.ctrl.IVCurve_showHide_lrss, 'shstate': True, # keep internal copy of the state 'mode': None, 'start': self.ctrl.IVCurve_ssTStart, 'stop': self.ctrl.IVCurve_ssTStop, 'updater': self.updateAnalysis, 'units': 'ms'} self.ctrl.IVCurve_showHide_lrss.region = self.regions['lrwin1']['region'] # save region with checkbox # self.lrtau = pg.LinearRegionItem([0, 1], # brush=pg.mkBrush(255, 0, 0, 50.)) self.regions['lrrmp'] = {'name': 'rmp', 'region': pg.LinearRegionItem([0, 1], brush=pg.mkBrush (255, 255, 0, 25.)), 'plot': self.data_plot, 'state': self.ctrl.IVCurve_showHide_lrrmp, 'shstate': True, # keep internal copy of the state 'mode': None, 'start': self.ctrl.IVCurve_rmpTStart, 'stop': self.ctrl.IVCurve_rmpTStop, 'updater': self.update_rmpAnalysis, 'units': 'ms'} self.ctrl.IVCurve_showHide_lrrmp.region = self.regions['lrrmp']['region'] # save region with checkbox # establish that measurement is on top, exclusion is next, and reference is on bottom self.regions['lrtau'] = {'name': 'tau', 'region': pg.LinearRegionItem([0, 1], brush=pg.mkBrush (255, 255, 0, 25.)), 'plot': self.data_plot, 'state': self.ctrl.IVCurve_showHide_lrtau, 'shstate': False, # keep internal copy of the state 'mode': None, 'start': self.ctrl.IVCurve_tau2TStart, 'stop': self.ctrl.IVCurve_tau2TStop, 'updater': self.update_Tauh, 'units': 'ms'} self.ctrl.IVCurve_showHide_lrtau.region = self.regions['lrtau']['region'] # save region with checkbox self.regions['lrwin0']['region'].setZValue(500) self.regions['lrwin1']['region'].setZValue(100) self.regions['lrtau']['region'].setZValue(1000) self.regions['lrrmp']['region'].setZValue(1000) self.regions['lrleak']['region'].setZValue(1000) for regkey, reg in self.regions.items(): # initialize region states self.show_or_hide(lrregion=regkey, forcestate=reg['shstate']) for regkey, reg in self.regions.items(): reg['plot'].addItem(reg['region']) reg['state'].clicked.connect(functools.partial(self.show_or_hide, lrregion=regkey)) if reg['updater'] is not None: reg['region'].sigRegionChangeFinished.connect( functools.partial(reg['updater'], region=reg['name'])) # if self.regions[reg]['mode'] is not None: # self.regions[reg]['mode'].currentIndexChanged.connect(self.interactive_analysis) if reset: for regkey, reg in self.regions.items(): # 
initialize region states self.show_or_hide(lrregion=regkey, forcestate=reg['shstate']) for reg in self.regions.values(): for s in ['start', 'stop']: reg[s].setSuffix(' ' + reg['units']) self.regions_exist = True def get_file_information(self, default_dh=None): """ get_file_information reads the sequence information from the currently selected data file Two-dimensional sequences are supported. Parameter --------- default_dh : data handle, default None the data handle to use to access the file information Return ------ nothing: """ if default_dh is None: dh = self.file_loader_instance.selectedFiles() else: dh = default_dh if not dh or len(dh) == 0: # when using scripts, the fileloader may not know.. return dh = dh[0] # only the first file sequence = self.dataModel.listSequenceParams(dh) keys = list(sequence.keys()) leftseq = [str(x) for x in sequence[keys[0]]] if len(keys) > 1: rightseq = [str(x) for x in sequence[keys[1]]] else: rightseq = [] leftseq.insert(0, 'All') rightseq.insert(0, 'All') ### specific to our program - relocate self.ctrl.IVCurve_Sequence1.clear() self.ctrl.IVCurve_Sequence2.clear() self.ctrl.IVCurve_Sequence1.addItems(leftseq) self.ctrl.IVCurve_Sequence2.addItems(rightseq) self.sequence = sequence def updaterStatus(self, mode='on'): """ Change the auto updater status """ for regkey, reg in self.regions.items(): if mode in ['on', 'On', True]: self.doUpdates = True reg['region'].sigRegionChangeFinished.connect( functools.partial(reg['updater'], region=reg['name'])) if mode in ['off', 'Off', None, False]: self.doUpdates = False try: reg['region'].sigRegionChangeFinished.disconnect() except: # may already be disconnected...so fail gracefully pass def loadFileRequested(self, dh, analyze=True, bridge=None): """ loadFileRequested is called by "file loader" when a file is requested. FileLoader is provided by the AnalysisModule class dh is the handle to the currently selected directory (or directories) This function loads all of the successive records from the specified protocol. Ancillary information from the protocol is stored in class variables. Extracts information about the commands, sometimes using a rather simplified set of assumptions. Much of the work for reading the data is performed in the GetClamps class in PatchEPhys. :param dh: the directory handle (or list of handles) representing the selected entitites from the FileLoader in the Analysis Module :modifies: plots, sequence, data arrays, data mode, etc. :return: True if successful; otherwise raises an exception """ self.data_plot.clearPlots() self.cmd_plot.clearPlots() self.clear_results() self.updaterStatus('Off') if len(dh) == 0: raise Exception("IVCurve::loadFileRequested: " + "Select an IV protocol directory.") if len(dh) != 1: raise Exception("IVCurve::loadFileRequested: " + "Can only load one file at a time.") self.get_file_information(default_dh=dh) # Get info from most recent file requested dh = dh[0] # just get the first one self.filename = dh.name() self.current_dirhandle = dh # this is critical! 
self.loaded = dh self.analysis_summary = self.dataModel.cell_summary(dh) # get other info as needed for the protocol # print 'analysis summary: ', self.analysis_summary pars = {} # need to pass some parameters from the GUI pars['limits'] = self.ctrl.IVCurve_IVLimits.isChecked() # checkbox: True if loading limited current range pars['cmin'] = self.ctrl.IVCurve_IVLimitMin.value() # minimum current level to load pars['cmax'] = self.ctrl.IVCurve_IVLimitMax.value() # maximum current level to load pars['KeepT'] = self.ctrl.IVCurve_KeepT.isChecked() # keep timebase # sequence selections: # pars[''sequence'] is a dictionary # The dictionary has 'index' (currentIndex()) and 'count' from the GUI pars['sequence1'] = {'index': [self.ctrl.IVCurve_Sequence1.currentIndex() - 1]} pars['sequence1']['count'] = self.ctrl.IVCurve_Sequence1.count() - 1 pars['sequence2'] = {'index': [self.ctrl.IVCurve_Sequence2.currentIndex() - 1]} pars['sequence2']['count'] = self.ctrl.IVCurve_Sequence2.count() - 1 ci = self.Clamps.getClampData(dh, pars) if ci is None: return False self.ctrl.IVCurve_dataMode.setText(self.Clamps.data_mode) # self.bridgeCorrection = 200e6 # print 'bridge: ', bridge if bridge is not None: self.bridgeCorrection = bridge self.ctrl.IVCurve_bridge.setValue(self.bridgeCorrection) #for i in range(self.Clamps.traces.shape[0]): print('******** Doing bridge correction: ', self.bridgeCorrection) self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave) else: br = self.ctrl.IVCurve_bridge.value()*1e6 # print 'br: ', br if br != 0.0: self.bridgeCorrection = br self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave) else: self.bridgeCorrection = None # now plot the data self.ctrl.IVCurve_tauh_Commands.clear() self.ctrl.IVCurve_tauh_Commands.addItems(ci['cmdList']) self.color_scale.setIntColorScale(0, len(ci['dirs']), maxValue=200) self.make_map_symbols() self.plot_traces() self.setup_regions() self.get_window_analysisPars() # prepare the analysis parameters self.updaterStatus('on') # re-enable update status if analyze: # only do this if requested (default). Don't do in script processing ....yet self.updateAnalysis() return True def plot_traces(self, multimode=False): """ Plot the current data traces. :param multimode: try using "multiline plot routine" to speed up plots (no color though) :return: nothing """ if self.ctrl.IVCurve_KeepAnalysis.isChecked(): self.keep_analysis_count += 1 else: self.keep_analysis_count = 0 # always make sure is reset # this is the only way to reset iterators. 
self.color_list = itertools.cycle(self.colors) self.symbol_list = itertools.cycle(self.symbols) self.clearDecorators() self.make_map_symbols() self.data_plot.plotItem.clearPlots() self.cmd_plot.plotItem.clearPlots() ntr = self.Clamps.traces.shape[0] self.data_plot.setDownsampling(auto=False, mode='mean') self.data_plot.setClipToView(False) # setting True deletes some points used for decoration of spikes by shape self.cmd_plot.setDownsampling(auto=False, mode='mean') self.cmd_plot.setClipToView(True) # can leave this true since we do not put symbols on the plot self.data_plot.disableAutoRange() self.cmd_plot.disableAutoRange() cmdindxs = np.unique(self.Clamps.commandLevels) # find the unique voltages colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))] # make a list to use if multimode: pass # datalines = MultiLine(self.Clamps.time_base, self.Clamps.traces, downsample=10) # self.data_plot.addItem(datalines) # cmdlines = MultiLine(self.Clamps.time_base, self.Clamps.cmd_wave, downsample=10) # self.cmd_plot.addItem(cmdlines) else: for i in range(ntr): atrace = self.Clamps.traces[i] acmdwave = self.Clamps.cmd_wave[i] self.data_plot.plot(x=self.Clamps.time_base, y=atrace, downSample=10, downSampleMethod='mean', pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255)) self.cmd_plot.plot(x=self.Clamps.time_base, y=acmdwave, downSample=10, downSampleMethod='mean', pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255)) if self.Clamps.data_mode in self.dataModel.ic_modes: self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data') self.label_up(self.cmd_plot, 'T (s)', 'I (%s)' % self.Clamps.command_units, 'Data') elif self.Clamps.data_mode in self.dataModel.vc_modes: # voltage clamp self.label_up(self.data_plot, 'T (s)', 'I (A)', 'Data') self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data') else: # mode is not known: plot both as V self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data') self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data') self.data_plot.autoRange() self.cmd_plot.autoRange() def setup_regions(self): """ Initialize the positions of the lr regions on the display. We attempt to use a logical set of values based on the timing of command steps and stimulus events :return: """ self.initialize_regions() # now create the analysis regions, if not already existing if self.ctrl.IVCurve_KeepT.isChecked() is False: # change regions; otherwise keep... 
tstart_pk = self.Clamps.tstart tdur_pk = self.Clamps.tdur * 0.4 # use first 40% of trace for peak tstart_ss = self.Clamps.tstart + 0.75 * self.Clamps.tdur tdur_ss = self.Clamps.tdur * 0.25 tstart_tau = self.Clamps.tstart + 0.1 * self.Clamps.tdur tdur_tau = 0.9 * self.Clamps.tdur # tauh window self.regions['lrtau']['region'].setRegion([tstart_tau, tstart_tau + tdur_tau]) # peak voltage window self.regions['lrwin0']['region'].setRegion([tstart_pk, tstart_pk + tdur_pk]) # steady-state meausurement: self.regions['lrwin1']['region'].setRegion([tstart_ss, tstart_ss + tdur_ss]) # rmp measurement self.regions['lrrmp']['region'].setRegion([0., self.Clamps.tstart * 0.9]) # rmp window # print 'rmp window region: ', self.Clamps.tstart * 0.9 for r in ['lrtau', 'lrwin0', 'lrwin1', 'lrrmp']: self.regions[r]['region'].setBounds([0., np.max(self.Clamps.time_base)]) # limit regions to data def get_window_analysisPars(self): """ Retrieve the settings of the lr region windows, and some other general values in preparation for analysis :return: """ self.analysis_parameters = {} # start out empty so we are not fooled by priors for region in ['lrleak', 'lrwin0', 'lrwin1', 'lrrmp', 'lrtau']: rgninfo = self.regions[region]['region'].getRegion() # from the display self.regions[region]['start'].setValue(rgninfo[0] * 1.0e3) # report values to screen self.regions[region]['stop'].setValue(rgninfo[1] * 1.0e3) self.analysis_parameters[region] = {'times': rgninfo} # for region in ['lrwin0', 'lrwin1', 'lrwin2']: # if self.regions[region]['mode'] is not None: # self.analysis_parameters[region]['mode'] = self.regions[region]['mode'].currentText() # self.get_alternation() # get values into the analysisPars dictionary # self.get_baseline() # self.get_junction() def updateAnalysis(self, presets=None, region=None): """updateAnalysis re-reads the time parameters and re-analyzes the spikes""" # print 'self.Script.script: ', self.Script.script['Cells'].keys() if presets in [True, False]: presets = None # print '\n\n*******\n', traceback.format_stack(limit=7) if presets is not None and type(presets) == type({}): # copy from dictionary of presets into analysis parameters for k in presets.keys(): self.analysis_summary[k] = presets[k] if 'SpikeThreshold' in presets.keys(): self.ctrl.IVCurve_SpikeThreshold.setValue(float(presets['SpikeThreshold'])) #print 'set threshold to %f' % float(presets['SpikeThreshold']) if 'bridgeCorrection' in presets.keys(): self.bridgeCorrection = presets['bridgeCorrection'] print('####### BRIDGE CORRRECTION #######: ', self.bridgeCorrection) else: self.bridgeCorrection = 0. self.get_window_analysisPars() # print 'updateanalysis: readparsupdate' self.readParsUpdate(clearFlag=True, pw=False) def readParsUpdate(self, clearFlag=False, pw=False): """ Read the parameter window entries, set the lr regions to the values in the window, and do an update on the analysis Parameters ---------- clearFlag : Boolean, False appears to be unused pw : Boolean, False appears to be unused """ if not self.doUpdates: return # analyze spikes first (gets information on which traces to exclude/include for other calculations) # print 'readparsupdate, calling analyze spikes' self.analyzeSpikes() self.analysis_summary['tauh'] = np.nan # define these because they may not get filled... self.analysis_summary['Gh'] = np.nan (pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol() # update RMP first as we might need it for the others. 
if self.ctrl.IVCurve_showHide_lrrmp.isChecked(): rgnx1 = self.ctrl.IVCurve_rmpTStart.value() / 1.0e3 rgnx2 = self.ctrl.IVCurve_rmpTStop.value() / 1.0e3 self.regions['lrrmp']['region'].setRegion([rgnx1, rgnx2]) self.update_rmpAnalysis(clear=clearFlag, pw=pw) if self.ctrl.IVCurve_showHide_lrss.isChecked(): rgnx1 = self.ctrl.IVCurve_ssTStart.value() / 1.0e3 rgnx2 = self.ctrl.IVCurve_ssTStop.value() / 1.0e3 self.regions['lrwin1']['region'].setRegion([rgnx1, rgnx2]) self.update_ssAnalysis() if self.ctrl.IVCurve_showHide_lrpk.isChecked(): rgnx1 = self.ctrl.IVCurve_pkTStart.value() / 1.0e3 rgnx2 = self.ctrl.IVCurve_pkTStop.value() / 1.0e3 self.regions['lrwin0']['region'].setRegion([rgnx1, rgnx2]) self.update_pkAnalysis(clear=clearFlag, pw=pw) if self.ctrl.IVCurve_subLeak.isChecked(): rgnx1 = self.ctrl.IVCurve_LeakMin.value() / 1e3 rgnx2 = self.ctrl.IVCurve_LeakMax.value() / 1e3 self.regions['lrleak']['region'].setRegion([rgnx1, rgnx2]) self.update_ssAnalysis() self.update_pkAnalysis() if self.ctrl.IVCurve_showHide_lrtau.isChecked(): # include tau in the list... if the tool is selected rgnx1 = self.ctrl.IVCurve_tau2TStart.value() / 1e3 rgnx2 = self.ctrl.IVCurve_tau2TStop.value() / 1e3 self.regions['lrtau']['region'].setRegion([rgnx1, rgnx2]) self.update_Tauh() if self.ctrl.IVCurve_PeakMode.currentIndexChanged: self.peakmode = self.ctrl.IVCurve_PeakMode.currentText() self.update_pkAnalysis() self.analyzeSpikeShape() # finally do the spike shape self.ctrl.IVCurve_bridge.setValue(0.) # reset bridge value after analysis. def read_script(self, name=''): """ read a script file from disk, and use that information to drive the analysis :param name: :return: """ self.script_name = self.Script.read_script() if self.script_name is None: print('Failed to read script') self.ctrl.IVCurve_ScriptName.setText('None') return self.ctrl.IVCurve_ScriptName.setText(os.path.basename(self.script_name)) self.Script.run_script() def rerun_script(self): """ revalidate and run the current script :return: """ self.Script.run_script() def analyzeSpikes(self): """ analyzeSpikes: Using the threshold set in the control panel, count the number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend) Updates the spike plot(s). 
The following variables are set: self.spikecount: a 1-D numpy array of spike counts, aligned with the current (command) self.adapt_ratio: the adaptation ratio of the spike train self.fsl: a numpy array of first spike latency for each command level self.fisi: a numpy array of first interspike intervals for each command level self.nospk: the indices of command levels where no spike was detected self.spk: the indices of command levels were at least one spike was detected """ if self.keep_analysis_count == 0: clearFlag = True else: clearFlag = False self.analysis_summary['FI_Curve'] = None # print '***** analyzing Spikes' if self.Clamps.data_mode not in self.dataModel.ic_modes or self.Clamps.time_base is None: print('IVCurve::analyzeSpikes: Cannot count spikes, ' + 'and dataMode is ', self.Clamps.data_mode, 'and ICModes are: ', self.dataModel.ic_modes, 'tx is: ', self.tx) self.spikecount = [] self.fiPlot.plot(x=[], y=[], clear=clearFlag, pen='w', symbolSize=6, symbolPen='b', symbolBrush=(0, 0, 255, 200), symbol='s') self.fslPlot.plot(x=[], y=[], pen='w', clear=clearFlag, symbolSize=6, symbolPen='g', symbolBrush=(0, 255, 0, 200), symbol='t') self.fslPlot.plot(x=[], y=[], pen='w', symbolSize=6, symbolPen='y', symbolBrush=(255, 255, 0, 200), symbol='s') return twin = self.Clamps.tend - self.Clamps.tstart # measurements window in seconds maxspkrate = 50 # max rate to count in adaptation is 50 spikes/second minspk = 4 maxspk = int(maxspkrate*twin) # scale max dount by range of spike counts threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3 self.analysis_summary['SpikeThreshold'] = self.ctrl.IVCurve_SpikeThreshold.value() ntr = len(self.Clamps.traces) self.spikecount = np.zeros(ntr) self.fsl = np.zeros(ntr) self.fisi = np.zeros(ntr) ar = np.zeros(ntr) self.allisi = {} self.spikes = [[] for i in range(ntr)] self.spikeIndices = [[] for i in range(ntr)] #print 'clamp start/end: ', self.Clamps.tstart, self.Clamps.tend for i in range(ntr): (spikes, spkx) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i], threshold, t0=self.Clamps.tstart, t1=self.Clamps.tend, dt=self.Clamps.sample_interval, mode='peak', # best to use peak for detection interpolate=False, debug=False) if len(spikes) == 0: #print 'no spikes found' continue self.spikes[i] = spikes #print 'found %d spikes in trace %d' % (len(spikes), i) self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes] self.spikecount[i] = len(spikes) self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3 if len(spikes) > 1: self.fisi[i] = (spikes[1] - spikes[0])*1e3 self.allisi[i] = np.diff(spikes)*1e3 # for Adaptation ratio analysis if minspk <= len(spikes) <= maxspk: misi = np.mean(np.diff(spikes[-3:]))*1e3 ar[i] = misi / self.fisi[i] iAR = np.where(ar > 0) self.adapt_ratio = np.mean(ar[iAR]) # only where we made the measurement self.analysis_summary['AdaptRatio'] = self.adapt_ratio self.ctrl.IVCurve_AR.setText(u'%7.3f' % self.adapt_ratio) self.nospk = np.where(self.spikecount == 0) self.spk = np.where(self.spikecount > 0)[0] self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount]) # print self.analysis_summary['FI_Curve'] self.spikes_counted = True self.update_SpikePlots() def _timeindex(self, t): return np.argmin(self.Clamps.time_base-t) def analyzeSpikeShape(self, printSpikeInfo=False): # analyze the spike shape. # based on Druckman et al. 
Cerebral Cortex, 2013 begin_dV = 12.0 # V/s or mV/ms ntr = len(self.Clamps.traces) # print 'analyzespikeshape, self.spk: ', self.spk self.spikeShape = OrderedDict() rmp = np.zeros(ntr) iHold = np.zeros(ntr) for i in range(ntr): if len(self.spikes[i]) == 0: continue trspikes = OrderedDict() if printSpikeInfo: print(np.array(self.Clamps.values)) print(len(self.Clamps.traces)) (rmp[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.traces[i], 0.0, self.Clamps.tstart) (iHold[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.cmd_wave[i], 0.0, self.Clamps.tstart) for j in range(len(self.spikes[i])): thisspike = {'trace': i, 'AP_number': j, 'AP_beginIndex': None, 'AP_endIndex': None, 'peakIndex': None, 'peak_T': None, 'peak_V': None, 'AP_Latency': None, 'AP_beginV': None, 'halfwidth': None, 'trough_T': None, 'trough_V': None, 'peaktotroughT': None, 'current': None, 'iHold': None, 'pulseDuration': None, 'tstart': self.Clamps.tstart} # initialize the structure thisspike['current'] = self.Clamps.values[i] - iHold[i] thisspike['iHold'] = iHold[i] thisspike['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart # in seconds thisspike['peakIndex'] = self.spikeIndices[i][j] thisspike['peak_T'] = self.Clamps.time_base[thisspike['peakIndex']] thisspike['peak_V'] = self.Clamps.traces[i][thisspike['peakIndex']] # max voltage of spike thisspike['tstart'] = self.Clamps.tstart # find the minimum going forward - that is AHP min dt = (self.Clamps.time_base[1]-self.Clamps.time_base[0]) dv = np.diff(self.Clamps.traces[i])/dt k = self.spikeIndices[i][j] + 1 if j < self.spikecount[i] - 1: # find end of spike (top of next, or end of trace) kend = self.spikeIndices[i][j+1] else: kend = len(self.Clamps.traces[i]) try: km = np.argmin(dv[k:kend])+k # find fastst falling point, use that for start of detection except: continue # v = self.Clamps.traces[i][km] # vlast = self.Clamps.traces[i][km] #kmin = np.argmin(np.argmin(dv2[k:kend])) + k # np.argmin(np.fabs(self.Clamps.traces[i][k:kend]))+k kmin = np.argmin(self.Clamps.traces[i][km:kend])+km thisspike['AP_endIndex'] = kmin thisspike['trough_T'] = self.Clamps.time_base[thisspike['AP_endIndex']] thisspike['trough_V'] = self.Clamps.traces[i][kmin] if thisspike['AP_endIndex'] is not None: thisspike['peaktotrough'] = thisspike['trough_T'] - thisspike['peak_T'] k = self.spikeIndices[i][j]-1 if j > 0: kbegin = self.spikeIndices[i][j-1] # trspikes[j-1]['AP_endIndex'] # self.spikeIndices[i][j-1] # index to previ spike start else: kbegin = k - int(0.002/dt) # for first spike - 4 msec prior only if kbegin*dt <= self.Clamps.tstart: kbegin = kbegin + int(0.0002/dt) # 1 msec # revise k to start at max of rising phase try: km = np.argmax(dv[kbegin:k]) + kbegin except: continue if (km - kbegin < 1): km = kbegin + int((k - kbegin)/2.) 
+ 1 kthresh = np.argmin(np.fabs(dv[kbegin:km] - begin_dV)) + kbegin # point where slope is closest to begin thisspike['AP_beginIndex'] = kthresh thisspike['AP_Latency'] = self.Clamps.time_base[kthresh] thisspike['AP_beginV'] = self.Clamps.traces[i][thisspike['AP_beginIndex']] if thisspike['AP_beginIndex'] is not None and thisspike['AP_endIndex'] is not None: halfv = 0.5*(thisspike['peak_V'] + thisspike['AP_beginV']) kup = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['AP_beginIndex']:thisspike['peakIndex']] - halfv)) kup += thisspike['AP_beginIndex'] kdown = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['peakIndex']:thisspike['AP_endIndex']] - halfv)) kdown += thisspike['peakIndex'] if kup is not None and kdown is not None: thisspike['halfwidth'] = self.Clamps.time_base[kdown] - self.Clamps.time_base[kup] thisspike['hw_up'] = self.Clamps.time_base[kup] thisspike['hw_down'] = self.Clamps.time_base[kdown] thisspike['hw_v'] = halfv trspikes[j] = thisspike self.spikeShape[i] = trspikes if printSpikeInfo: pp = pprint.PrettyPrinter(indent=4) for m in sorted(self.spikeShape.keys()): print('----\nTrace: %d has %d APs' % (m, len(list(self.spikeShape[m].keys())))) for n in sorted(self.spikeShape[m].keys()): pp.pprint(self.spikeShape[m][n]) self.analysis_summary['spikes'] = self.spikeShape # save in the summary dictionary too self.analysis_summary['iHold'] = np.mean(iHold) self.analysis_summary['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart self.getClassifyingInfo() # build analysis summary here as well. self.clearDecorators() self.spikeDecorator() def spikeDecorator(self): """ Put markers on the spikes to visually confirm the analysis of thresholds, etc. """ # get colors cmdindxs = np.unique(self.Clamps.commandLevels) # find the unique voltages colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))] # make a list to use alllats = [] allpeakt = [] allpeakv = [] for i, trace in enumerate(self.spikeShape): aps = [] tps = [] paps = [] ptps = [] taps = [] ttps = [] hwv = [] tups = [] tdps = [] for j, spk in enumerate(self.spikeShape[trace]): aps.append(self.spikeShape[trace][spk]['AP_beginV']) alllats.append(self.spikeShape[trace][spk]['AP_Latency']) tps.append(self.spikeShape[trace][spk]['AP_Latency']) u =self.data_plot.plot(tps, aps, pen=None, symbol='o', brush=pg.mkBrush('g'), symbolSize=4) self.dataMarkers.append(u) for j, spk in enumerate(self.spikeShape[trace]): paps.append(self.spikeShape[trace][spk]['peak_V']) ptps.append(self.spikeShape[trace][spk]['peak_T']) allpeakt.append(self.spikeShape[trace][spk]['peak_T']+0.01) allpeakv.append(self.spikeShape[trace][spk]['peak_V']) # u = self.data_plot.plot(allpeakt, allpeakv, pen=None, symbol='o', brush=pg.mkBrush('r'), size=2) # self.dataMarkers.append(u) u = self.data_plot.plot(ptps, paps, pen=None, symbol='t', brush=pg.mkBrush('w'), symbolSize=4) self.dataMarkers.append(u) for j, spk in enumerate(self.spikeShape[trace]): taps.append(self.spikeShape[trace][spk]['trough_V']) ttps.append(self.spikeShape[trace][spk]['trough_T']) u = self.data_plot.plot(ttps, taps, pen=None, symbol='+', brush=pg.mkBrush('r'), symbolSize=4) self.dataMarkers.append(u) for j, spk in enumerate(self.spikeShape[trace]): tups.append(self.spikeShape[trace][spk]['hw_up']) tdps.append(self.spikeShape[trace][spk]['hw_down']) hwv.append(self.spikeShape[trace][spk]['hw_v']) u =self.data_plot.plot(tups, hwv, pen=None, symbol='d', brush=pg.mkBrush('c'), symbolSize=4) self.dataMarkers.append(u) d 
=self.data_plot.plot(tdps, hwv, pen=None, symbol='s', brush=pg.mkBrush('c'), symbolSize=4) self.dataMarkers.append(d) def clearDecorators(self): if len(self.dataMarkers) > 0: [self.dataMarkers[k].clear() for k,m in enumerate(self.dataMarkers)] self.dataMarkers = [] def getIVCurrentThresholds(self): # figure out "threshold" for spike, get 150% and 300% points. nsp = [] icmd = [] for m in sorted(self.spikeShape.keys()): n = len(self.spikeShape[m].keys()) # number of spikes in the trace if n > 0: nsp.append(len(self.spikeShape[m].keys())) icmd.append(self.spikeShape[m][0]['current']) try: iamin = np.argmin(icmd) except: raise ValueError('IVCurve:getIVCurrentThresholds - icmd seems to be ? : ', icmd) imin = np.min(icmd) ia150 = np.argmin(np.abs(1.5*imin-np.array(icmd))) iacmdthr = np.argmin(np.abs(imin-self.Clamps.values)) ia150cmdthr = np.argmin(np.abs(icmd[ia150] - self.Clamps.values)) #print 'thr indices and values: ', iacmdthr, ia150cmdthr, self.Clamps.values[iacmdthr], self.Clamps.values[ia150cmdthr] return (iacmdthr, ia150cmdthr) # return threshold indices into self.Clamps.values array at threshold and 150% point def getClassifyingInfo(self): """ Adds the classifying information according to Druckmann et al., Cerebral Cortex, 2013 to the analysis summary """ (jthr, j150) = self.getIVCurrentThresholds() # get the indices for the traces we need to pull data from if jthr == j150: print('\n%s:' % self.filename) print('Threshold current T and 1.5T the same: using next up value for j150') print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape)) print('1 ', self.spikeShape[jthr][0]['current']*1e12) print('2 ', self.spikeShape[j150+1][0]['current']*1e12) print(' >> Threshold current: %8.3f 1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12, self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12)) j150 = jthr + 1 if len(self.spikeShape[j150]) >= 1 and self.spikeShape[j150][0]['halfwidth'] is not None: self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3 self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3 else: self.analysis_summary['AP1_Latency'] = np.inf self.analysis_summary['AP1_HalfWidth'] = np.inf if len(self.spikeShape[j150]) >= 2 and self.spikeShape[j150][1]['halfwidth'] is not None: self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3 self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3 else: self.analysis_summary['AP2_Latency'] = np.inf self.analysis_summary['AP2_HalfWidth'] = np.inf rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration'] # spikes per second, normalized for pulse duration # first AHP depth # print 'j150: ', j150 # print self.spikeShape[j150][0].keys() # print self.spikeShape[j150] AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V'] self.analysis_summary['FiringRate'] = rate self.analysis_summary['AHP_Depth'] = AHPDepth*1e3 # convert to mV # pprint.pprint(self.analysis_summary) # except: # raise ValueError ('Failed Classification for cell: %s' % self.filename) def update_Tau_membrane(self, peak_time=None, printWindow=False, whichTau=1, vrange=[-5., -20.]): """ Compute time constant (single exponential) from the onset of the response using lrpk window, and only steps that produce a voltage change between 5 and 20 mV below rest or as specified """ if 
len(self.Clamps.commandLevels) == 0: # probably not ready yet to do the update. return if self.Clamps.data_mode not in self.dataModel.ic_modes: # only permit in IC return rgnpk = list(self.regions['lrwin0']['region'].getRegion()) Func = 'exp1' # single exponential fit with DC offset. Fits = Fitting.Fitting() if self.rmp == []: self.update_rmpAnalysis() #print self.rmp initpars = [self.rmp*1e-3, 0.010, 0.01] peak_time = None icmdneg = np.where(self.Clamps.commandLevels < -20e-12) maxcmd = np.min(self.Clamps.commandLevels) ineg = np.where(self.Clamps.commandLevels[icmdneg] < 0.0) if peak_time is not None and ineg != np.array([]): rgnpk[1] = np.max(peak_time[ineg[0]]) dt = self.Clamps.sample_interval rgnindx = [int((rgnpk[1]-0.005)/dt), int((rgnpk[1])/dt)] rmps = self.ivbaseline vmeans = np.mean(self.Clamps.traces[:, rgnindx[0]:rgnindx[1]], axis=1) - self.ivbaseline indxs = np.where(np.logical_and((vrange[0]*1e-3 >= vmeans[ineg]), (vmeans[ineg] >= vrange[1]*1e-3))) indxs = list(indxs[0]) whichdata = ineg[0][indxs] # restricts to valid values itaucmd = self.Clamps.commandLevels[ineg] whichaxis = 0 fpar = [] names = [] okdata = [] if len(self.tau_fitted.keys()) > 0: [self.tau_fitted[k].clear() for k in self.tau_fitted.keys()] self.tau_fitted = {} for j, k in enumerate(whichdata): self.tau_fitted[j] = self.data_plot.plot(self.Clamps.time_base, self.Clamps.traces[k], pen=pg.mkPen('w')) (fparx, xf, yf, namesx) = Fits.FitRegion([k], whichaxis, self.Clamps.time_base, self.Clamps.traces, dataType='2d', t0=rgnpk[0], t1=rgnpk[1], fitFunc=Func, fitPars=initpars, method='SLSQP', bounds=[(-0.1, 0.1), (-0.1, 0.1), (0.005, 0.30)]) if not fparx: raise Exception('IVCurve::update_Tau_membrane: Charging tau fitting failed - see log') #print 'j: ', j, len(fpar) if fparx[0][1] < 2.5e-3: # amplitude must be > 2.5 mV to be useful continue fpar.append(fparx[0]) names.append(namesx[0]) okdata.append(k) self.taupars = fpar self.tauwin = rgnpk self.taufunc = Func self.whichdata = okdata taus = [] for j in range(len(fpar)): outstr = "" taus.append(fpar[j][2]) for i in range(0, len(names[j])): outstr += '%s = %f, ' % (names[j][i], fpar[j][i]) if printWindow: print("FIT(%d, %.1f pA): %s " % (whichdata[j], itaucmd[j] * 1e12, outstr)) meantau = np.mean(taus) self.ctrl.IVCurve_Tau.setText(u'%18.1f ms' % (meantau * 1.e3)) self.tau = meantau self.analysis_summary['tau'] = self.tau*1.e3 tautext = 'Mean Tau: %8.1f' if printWindow: print(tautext % (meantau * 1e3)) self.show_tau_plot() def show_tau_plot(self): Fits = Fitting.Fitting() fitPars = self.taupars xFit = np.zeros((len(self.taupars), 500)) for i in range(len(self.taupars)): xFit[i,:] = np.arange(0, self.tauwin[1]-self.tauwin[0], (self.tauwin[1]-self.tauwin[0])/500.) yFit = np.zeros((len(fitPars), xFit.shape[1])) fitfunc = Fits.fitfuncmap[self.taufunc] if len(self.tau_fits.keys()) > 0: [self.tau_fits[k].clear() for k in self.tau_fits.keys()] self.tau_fits = {} for k, whichdata in enumerate(self.whichdata): yFit[k] = fitfunc[0](fitPars[k], xFit[k], C=None) # +self.ivbaseline[whichdata] self.tau_fits[k] = self.data_plot.plot(xFit[k]+self.tauwin[0], yFit[k], pen=pg.mkPen('r', width=2, style=Qt.Qt.DashLine)) def update_Tauh(self, region=None, printWindow=False): """ compute tau (single exponential) from the onset of the markers using lrtau window, and only for the step closest to the selected current level in the GUI window. Parameters ---------- region : dummy argument, default : None printWindow : Boolean, default : False region is a dummy argument... 
Also compute the ratio of the sag from the peak (marker1) to the end of the trace (marker 2). Based on analysis in Fujino and Oertel, J. Neuroscience 2001, to type cells based on different Ih kinetics and magnitude. """ self.analysis_summary['tauh'] = np.nan self.analysis_summary['Gh'] = np.nan if not self.ctrl.IVCurve_showHide_lrtau.isChecked(): return rgn = self.regions['lrtau']['region'].getRegion() Func = 'exp1' # single exponential fit to the whole region Fits = Fitting.Fitting() initpars = [-80.0 * 1e-3, -10.0 * 1e-3, 50.0 * 1e-3] # find the current level that is closest to the target current s_target = self.ctrl.IVCurve_tauh_Commands.currentIndex() itarget = self.Clamps.values[s_target] # retrive actual value from commands self.neg_cmd = itarget idiff = np.abs(np.array(self.Clamps.commandLevels) - itarget) amin = np.argmin(idiff) # amin appears to be the same as s_target # target trace (as selected in cmd drop-down list): target = self.Clamps.traces[amin] # get Vrmp - # rmp approximation. vrmp = np.median(target['Time': 0.0:self.Clamps.tstart - 0.005]) * 1000. self.neg_vrmp = vrmp # get peak and steady-state voltages pkRgn = self.regions['lrwin0']['region'].getRegion() ssRgn = self.regions['lrwin1']['region'].getRegion() vpk = target['Time': pkRgn[0]:pkRgn[1]].min() * 1000 self.neg_pk = (vpk - vrmp) / 1000. vss = np.median(target['Time': ssRgn[0]:ssRgn[1]]) * 1000 self.neg_ss = (vss - vrmp) / 1000. whichdata = [int(amin)] itaucmd = [self.Clamps.commandLevels[amin]] self.ctrl.IVCurve_tau2TStart.setValue(rgn[0] * 1.0e3) self.ctrl.IVCurve_tau2TStop.setValue(rgn[1] * 1.0e3) fd = self.Clamps.traces['Time': rgn[0]:rgn[1]][whichdata][0] if len(self.tauh_fitted.keys()) > 0: [self.tauh_fitted[k].clear() for k in self.tauh_fitted.keys()] self.tauh_fitted = {} for k, d in enumerate(whichdata): self.tauh_fitted[k] = self.data_plot.plot(fd, pen=pg.mkPen('w')) # now do the fit whichaxis = 0 (fpar, xf, yf, names) = Fits.FitRegion(whichdata, whichaxis, self.Clamps.traces.xvals('Time'), self.Clamps.traces.view(np.ndarray), dataType='2d', t0=rgn[0], t1=rgn[1], fitFunc=Func, fitPars=initpars) if not fpar: raise Exception('IVCurve::update_Tauh: tau_h fitting failed - see log') bluepen = pg.mkPen('b', width=2.0, style=Qt.Qt.DashLine) if len(self.tauh_fits.keys()) > 0: [self.tauh_fits[k].clear() for k in self.tauh_fits.keys()] self.tauh_fits = {} self.tauh_fits[0] = self.data_plot.plot(xf[0]+rgn[0], yf[0], pen=bluepen) # self.tauh_fits.update() s = np.shape(fpar) taus = [] for j in range(0, s[0]): outstr = "" taus.append(fpar[j][2]) for i in range(0, len(names[j])): outstr += '%s = %f, ' % (names[j][i], fpar[j][i]) if printWindow: print("Ih FIT(%d, %.1f pA): %s " % (whichdata[j], itaucmd[j] * 1e12, outstr)) meantau = np.mean(taus) self.ctrl.IVCurve_Tauh.setText(u'%8.1f ms' % (meantau * 1.e3)) self.tau2 = meantau bovera = (vss - vrmp) / (vpk - vrmp) self.ctrl.IVCurve_Ih_ba.setText('%8.1f' % (bovera * 100.)) self.ctrl.IVCurve_ssAmp.setText('%8.2f' % (vss - vrmp)) self.ctrl.IVCurve_pkAmp.setText('%8.2f' % (vpk - vrmp)) if bovera < 0.55 and self.tau2 < 0.015: # self.ctrl.IVCurve_FOType.setText('D Stellate') else: self.ctrl.IVCurve_FOType.setText('T Stellate') # estimate of Gh: Gpk = itarget / self.neg_pk Gss = itarget / self.neg_ss self.Gh = Gss - Gpk self.analysis_summary['tauh'] = self.tau2*1.e3 self.analysis_summary['Gh'] = self.Gh self.ctrl.IVCurve_Gh.setText('%8.2f nS' % (self.Gh * 1e9)) def update_ssAnalysis(self): """ Compute the steady-state IV from the selected time window Parameters ---------- 
None. Returns ------- nothing. modifies: ivss, yleak, ivss_cmd, cmd. The IV curve is only valid when there are no spikes detected in the window. The values in the curve are taken as the mean of the current and the voltage in the time window, at each command step. We also compute the input resistance. For voltage clamp data, we can optionally remove the "leak" current. The resulting curve is plotted. """ if self.Clamps.traces is None: return rgnss = self.regions['lrwin1']['region'].getRegion() r1 = rgnss[1] if rgnss[1] == rgnss[0]: print('Steady-state regions have no width; using 100 msec. window for ss ') r1 = rgnss[0] + 0.1 self.ctrl.IVCurve_ssTStart.setValue(rgnss[0] * 1.0e3) self.ctrl.IVCurve_ssTStop.setValue(r1 * 1.0e3) data1 = self.Clamps.traces['Time': rgnss[0]:r1] # print 'data shape: ', data1.shape if data1.shape[1] == 0 or data1.shape[0] == 1: return # skip it self.ivss = [] # check out whether there are spikes in the window that is selected threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3 ntr = len(self.Clamps.traces) if not self.spikes_counted: print('updatess: spikes not counted yet? ') self.analyzeSpikes() # spikecount = np.zeros(ntr) # for i in range(ntr): # (spike, spk) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i], # threshold, # t0=rgnss[0], t1=r1, # dt=self.Clamps.sample_interval, # mode='schmitt', # interpolate=False, # debug=False) # if len(spike) > 0: # spikecount[i] = len(spike) # nospk = np.where(spikecount == 0) # print 'spikes checked' self.ivss = data1.mean(axis=1) # all traces if self.ctrl.IVCurve_SubBaseline.isChecked(): self.ivss = self.ivss - self.ivbaseline if len(self.nospk) >= 1: # Steady-state IV where there are no spikes self.ivss = self.ivss[self.nospk] self.ivss_cmd = self.Clamps.commandLevels[self.nospk] # self.commandLevels = commands[self.nospk] # compute Rin from the SS IV: # this makes the assumption that: # successive trials are in order (as are commands) # commands are not repeated... if len(self.ivss_cmd) > 0 and len(self.ivss) > 0: self.r_in = np.max(np.diff (self.ivss) / np.diff(self.ivss_cmd)) self.ctrl.IVCurve_Rin.setText(u'%9.1f M\u03A9' % (self.r_in * 1.0e-6)) self.analysis_summary['Rin'] = self.r_in*1.0e-6 else: self.ctrl.IVCurve_Rin.setText(u'No valid points') self.yleak = np.zeros(len(self.ivss)) if self.ctrl.IVCurve_subLeak.isChecked(): if self.Clamps.data_mode in self.dataModel.ic_modes: sf = 1e-12 elif self.Clamps.data_mode in self.dataModel.vc_modes: sf = 1e-3 else: sf = 1.0 (x, y) = Utility.clipdata(self.ivss, self.ivss_cmd, self.ctrl.IVCurve_LeakMin.value() * sf, self.ctrl.IVCurve_LeakMax.value() * sf) try: p = np.polyfit(x, y, 1) # linear fit self.yleak = np.polyval(p, self.ivss_cmd) self.ivss = self.ivss - self.yleak except: raise ValueError('IVCurve Leak subtraction: no valid points to correct') isort = np.argsort(self.ivss_cmd) self.ivss_cmd = self.ivss_cmd[isort] self.ivss = self.ivss[isort] self.analysis_summary['IV_Curve_ss'] = [self.ivss_cmd, self.ivss] self.update_IVPlot() def update_pkAnalysis(self, clear=False, pw=False): """ Compute the peak IV (minimum) from the selected window mode can be 'min', 'max', or 'abs' Parameters ---------- clear : Boolean, False pw : Boolean, False pw is passed to update_taumembrane to control printing. 
""" if self.Clamps.traces is None: return mode = self.ctrl.IVCurve_PeakMode.currentText() rgnpk = self.regions['lrwin0']['region'].getRegion() self.ctrl.IVCurve_pkTStart.setValue(rgnpk[0] * 1.0e3) self.ctrl.IVCurve_pkTStop.setValue(rgnpk[1] * 1.0e3) data2 = self.Clamps.traces['Time': rgnpk[0]:rgnpk[1]] if data2.shape[1] == 0: return # skip it - window missed the data # check out whether there are spikes in the window that is selected # but only in current clamp nospk = [] peak_pos = None if self.Clamps.data_mode in self.dataModel.ic_modes: threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3 ntr = len(self.Clamps.traces) if not self.spikes_counted: print('update_pkAnalysis: spikes not counted') self.analyzeSpikes() spikecount = np.zeros(ntr) # for i in range(ntr): # (spike, spk) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i], # threshold, # t0=rgnpk[0], t1=rgnpk[1], # dt=self.Clamps.sample_interval, # mode='schmitt', # interpolate=False, debug=False) # if len(spike) == 0: # continue # spikecount[i] = len(spike) # nospk = np.where(spikecount == 0) # nospk = np.array(nospk)[0] if mode == 'Min': self.ivpk = data2.min(axis=1) peak_pos = np.argmin(data2, axis=1) elif mode == 'Max': self.ivpk = data2.max(axis=1) peak_pos = np.argmax(data2, axis=1) elif mode == 'Abs': # find largest regardless of the sign ('minormax') x1 = data2.min(axis=1) peak_pos1 = np.argmin(data2, axis=1) x2 = data2.max(axis=1) peak_pos2 = np.argmax(data2, axis=1) self.ivpk = np.zeros(data2.shape[0]) for i in range(data2.shape[0]): if -x1[i] > x2[i]: self.ivpk[i] = x1[i] peak_pos = peak_pos1 else: self.ivpk[i] = x2[i] peak_pos = peak_pos2 # self.ivpk = np.array([np.max(x1[i], x2[i]) for i in range(data2.shape[0]]) #self.ivpk = np.maximum(np.fabs(data2.min(axis=1)), data2.max(axis=1)) if self.ctrl.IVCurve_SubBaseline.isChecked(): self.ivpk = self.ivpk - self.ivbaseline if len(self.nospk) >= 1: # Peak (min, max or absmax voltage) IV where there are no spikes self.ivpk = self.ivpk[self.nospk] self.ivpk_cmd = self.Clamps.commandLevels[self.nospk] else: self.ivpk_cmd = self.Clamps.commandLevels self.ivpk = self.ivpk.view(np.ndarray) if self.ctrl.IVCurve_subLeak.isChecked(): self.ivpk = self.ivpk - self.yleak # now sort data in ascending command levels isort = np.argsort(self.ivpk_cmd) self.ivpk_cmd = self.ivpk_cmd[isort] self.ivpk = self.ivpk[isort] self.analysis_summary['IV_Curve_pk'] = [self.ivpk_cmd, self.ivpk] self.update_IVPlot() peak_time = self.Clamps.time_base[peak_pos] self.update_Tau_membrane(peak_time=peak_time, printWindow=pw) def update_rmpAnalysis(self, **kwargs): """ Compute the RMP over time/commands from the selected window """ if self.Clamps.traces is None: return rgnrmp = self.regions['lrrmp']['region'].getRegion() self.ctrl.IVCurve_rmpTStart.setValue(rgnrmp[0] * 1.0e3) self.ctrl.IVCurve_rmpTStop.setValue(rgnrmp[1] * 1.0e3) data1 = self.Clamps.traces['Time': rgnrmp[0]:rgnrmp[1]] data1 = data1.view(np.ndarray) self.ivbaseline = data1.mean(axis=1) # all traces self.ivbaseline_cmd = self.Clamps.commandLevels self.rmp = np.mean(self.ivbaseline) * 1e3 # convert to mV self.ctrl.IVCurve_vrmp.setText('%8.2f' % self.rmp) self.update_RMPPlot() self.analysis_summary['RMP'] = self.rmp def make_map_symbols(self): """ Given the current state of things, (keeping the analysis, when superimposing multiple results, for example), sets self.currentSymDict with a dict of pen, fill color, empty color, a symbol from our lists, and a clearflag. Used to overplot different data. 
""" n = self.keep_analysis_count pen = next(self.color_list) filledbrush = pen emptybrush = None symbol = next(self.symbol_list) if n == 0: clearFlag = True else: clearFlag = False self.currentSymDict = {'pen': pen, 'filledbrush': filledbrush, 'emptybrush': emptybrush, 'symbol': symbol, 'n': n, 'clearFlag': clearFlag} def map_symbol(self): cd = self.currentSymDict if cd['filledbrush'] == 'w': cd['filledbrush'] = pg.mkBrush((128, 128, 128)) if cd['pen'] == 'w': cd['pen'] = pg.mkPen((128, 128, 128)) self.lastSymbol = (cd['pen'], cd['filledbrush'], cd['emptybrush'], cd['symbol'], cd['n'], cd['clearFlag']) return self.lastSymbol def update_IVPlot(self): """ Draw the peak and steady-sate IV to the I-V window Note: x axis is always I or V, y axis V or I """ if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False: self.IV_plot.clear() (pen, filledbrush, emptybrush, symbol, n, clearFlag) = \ self.map_symbol() if self.Clamps.data_mode in self.dataModel.ic_modes: if (len(self.ivss) > 0 and self.ctrl.IVCurve_showHide_lrss.isChecked()): self.IV_plot.plot(self.ivss_cmd * 1e12, self.ivss * 1e3, symbol=symbol, pen=pen, symbolSize=6, symbolPen=pen, symbolBrush=filledbrush) if (len(self.ivpk) > 0 and self.ctrl.IVCurve_showHide_lrpk.isChecked()): self.IV_plot.plot(self.ivpk_cmd * 1e12, self.ivpk * 1e3, symbol=symbol, pen=pen, symbolSize=6, symbolPen=pen, symbolBrush=emptybrush) self.label_up(self.IV_plot, 'I (pA)', 'V (mV)', 'I-V (CC)') if self.Clamps.data_mode in self.dataModel.vc_modes: if (len(self.ivss) > 0 and self.ctrl.IVCurve_showHide_lrss.isChecked()): self.IV_plot.plot(self.ivss_cmd * 1e3, self.ivss * 1e9, symbol=symbol, pen=pen, symbolSize=6, symbolPen=pen, symbolBrush=filledbrush) if (len(self.ivpk) > 0 and self.ctrl.IVCurve_showHide_lrpk.isChecked()): self.IV_plot.plot(self.ivpk_cmd * 1e3, self.ivpk * 1e9, symbol=symbol, pen=pen, symbolSize=6, symbolPen=pen, symbolBrush=emptybrush) self.label_up(self.IV_plot, 'V (mV)', 'I (nA)', 'I-V (VC)') def update_RMPPlot(self): """ Draw the RMP to the I-V window Note: x axis can be I, T, or # spikes """ if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False: self.RMP_plot.clear() if len(self.ivbaseline) > 0: (pen, filledbrush, emptybrush, symbol, n, clearFlag) = \ self.map_symbol() mode = self.ctrl.IVCurve_RMPMode.currentIndex() if self.Clamps.data_mode in self.dataModel.ic_modes: sf = 1e3 self.RMP_plot.setLabel('left', 'V mV') else: sf = 1e12 self.RMP_plot.setLabel('left', 'I (pA)') if mode == 0: self.RMP_plot.plot(self.Clamps.trace_StartTimes, sf * np.array(self.ivbaseline), symbol=symbol, pen=pen, symbolSize=6, symbolPen=pen, symbolBrush=filledbrush) self.RMP_plot.setLabel('bottom', 'T (s)') elif mode == 1: self.RMP_plot.plot(self.Clamps.commandLevels, 1.e3 * np.array(self.ivbaseline), symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=filledbrush) self.RMP_plot.setLabel('bottom', 'I (pA)') elif mode == 2: self.RMP_plot.plot(self.spikecount, 1.e3 * np.array(self.ivbaseline), symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=emptybrush) self.RMP_plot.setLabel('bottom', 'Spikes') else: pass def update_SpikePlots(self): """ Draw the spike counts to the FI and FSL windows Note: x axis can be I, T, or # spikes """ if self.Clamps.data_mode in self.dataModel.vc_modes: self.fiPlot.clear() # no plots of spikes in VC self.fslPlot.clear() return (pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol() mode = self.ctrl.IVCurve_RMPMode.currentIndex() # get x axis mode self.spcmd = self.Clamps.commandLevels[self.spk] # get 
command levels iwth spikes iscale = 1.0e12 # convert to pA yfslsc = 1.0 # convert to msec if mode == 0: # plot with time as x axis xfi = self.Clamps.trace_StartTimes xfsl = self.Clamps.trace_StartTimes select = range(len(self.Clamps.trace_StartTimes)) xlabel = 'T (s)' elif mode == 1: # plot with current as x select = self.spk xfi = self.Clamps.commandLevels * iscale xfsl = self.spcmd * iscale xlabel = 'I (pA)' elif mode == 2: # plot with spike counts as x xfi = self.spikecount xfsl = self.spikecount select = range(len(self.spikecount)) xlabel = 'Spikes (N)' else: return # mode not in available list self.fiPlot.plot(x=xfi, y=self.spikecount, clear=clearFlag, symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=filledbrush) fslmax = 0. if self.showFISI: self.fslPlot.plot(x=xfsl, y=self.fsl[select] * yfslsc, clear=clearFlag, symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=filledbrush) self.fslPlot.plot(x=xfsl, y=self.fisi[select] * yfslsc, symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=emptybrush) if len(xfsl) > 0: self.fslPlot.setXRange(0.0, np.max(xfsl)) self.fslPlot.setYRange(0., max(max(self.fsl[select]), max(self.fisi[select]))) ylabel = 'Fsl/Fisi (ms)' xfsllabel = xlabel self.fslPlot.setTitle('FSL/FISI') else: maxspk = 0 maxisi = 0. clear = clearFlag for i, k in enumerate(self.allisi.keys()): nspk = len(self.allisi[k]) xisi = np.arange(nspk) self.fslPlot.plot(x=xisi, y=self.allisi[k] * yfslsc, clear=clear, symbolSize=6, symbol=symbol, pen=pen, symbolPen=pen, symbolBrush=filledbrush) clear = False maxspk = max(nspk, maxspk) maxisi = max(np.max(self.allisi[k]), maxisi) self.fslPlot.setXRange(0.0, maxspk) self.fslPlot.setYRange(0.0, maxisi) xfsllabel = 'Spike Number' ylabel = 'ISI (s)' self.fslPlot.setTitle('ISI vs. Spike Number') self.fiPlot.setLabel('bottom', xlabel) self.fslPlot.setLabel('bottom', xfsllabel) self.fslPlot.setLabel('left', ylabel) def printAnalysis(self, printnow=True, script_header=True, copytoclipboard=False): """ Print the analysis summary information (Cell, protocol, etc) in a nice formatted version to the terminal. The output can be copied to another program (excel, prism) for further analysis Parameters ---------- printnow : Boolean, optional Set true to print to terminal, default: True script_header : Boolean, optional Set to print the header line, default: True copytoclipboard : Boolean, optional copy the text to the system clipboard, default: False Return ------ ltxt : string The text that would be printed. 
Might be useful to capture for other purposes """ # Dictionary structure: key = information about if self.Clamps.data_mode in self.dataModel.ic_modes or self.Clamps.data_mode == 'vc': data_template = self.data_template else: data_template = ( OrderedDict([('ElapsedTime', '{:>8.2f}'), ('HoldV', '{:>5.1f}'), ('JP', '{:>5.1f}'), ('Rs', '{:>6.2f}'), ('Cm', '{:>6.1f}'), ('Ru', '{:>6.2f}'), ('Erev', '{:>6.2f}'), ('gsyn_Erev', '{:>9.2f}'), ('gsyn_60', '{:>7.2f}'), ('gsyn_13', '{:>7.2f}'), # ('p0', '{:6.3e}'), ('p1', '{:6.3e}'), ('p2', '{:6.3e}'), ('p3', '{:6.3e}'), ('I_ionic+', '{:>8.3f}'), ('I_ionic-', '{:>8.3f}'), ('ILeak', '{:>7.3f}'), ('win1Start', '{:>9.3f}'), ('win1End', '{:>7.3f}'), ('win2Start', '{:>9.3f}'), ('win2End', '{:>7.3f}'), ('win0Start', '{:>9.3f}'), ('win0End', '{:>7.3f}'), ])) # summary table header is written anew for each cell htxt = '' if script_header: htxt = '{:34s}\t{:15s}\t{:24s}\t'.format("Cell", "Genotype", "Protocol") for k in data_template.keys(): cnv = '{:<%ds}' % (data_template[k][0]) # print 'cnv: ', cnv htxt += (cnv + '\t').format(k) script_header = False htxt += '\n' ltxt = '' if 'Genotype' not in self.analysis_summary.keys(): self.analysis_summary['Genotype'] = 'Unknown' ltxt += '{:34s}\t{:15s}\t{:24s}\t'.format(self.analysis_summary['CellID'], self.analysis_summary['Genotype'], self.analysis_summary['Protocol']) for a in data_template.keys(): if a in self.analysis_summary.keys(): txt = self.analysis_summary[a] if a in ['Description', 'Notes']: txt = txt.replace('\n', ' ').replace('\r', '') # remove line breaks from output, replace \n with space #print a, data_template[a] ltxt += (data_template[a][1]).format(txt) + ' \t' else: ltxt += ('{:>%ds}' % (data_template[a][0]) + '\t').format('NaN') ltxt = ltxt.replace('\n', ' ').replace('\r', '') # remove line breaks ltxt = htxt + ltxt if printnow: print(ltxt) if copytoclipboard: clipb = Qt.QApplication.clipboard() clipb.clear(mode=clipb.Clipboard) clipb.setText(ltxt, mode=clipb.Clipboard) return ltxt def dbStoreClicked(self): """ Store data into the current database for further analysis """ #self.updateAnalysis() if self.loaded is None: return self.dbIdentity = 'IVCurve' # type of data in the database db = self._host_.dm.currentDatabase() # print 'dir (db): ', dir(db) # print 'dir (db.db): ', dir(db.db) # print 'db.listTables: ', db.listTables() # print 'db.tables: ', db.tables # table = self.dbIdentity columns = OrderedDict([ # ('ProtocolDir', 'directory:Protocol'), ('AnalysisDate', 'text'), ('ProtocolSequenceDir', 'directory:ProtocolSequence'), ('Dir', 'text'), ('Protocol', 'text'), ('Genotype', 'text'), ('Celltype', 'text'), ('UseData', 'int'), ('RMP', 'real'), ('R_in', 'real'), ('tau_m', 'real'), ('iHold', 'real'), ('PulseDuration', 'real'), ('neg_cmd', 'real'), ('neg_pk', 'real'), ('neg_ss', 'real'), ('h_tau', 'real'), ('h_g', 'real'), ('SpikeThreshold', 'real'), ('AdaptRatio', 'real'), ('FiringRate', 'real'), ('AP1_HalfWidth', 'real'), ('AP1_Latency', 'real'), ('AP2_HalfWidth', 'real'), ('AP2_Latency', 'real'), ('AHP_Depth', 'real'), ('FI_Curve', 'text'), ('IV_Curve_pk', 'text'), ('IV_Curve_ss', 'text'), ]) if table not in db.tables: db.createTable(table, columns, owner=self.dbIdentity) try: z = self.neg_cmd except: self.neg_cmd = 0. self.neg_pk = 0. self.neg_ss = 0. self.tau2 = 0. self.Gh = 0. 
if 'Genotype' not in self.analysis_summary: self.analysis_summary['Genotype'] = 'Unknown' # print 'genytope: ', self.analysis_summary['Genotype'] if 'Celltype' not in self.Script.analysis_parameters: self.analysis_summary['Celltype'] = 'Unknown' data = { 'AnalysisDate': time.strftime("%Y-%m-%d %H:%M:%S"), 'ProtocolSequenceDir': self.loaded, # 'ProtocolSequenceDir': self.dataModel.getParent(self.loaded, 'ProtocolSequence'), 'Dir': self.loaded.parent().name(), 'Protocol': self.loaded.name(), 'Genotype': self.analysis_summary['Genotype'], 'Celltype': self.Script.analysis_parameters['Celltype'], # uses global info, not per cell info 'UseData' : 1, 'RMP': self.rmp / 1000., 'R_in': self.r_in, 'tau_m': self.tau, 'iHold': self.analysis_summary['iHold'], 'PulseDuration': self.analysis_summary['pulseDuration'], 'AdaptRatio': self.adapt_ratio, 'neg_cmd': self.neg_cmd, 'neg_pk': self.neg_pk, 'neg_ss': self.neg_ss, 'h_tau': self.analysis_summary['tauh'], 'h_g': self.analysis_summary['Gh'], 'SpikeThreshold': self.analysis_summary['SpikeThreshold'], 'FiringRate': self.analysis_summary['FiringRate'], 'AP1_HalfWidth': self.analysis_summary['AP1_HalfWidth'], 'AP1_Latency': self.analysis_summary['AP1_Latency'], 'AP2_HalfWidth': self.analysis_summary['AP2_HalfWidth'], 'AP2_Latency': self.analysis_summary['AP2_Latency'], 'AHP_Depth': self.analysis_summary['AHP_Depth'], 'FI_Curve': repr(self.analysis_summary['FI_Curve'].tolist()), # convert array to string for storage 'IV_Curve_pk': repr(np.array(self.analysis_summary['IV_Curve_pk']).tolist()), 'IV_Curve_ss': repr(np.array(self.analysis_summary['IV_Curve_ss']).tolist()), } ## If only one record was given, make it into a list of one record if isinstance(data, dict): data = [data] ## Make sure target table exists and has correct columns, links to input file fields = db.describeData(data) ## override directory fields since describeData can't guess these for us # fields['ProtocolDir'] = 'directory:Protocol' fields['ProtocolSequenceDir'] = 'directory:ProtocolSequence' with db.transaction(): db.checkTable(table, owner=self.dbIdentity, columns=fields, create=True, addUnknownColumns=True, indexes=[['ProtocolSequenceDir'],]) dirtable = db.dirTableName(self.loaded) # set up the DirTable Protocol Sequence directory. if not db.hasTable(dirtable): db.createDirTable(self.loaded) # delete old for source in set([d['ProtocolSequenceDir'] for d in data]): db.delete(table, where={'ProtocolSequenceDir': source}) # write new with pg.ProgressDialog("Storing IV Results..", 0, 100) as dlg: for n, nmax in db.iterInsert(table, data, chunkSize=30): dlg.setMaximum(nmax) dlg.setValue(n) if dlg.wasCanceled(): raise HelpfulException("Scan store canceled by user.", msgType='status') #db.close() #db.open() print("Updated record for ", self.loaded.name()) # ---- Helpers ---- # Some of these would normally live in a pyqtgraph-related module, but are # just stuck here to get the job done. # @staticmethod def label_up(plot, xtext, ytext, title): """helper to label up the plot""" plot.setLabel('bottom', xtext) plot.setLabel('left', ytext) plot.setTitle(title)
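The 'Abs' branch of update_pkAnalysis above keeps, per trace, the signed value of whichever excursion (minimum or maximum) is larger in magnitude, then sorts the resulting I-V pairs by command level. Below is a minimal NumPy-only sketch of that reduction; the plain arrays stand in for the Clamps/MetaArray objects the class actually uses, so the names and test values are illustrative only.

import numpy as np

def peak_iv(traces, commands, mode='Abs'):
    """Reduce each trace (one row per command step) to a single peak value
    over the analysis window and return the I-V pairs sorted by command level."""
    if mode == 'Min':
        pk = traces.min(axis=1)
    elif mode == 'Max':
        pk = traces.max(axis=1)
    else:  # 'Abs': signed value of whichever excursion is larger in magnitude
        lo = traces.min(axis=1)
        hi = traces.max(axis=1)
        pk = np.where(-lo > hi, lo, hi)
    order = np.argsort(commands)  # ascending command levels, as in update_pkAnalysis
    return commands[order], pk[order]

# three fake voltage traces (V) and their current steps (A)
traces = np.array([[-0.060, -0.075, -0.065],
                   [-0.060, -0.055, -0.058],
                   [-0.060, -0.090, -0.070]])
commands = np.array([-50e-12, 20e-12, -100e-12])
print(peak_iv(traces, commands, mode='Abs'))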
mit
Patrick-Cole/pygmi
pygmi/rsense/iodefs.py
1
53845
# ----------------------------------------------------------------------------- # Name: iodefs.py (part of PyGMI) # # Author: Patrick Cole # E-Mail: pcole@geoscience.org.za # # Copyright: (c) 2020 Council for Geoscience # Licence: GPL-3.0 # # This file is part of PyGMI # # PyGMI is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyGMI is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------------------------------------------- """Import remote sensing data.""" import os import copy import glob import tarfile import zipfile import datetime from PyQt5 import QtWidgets, QtCore import numpy as np from osgeo import gdal, osr import pandas as pd import geopandas as gpd from geopandas import GeoDataFrame from shapely.geometry import Point import pygmi.menu_default as menu_default from pygmi.raster.datatypes import Data from pygmi.vector.dataprep import quickgrid EDIST = {1: 0.98331, 2: 0.9833, 3: 0.9833, 4: 0.9833, 5: 0.9833, 6: 0.98332, 7: 0.98333, 8: 0.98335, 9: 0.98338, 10: 0.98341, 11: 0.98345, 12: 0.98349, 13: 0.98354, 14: 0.98359, 15: 0.98365, 16: 0.98371, 17: 0.98378, 18: 0.98385, 19: 0.98393, 20: 0.98401, 21: 0.9841, 22: 0.98419, 23: 0.98428, 24: 0.98439, 25: 0.98449, 26: 0.9846, 27: 0.98472, 28: 0.98484, 29: 0.98496, 30: 0.98509, 31: 0.98523, 32: 0.98536, 33: 0.98551, 34: 0.98565, 35: 0.9858, 36: 0.98596, 37: 0.98612, 38: 0.98628, 39: 0.98645, 40: 0.98662, 41: 0.9868, 42: 0.98698, 43: 0.98717, 44: 0.98735, 45: 0.98755, 46: 0.98774, 47: 0.98794, 48: 0.98814, 49: 0.98835, 50: 0.98856, 51: 0.98877, 52: 0.98899, 53: 0.98921, 54: 0.98944, 55: 0.98966, 56: 0.98989, 57: 0.99012, 58: 0.99036, 59: 0.9906, 60: 0.99084, 61: 0.99108, 62: 0.99133, 63: 0.99158, 64: 0.99183, 65: 0.99208, 66: 0.99234, 67: 0.9926, 68: 0.99286, 69: 0.99312, 70: 0.99339, 71: 0.99365, 72: 0.99392, 73: 0.99419, 74: 0.99446, 75: 0.99474, 76: 0.99501, 77: 0.99529, 78: 0.99556, 79: 0.99584, 80: 0.99612, 81: 0.9964, 82: 0.99669, 83: 0.99697, 84: 0.99725, 85: 0.99754, 86: 0.99782, 87: 0.99811, 88: 0.9984, 89: 0.99868, 90: 0.99897, 91: 0.99926, 92: 0.99954, 93: 0.99983, 94: 1.00012, 95: 1.00041, 96: 1.00069, 97: 1.00098, 98: 1.00127, 99: 1.00155, 100: 1.00184, 101: 1.00212, 102: 1.0024, 103: 1.00269, 104: 1.00297, 105: 1.00325, 106: 1.00353, 107: 1.00381, 108: 1.00409, 109: 1.00437, 110: 1.00464, 111: 1.00492, 112: 1.00519, 113: 1.00546, 114: 1.00573, 115: 1.006, 116: 1.00626, 117: 1.00653, 118: 1.00679, 119: 1.00705, 120: 1.00731, 121: 1.00756, 122: 1.00781, 123: 1.00806, 124: 1.00831, 125: 1.00856, 126: 1.0088, 127: 1.00904, 128: 1.00928, 129: 1.00952, 130: 1.00975, 131: 1.00998, 132: 1.0102, 133: 1.01043, 134: 1.01065, 135: 1.01087, 136: 1.01108, 137: 1.01129, 138: 1.0115, 139: 1.0117, 140: 1.01191, 141: 1.0121, 142: 1.0123, 143: 1.01249, 144: 1.01267, 145: 1.01286, 146: 1.01304, 147: 1.01321, 148: 1.01338, 149: 1.01355, 150: 1.01371, 151: 1.01387, 152: 1.01403, 153: 1.01418, 154: 1.01433, 155: 1.01447, 156: 1.01461, 157: 1.01475, 158: 1.01488, 159: 1.015, 160: 1.01513, 161: 1.01524, 162: 1.01536, 
163: 1.01547, 164: 1.01557, 165: 1.01567, 166: 1.01577, 167: 1.01586, 168: 1.01595, 169: 1.01603, 170: 1.0161, 171: 1.01618, 172: 1.01625, 173: 1.01631, 174: 1.01637, 175: 1.01642, 176: 1.01647, 177: 1.01652, 178: 1.01656, 179: 1.01659, 180: 1.01662, 181: 1.01665, 182: 1.01667, 183: 1.01668, 184: 1.0167, 185: 1.0167, 186: 1.0167, 187: 1.0167, 188: 1.01669, 189: 1.01668, 190: 1.01666, 191: 1.01664, 192: 1.01661, 193: 1.01658, 194: 1.01655, 195: 1.0165, 196: 1.01646, 197: 1.01641, 198: 1.01635, 199: 1.01629, 200: 1.01623, 201: 1.01616, 202: 1.01609, 203: 1.01601, 204: 1.01592, 205: 1.01584, 206: 1.01575, 207: 1.01565, 208: 1.01555, 209: 1.01544, 210: 1.01533, 211: 1.01522, 212: 1.0151, 213: 1.01497, 214: 1.01485, 215: 1.01471, 216: 1.01458, 217: 1.01444, 218: 1.01429, 219: 1.01414, 220: 1.01399, 221: 1.01383, 222: 1.01367, 223: 1.01351, 224: 1.01334, 225: 1.01317, 226: 1.01299, 227: 1.01281, 228: 1.01263, 229: 1.01244, 230: 1.01225, 231: 1.01205, 232: 1.01186, 233: 1.01165, 234: 1.01145, 235: 1.01124, 236: 1.01103, 237: 1.01081, 238: 1.0106, 239: 1.01037, 240: 1.01015, 241: 1.00992, 242: 1.00969, 243: 1.00946, 244: 1.00922, 245: 1.00898, 246: 1.00874, 247: 1.0085, 248: 1.00825, 249: 1.008, 250: 1.00775, 251: 1.0075, 252: 1.00724, 253: 1.00698, 254: 1.00672, 255: 1.00646, 256: 1.0062, 257: 1.00593, 258: 1.00566, 259: 1.00539, 260: 1.00512, 261: 1.00485, 262: 1.00457, 263: 1.0043, 264: 1.00402, 265: 1.00374, 266: 1.00346, 267: 1.00318, 268: 1.0029, 269: 1.00262, 270: 1.00234, 271: 1.00205, 272: 1.00177, 273: 1.00148, 274: 1.00119, 275: 1.00091, 276: 1.00062, 277: 1.00033, 278: 1.00005, 279: 0.99976, 280: 0.99947, 281: 0.99918, 282: 0.9989, 283: 0.99861, 284: 0.99832, 285: 0.99804, 286: 0.99775, 287: 0.99747, 288: 0.99718, 289: 0.9969, 290: 0.99662, 291: 0.99634, 292: 0.99605, 293: 0.99577, 294: 0.9955, 295: 0.99522, 296: 0.99494, 297: 0.99467, 298: 0.9944, 299: 0.99412, 300: 0.99385, 301: 0.99359, 302: 0.99332, 303: 0.99306, 304: 0.99279, 305: 0.99253, 306: 0.99228, 307: 0.99202, 308: 0.99177, 309: 0.99152, 310: 0.99127, 311: 0.99102, 312: 0.99078, 313: 0.99054, 314: 0.9903, 315: 0.99007, 316: 0.98983, 317: 0.98961, 318: 0.98938, 319: 0.98916, 320: 0.98894, 321: 0.98872, 322: 0.98851, 323: 0.9883, 324: 0.98809, 325: 0.98789, 326: 0.98769, 327: 0.9875, 328: 0.98731, 329: 0.98712, 330: 0.98694, 331: 0.98676, 332: 0.98658, 333: 0.98641, 334: 0.98624, 335: 0.98608, 336: 0.98592, 337: 0.98577, 338: 0.98562, 339: 0.98547, 340: 0.98533, 341: 0.98519, 342: 0.98506, 343: 0.98493, 344: 0.98481, 345: 0.98469, 346: 0.98457, 347: 0.98446, 348: 0.98436, 349: 0.98426, 350: 0.98416, 351: 0.98407, 352: 0.98399, 353: 0.98391, 354: 0.98383, 355: 0.98376, 356: 0.9837, 357: 0.98363, 358: 0.98358, 359: 0.98353, 360: 0.98348, 361: 0.98344, 362: 0.9834, 363: 0.98337, 364: 0.98335, 365: 0.98333, 366: 0.98331} K1 = [3040.136402, 2482.375199, 1935.060183, 866.468575, 641.326517] K2 = [1735.337945, 1666.398761, 1585.420044, 1350.069147, 1271.221673] ESUN = [1848, 1549, 1114, 225.4, 86.63, 81.85, 74.85, 66.49, 59.85] class ImportData(): """ Import Data - Interfaces with GDAL routines. Attributes ---------- name : str item name pbar : progressbar reference to a progress bar. parent : parent reference to the parent routine outdata : dictionary dictionary of output datasets ifile : str input file name. 
Used in main.py ext : str filename extension """ def __init__(self, parent=None, extscene=None): self.ifile = '' self.filt = '' self.parent = parent self.indata = {} self.outdata = {} self.extscene = extscene if parent is None: self.showprocesslog = print else: self.showprocesslog = parent.showprocesslog def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. """ piter = self.parent.pbar.iter if self.extscene is None: return False if not nodialog: self.ifile, self.filt = QtWidgets.QFileDialog.getOpenFileName( self.parent, 'Open File', '.', self.extscene) if self.ifile == '': return False os.chdir(os.path.dirname(self.ifile)) dat = get_data(self.ifile, piter, self.showprocesslog, self.extscene) if dat is None: if self.filt == 'hdf (*.hdf *.h5)': QtWidgets.QMessageBox.warning(self.parent, 'Error', 'Could not import the data.' 'Currently only ASTER' 'is supported.', QtWidgets.QMessageBox.Ok) else: QtWidgets.QMessageBox.warning(self.parent, 'Error', 'Could not import the data.', QtWidgets.QMessageBox.Ok) return False output_type = 'Raster' self.outdata[output_type] = dat return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ self.ifile = projdata['ifile'] self.filt = projdata['filt'] self.extscene = projdata['extscene'] chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} projdata['ifile'] = self.ifile projdata['filt'] = self.filt projdata['extscene'] = self.extscene return projdata class ImportBatch(): """ Batch Import Data Interface. This does not actually import data, but rather defines a list of datasets to be used by other routines. Attributes ---------- parent : parent reference to the parent routine. idir : str Input directory. ifile : str Input file. indata : dictionary dictionary of input datasets. outdata : dictionary dictionary of output datasets. """ def __init__(self, parent=None): self.ifile = '' self.idir = '' self.parent = parent self.indata = {} self.outdata = {} def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. """ if not nodialog or self.idir == '': self.idir = QtWidgets.QFileDialog.getExistingDirectory( self.parent, 'Select Directory') if self.idir == '': return False os.chdir(self.idir) zipdat = glob.glob(self.idir+'//AST*.zip') hdfdat = glob.glob(self.idir+'//AST*.hdf') # tifdat = glob.glob(directory+'//AST*.tif') targzdat = glob.glob(self.idir+'//L*.tar*') mtldat = glob.glob(self.idir+'//L*MTL.txt') sendat = [] sendir = [f.path for f in os.scandir(self.idir) if f.is_dir() and 'SAFE' in f.path] for i in sendir: sendat.extend(glob.glob(i+'//MTD*.xml')) if (not hdfdat and not zipdat and not targzdat and not mtldat and not sendat): QtWidgets.QMessageBox.warning(self.parent, 'Error', 'No valid files in the directory.', QtWidgets.QMessageBox.Ok) return False dat = [] for i in hdfdat: if 'met' not in i: dat.append(i) dat.extend(mtldat) dat.extend(targzdat) dat.extend(zipdat) dat.extend(sendat) # for i in tifdat: # if i[:i.rindex('_')]+'_MTL.txt' in mtldat: # continue # dat.append(i) output_type = 'RasterFileList' self.outdata[output_type] = dat return True def loadproj(self, projdata): """ Load project data into class. 
Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ self.idir = projdata['idir'] chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} projdata['idir'] = self.idir return projdata class ImportSentinel5P(QtWidgets.QDialog): """ Import Sentinel 5P data to shapefile. This class imports Sentinel 5P data. Attributes ---------- name : str item name pbar : progressbar reference to a progress bar. parent : parent reference to the parent routine outdata : dictionary dictionary of output datasets ifile : str input file name. Used in main.py """ def __init__(self, parent=None): super().__init__(parent) if parent is None: self.showprocesslog = print else: self.showprocesslog = parent.showprocesslog self.parent = parent self.indata = {} self.outdata = {} self.ifile = '' self.filt = '' self.subdata = QtWidgets.QComboBox() self.lonmin = QtWidgets.QLineEdit('16') self.lonmax = QtWidgets.QLineEdit('34') self.latmin = QtWidgets.QLineEdit('-35') self.latmax = QtWidgets.QLineEdit('-21') self.setupui() def setupui(self): """ Set up UI. Returns ------- None. """ gridlayout_main = QtWidgets.QGridLayout(self) buttonbox = QtWidgets.QDialogButtonBox() helpdocs = menu_default.HelpButton('pygmi.rsense.iodefs.importsentinel5p') label_subdata = QtWidgets.QLabel('Product:') label_lonmin = QtWidgets.QLabel('Minimum Longitude:') label_lonmax = QtWidgets.QLabel('Maximum Longitude:') label_latmin = QtWidgets.QLabel('Minimum Latitude:') label_latmax = QtWidgets.QLabel('Maximum Latitude:') buttonbox.setOrientation(QtCore.Qt.Horizontal) buttonbox.setCenterButtons(True) buttonbox.setStandardButtons(buttonbox.Cancel | buttonbox.Ok) self.setWindowTitle(r'Import Sentinel-5P Data') gridlayout_main.addWidget(label_subdata, 0, 0, 1, 1) gridlayout_main.addWidget(self.subdata, 0, 1, 1, 1) gridlayout_main.addWidget(label_lonmin, 1, 0, 1, 1) gridlayout_main.addWidget(self.lonmin, 1, 1, 1, 1) gridlayout_main.addWidget(label_lonmax, 2, 0, 1, 1) gridlayout_main.addWidget(self.lonmax, 2, 1, 1, 1) gridlayout_main.addWidget(label_latmin, 3, 0, 1, 1) gridlayout_main.addWidget(self.latmin, 3, 1, 1, 1) gridlayout_main.addWidget(label_latmax, 4, 0, 1, 1) gridlayout_main.addWidget(self.latmax, 4, 1, 1, 1) gridlayout_main.addWidget(helpdocs, 5, 0, 1, 1) gridlayout_main.addWidget(buttonbox, 5, 1, 1, 3) buttonbox.accepted.connect(self.accept) buttonbox.rejected.connect(self.reject) def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. 
""" if not nodialog: ext = ('Sentinel-5P (*.nc)') self.ifile, self.filt = QtWidgets.QFileDialog.getOpenFileName( self.parent, 'Open File', '.', ext) if self.ifile == '': return False os.chdir(os.path.dirname(self.ifile)) meta = self.get_5P_meta() if meta is None: return False tmp = [] for i in meta: if i in ['latitude', 'longitude']: continue tmp.append(i) self.subdata.clear() self.subdata.addItems(tmp) self.subdata.setCurrentIndex(0) tmp = self.exec_() if tmp != 1: return tmp try: _ = float(self.lonmin.text()) _ = float(self.latmin.text()) _ = float(self.lonmax.text()) _ = float(self.latmax.text()) except ValueError: self.showprocesslog('Value error - abandoning import') return False gdf = self.get_5P_data(meta) if gdf is None: return False dat = {gdf.geom_type.iloc[0]: gdf} self.outdata['Vector'] = dat return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ self.ifile = projdata['ifile'] self.filt = projdata['filt'] chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} projdata['ifile'] = self.ifile projdata['filt'] = self.filt return projdata def get_5P_meta(self): """ Get metadata. Returns ------- meta : Dictionary Dictionary containing metadata. """ dataset = gdal.Open(self.ifile, gdal.GA_ReadOnly) if dataset is None: self.showprocesslog('Problem! Unable to import') self.showprocesslog(os.path.basename(self.ifile)) return None subdata = dataset.GetSubDatasets() meta = {} for i in subdata: tmp = i[1].split() if 'SUPPORT_DATA' in i[0]: continue if 'METADATA' in i[0]: continue if 'time_utc' in i[0]: continue if 'delta_time' in i[0]: continue if 'qa_value' in i[0]: continue if 'precision' in i[0]: continue tmp = tmp[1].replace('//PRODUCT/', '') tmp = tmp.replace('/PRODUCT/', '') tmp = tmp.replace('/', '') meta[tmp] = i dataset = None return meta def get_5P_data(self, meta): """ Get 5P data. Parameters ---------- meta : Dictionary Dictionary containing metadata. Returns ------- gdf : DataFrame geopandas dataframe. 
""" dataset = gdal.Open(meta['latitude'][0], gdal.GA_ReadOnly) rtmp = dataset.GetRasterBand(1) lats = rtmp.ReadAsArray() dataset = None dataset = gdal.Open(meta['longitude'][0], gdal.GA_ReadOnly) rtmp = dataset.GetRasterBand(1) lons = rtmp.ReadAsArray() dataset = None del meta['latitude'] del meta['longitude'] if lats is None: self.showprocesslog('No Latitudes in dataset') return None lats = lats.flatten() lons = lons.flatten() pnts = np.transpose([lons, lats]) lonmin = float(self.lonmin.text()) latmin = float(self.latmin.text()) lonmax = float(self.lonmax.text()) latmax = float(self.latmax.text()) mask = ((lats > latmin) & (lats < latmax) & (lons < lonmax) & (lons > lonmin)) idfile = self.subdata.currentText() dfile = meta[idfile][0] dataset = gdal.Open(dfile, gdal.GA_ReadOnly) rtmp = dataset.GetRasterBand(1) dat = rtmp.ReadAsArray() dataset = None dat1 = dat.flatten() if mask.shape != dat1.shape: return None dat1 = dat1[mask] pnts1 = pnts[mask] pnts1 = pnts1[dat1 != 9.96921e+36] dat1 = dat1[dat1 != 9.96921e+36] if dat1.size == 0: self.showprocesslog(idfile, 'is empty.') return None df = pd.DataFrame({'lon': pnts1[:, 0], 'lat': pnts1[:, 1]}) df['data'] = dat1 gdf = GeoDataFrame(df.drop(['lon', 'lat'], axis=1), geometry=[Point(xy) for xy in zip(df.lon, df.lat)]) # tmp = os.path.join(idir, os.path.basename(ifile).split('T')[0]) # tmp = tmp + '_' + idfile + '.shp' # tmp = tmp.replace('//PRODUCT/', '') # tmp = tmp.replace('/PRODUCT/', '') # tmp = tmp.replace('/', '') # gdf.to_file(tmp) return gdf class ImportShapeData(): """ Import Shapefile Data. Attributes ---------- name : str item name pbar : progressbar reference to a progress bar. parent : parent reference to the parent routine outdata : dictionary dictionary of output datasets ifile : str input file name. Used in main.py """ def __init__(self, parent=None): self.parent = parent self.indata = {} self.outdata = {} self.ifile = '' def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. """ if not nodialog: ext = 'Shapefile (*.shp);;' + 'All Files (*.*)' self.ifile, _ = QtWidgets.QFileDialog.getOpenFileName(self.parent, 'Open File', '.', ext) if self.ifile == '': return False os.chdir(os.path.dirname(self.ifile)) gdf = gpd.read_file(self.ifile) dat = {gdf.geom_type.iloc[0]: gdf} self.outdata['Vector'] = dat return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ self.ifile = projdata['ifile'] chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} projdata['ifile'] = self.ifile return projdata def calculate_toa(dat, showprocesslog=print): """ Top of atmosphere correction. Includes VNIR, SWIR and TIR bands. 
Parameters ---------- dat : Data PyGMI raster dataset Returns ------- out : Data PyGMI raster dataset """ showprocesslog('Calculating top of atmosphere...') datanew = {} for datai in dat: datanew[datai.dataid.split()[1]] = copy.deepcopy(datai) out = [] for i in range(len(dat)): idtmp = 'ImageData'+str(i+1) if i+1 == 3: idtmp += 'N' datai = datanew[idtmp] gain = datai.metadata['Gain'] sunelev = datai.metadata['SolarElev'] jday = datai.metadata['JulianDay'] lrad = (datai.data-1)*gain if i < 9: theta = np.deg2rad(90-sunelev) datai.data = np.pi*lrad*EDIST[jday]**2/(ESUN[i]*np.cos(theta)) else: datai.data = K2[i-9]/np.log(K1[i-9]/lrad+1) datai.data.set_fill_value(datai.nullvalue) dmask = datai.data.mask datai.data = np.ma.array(datai.data.filled(), mask=dmask) out.append(datai) return out def get_data(ifile, piter=iter, showprocesslog=print, extscene=None): """ Load a raster dataset off the disk using the GDAL libraries. It returns the data in a PyGMI data object. Parameters ---------- ifile : str filename to import piter : iter, optional Progress bar iterable. Default is iter showprogresslog : print, optional Routine for displaying messages. Default is print extscene : str or None String used currently to give an option to limit bands in Sentinel-2 Returns ------- dat : PyGMI raster Data dataset imported """ ifile = ifile[:] bfile = os.path.basename(ifile) showprocesslog('Importing', bfile) if 'AST_' in bfile and 'hdf' in bfile.lower(): dat = get_aster_hdf(ifile, piter) elif 'AST_' in bfile and 'zip' in bfile.lower(): dat = get_aster_zip(ifile, piter, showprocesslog) elif bfile[:4] in ['LT04', 'LT05', 'LE07', 'LC08', 'LM05']: dat = get_landsat(ifile, piter, showprocesslog) elif ('.xml' in bfile and '.SAFE' in ifile) or 'Sentinel-2' in extscene: dat = get_sentinel2(ifile, piter, showprocesslog, extscene) elif 'MOD' in bfile and 'hdf' in bfile.lower() and '.006.' in bfile: dat = get_modisv6(ifile, piter) else: dat = None if dat is not None: for i in dat: i.dataid = i.dataid.replace(',', ' ') return dat def get_modis(ifile, showprocesslog=print): """ Get MODIS data. Parameters ---------- ifile : str filename to import showprogresslog : print, optional Routine for displaying messages. 
Default is print Returns ------- dat : PyGMI raster Data dataset imported """ dat = [] ifile = ifile[:] dataset = gdal.Open(ifile, gdal.GA_ReadOnly) subdata = dataset.GetSubDatasets() latentry = [i for i in subdata if 'Latitude' in i[1]] subdata.pop(subdata.index(latentry[0])) dataset = None dataset = gdal.Open(latentry[0][0], gdal.GA_ReadOnly) rtmp = dataset.GetRasterBand(1) lats = rtmp.ReadAsArray() latsdim = ((lats.max()-lats.min())/(lats.shape[0]-1))/2 lonentry = [i for i in subdata if 'Longitude' in i[1]] subdata.pop(subdata.index(lonentry[0])) dataset = None dataset = gdal.Open(lonentry[0][0], gdal.GA_ReadOnly) rtmp = dataset.GetRasterBand(1) lons = rtmp.ReadAsArray() lonsdim = ((lons.max()-lons.min())/(lons.shape[1]-1))/2 lonsdim = latsdim tlx = lons.min()-abs(lonsdim/2) tly = lats.max()+abs(latsdim/2) cols = int((lons.max()-lons.min())/lonsdim)+1 rows = int((lats.max()-lats.min())/latsdim)+1 newx2, newy2 = np.mgrid[0:rows, 0:cols] newx2 = newx2*lonsdim + tlx newy2 = tlx - newy2*latsdim tmp = [] for i in subdata: if 'HDF4_EOS:EOS_SWATH' in i[0]: tmp.append(i) subdata = tmp i = -1 for ifile2, bandid2 in subdata: dataset = None dataset = gdal.Open(ifile2, gdal.GA_ReadOnly) rtmp2 = dataset.ReadAsArray() if rtmp2.shape[-1] == min(rtmp2.shape) and rtmp2.ndim == 3: rtmp2 = np.transpose(rtmp2, (2, 0, 1)) nbands = 1 if rtmp2.ndim == 3: nbands = rtmp2.shape[0] for i2 in range(nbands): rtmp = dataset.GetRasterBand(i2+1) bandid = rtmp.GetDescription() nval = rtmp.GetNoDataValue() i += 1 dat.append(Data()) if rtmp2.ndim == 3: dat[i].data = rtmp2[i2] else: dat[i].data = rtmp2 newx = lons[dat[i].data != nval] newy = lats[dat[i].data != nval] newz = dat[i].data[dat[i].data != nval] if newx.size == 0: dat[i].data = np.zeros((rows, cols)) + nval else: tmp = quickgrid(newx, newy, newz, latsdim, showprocesslog=showprocesslog) mask = np.ma.getmaskarray(tmp) gdat = tmp.data dat[i].data = np.ma.masked_invalid(gdat[::-1]) dat[i].data.mask = mask[::-1] if dat[i].data.dtype.kind == 'i': if nval is None: nval = 999999 nval = int(nval) elif dat[i].data.dtype.kind == 'u': if nval is None: nval = 0 nval = int(nval) else: if nval is None: nval = 1e+20 nval = float(nval) dat[i].data = np.ma.masked_invalid(dat[i].data) dat[i].data.mask = (np.ma.getmaskarray(dat[i].data) | (dat[i].data == nval)) if dat[i].data.mask.size == 1: dat[i].mask = np.ma.getmaskarray(dat[i].data) dat[i].dataid = bandid2+' '+bandid dat[i].nullvalue = nval dat[i].xdim = abs(lonsdim) dat[i].ydim = abs(latsdim) rows, cols = dat[i].data.shape xmin = tlx ymax = tly ymin = ymax - rows*dat[i].ydim xmax = xmin + cols*dat[i].xdim dat[i].extent = [xmin, xmax, ymin, ymax] srs = osr.SpatialReference() srs.ImportFromWkt(dataset.GetProjection()) srs.AutoIdentifyEPSG() srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) dat[i].wkt = srs.ExportToWkt() dataset = None return dat def get_modisv6(ifile, piter=iter): """ Get MODIS v006 data. Parameters ---------- ifile : str filename to import piter : iter, optional Progress bar iterable. 
Default is iter Returns ------- dat : PyGMI raster Data dataset imported """ dat = [] ifile = ifile[:] dataset = gdal.Open(ifile, gdal.GA_ReadOnly) # dmeta = dataset.GetMetadata() subdata = dataset.GetSubDatasets() dataset = None dat = [] nval = 0 for ifile2, bandid2 in subdata: dataset = gdal.Open(ifile2, gdal.GA_ReadOnly) wkt = dataset.GetProjectionRef() if 'Sinusoidal' in wkt: wkt = wkt.replace('PROJCS["unnamed"', 'PROJCS["Sinusoidal"') wkt = wkt.replace('GEOGCS["Unknown datum based upon the custom ' 'spheroid"', 'GEOGCS["GCS_Unknown"') wkt = wkt.replace('DATUM["Not specified ' '(based on custom spheroid)"', 'DATUM["D_Unknown"') wkt = wkt.replace('SPHEROID["Custom spheroid"', 'SPHEROID["S_Unknown"') meta = dataset.GetMetadata() nval = int(meta['_FillValue']) bandid = bandid2.split('] ')[1].split(' (')[0] if 'scale_factor' in meta: scale = float(meta['scale_factor']) else: scale = 1 if 'add_offset' in meta: offset = float(meta['add_offset']) else: offset = 0 rtmp2 = dataset.ReadAsArray() rtmp2 = rtmp2.astype(float) mask = (rtmp2 == nval) if nval == 32767: mask = (rtmp2 > 32700) rtmp2 = rtmp2*scale+offset if mask is not None: rtmp2[mask] = nval dat.append(Data()) dat[-1].data = rtmp2 dat[-1].data = np.ma.masked_invalid(dat[-1].data) dat[-1].data.mask = dat[-1].data.mask | (dat[-1].data == nval) if dat[-1].data.mask.size == 1: dat[-1].data.mask = (np.ma.make_mask_none(dat[-1].data.shape) + dat[-1].data.mask) dat[-1].extent_from_gtr(dataset.GetGeoTransform()) dat[-1].dataid = bandid dat[-1].nullvalue = nval dat[-1].wkt = wkt dat[-1].filename = ifile if 'units' in meta and meta['units'] != 'none': dat[-1].units = '$'+meta['units']+'$' dataset = None return dat def get_landsat(ifilet, piter=iter, showprocesslog=print): """ Get Landsat Data. Parameters ---------- ifilet : str filename to import piter : iter, optional Progress bar iterable. Default is iter showprogresslog : print, optional Routine for displaying messages. 
Default is print Returns ------- out : Data PyGMI raster dataset """ platform = os.path.basename(ifilet)[2: 4] satbands = None if platform == '04' or platform == '05': satbands = {'1': [450, 520], '2': [520, 600], '3': [630, 690], '4': [760, 900], '5': [1550, 1750], '6': [10400, 12500], '7': [2080, 2350]} if platform == '07': satbands = {'1': [450, 520], '2': [520, 600], '3': [630, 690], '4': [770, 900], '5': [1550, 1750], '6': [10400, 12500], '7': [2090, 2350], '8': [520, 900]} if platform == '08': satbands = {'1': [430, 450], '2': [450, 510], '3': [530, 590], '4': [640, 670], '5': [850, 880], '6': [1570, 1650], '7': [2110, 2290], '8': [500, 680], '9': [1360, 1380], '10': [1060, 11190], '11': [11500, 12510]} idir = os.path.dirname(ifilet) if '.tar' in ifilet: with tarfile.open(ifilet) as tar: tarnames = tar.getnames() ifile = next((i for i in tarnames if '_MTL.txt' in i), None) if ifile is None: showprocesslog('Could not find MTL.txt file in tar archive') return None showprocesslog('Extracting tar...') tar.extractall(idir) ifile = os.path.join(idir, ifile) elif '_MTL.txt' in ifilet: ifile = ifilet else: showprocesslog('Input needs to be tar.gz or _MTL.txt') return None files = glob.glob(ifile[:-7]+'*[0-9].tif') showprocesslog('Importing Landsat data...') nval = 0 dat = [] for ifile2 in piter(files): if 'B6_VCID' in ifile2: fext = ifile2[-12:-4] elif ifile2[-6].isdigit(): fext = ifile2[-6:-4] else: fext = ifile2[-5] showprocesslog('Importing Band', fext) dataset = gdal.Open(ifile2, gdal.GA_ReadOnly) if dataset is None: showprocesslog('Problem with band '+fext) continue rtmp = dataset.GetRasterBand(1) dat.append(Data()) dat[-1].data = rtmp.ReadAsArray() dat[-1].data = np.ma.masked_invalid(dat[-1].data) dat[-1].data.mask = dat[-1].data.mask | (dat[-1].data == nval) if dat[-1].data.mask.size == 1: dat[-1].data.mask = (np.ma.make_mask_none(dat[-1].data.shape) + dat[-1].data.mask) dat[-1].extent_from_gtr(dataset.GetGeoTransform()) dat[-1].dataid = 'Band' + fext dat[-1].nullvalue = nval dat[-1].wkt = dataset.GetProjectionRef() dat[-1].filename = ifile bmeta = dat[-1].metadata if satbands is not None: bmeta['WavelengthMin'] = satbands[fext][0] bmeta['WavelengthMax'] = satbands[fext][1] dataset = None if dat == []: dat = None if '.tar' in ifilet: showprocesslog('Cleaning Extracted tar files...') for tfile in piter(tarnames): print(tfile) os.remove(os.path.join(os.path.dirname(ifile), tfile)) print('Import complete') return dat def get_sentinel2(ifile, piter=iter, showprocesslog=print, extscene=None): """ Get Sentinel-2 Data. Parameters ---------- ifile : str filename to import piter : iter, optional Progress bar iterable. Default is iter showprogresslog : print, optional Routine for displaying messages. 
Default is print extscene : str or None String used currently to give an option to limit bands in Sentinel-2 Returns ------- dat : PyGMI raster Data dataset imported """ ifile = ifile[:] dataset = gdal.Open(ifile, gdal.GA_ReadOnly) subdata = dataset.GetSubDatasets() subdata = [i for i in subdata if 'True color' not in i[1]] nval = 0 dat = [] for bfile, _ in subdata: dataset = gdal.Open(bfile, gdal.GA_ReadOnly) showprocesslog('Importing '+os.path.basename(bfile)) if dataset is None: showprocesslog('Problem with '+ifile) continue for i in piter(range(dataset.RasterCount)): rtmp = dataset.GetRasterBand(i+1) bname = rtmp.GetDescription() bmeta = rtmp.GetMetadata() if ('Sentinel-2 Bands Only' in extscene and 'central wavelength' not in bname.lower()): continue if 'WAVELENGTH' in bmeta and 'BANDWIDTH' in bmeta: wlen = float(bmeta['WAVELENGTH']) bwidth = float(bmeta['BANDWIDTH']) bmeta['WavelengthMin'] = wlen - bwidth/2 bmeta['WavelengthMax'] = wlen + bwidth/2 # self.showprocesslog('Importing '+bname) dat.append(Data()) dat[-1].data = rtmp.ReadAsArray() dat[-1].data = np.ma.masked_invalid(dat[-1].data) dat[-1].data.mask = dat[-1].data.mask | (dat[-1].data == nval) if dat[-1].data.mask.size == 1: dat[-1].mask = np.ma.getmaskarray(dat[-1].data) dat[-1].data = dat[-1].data.astype(float) dat[-1].data = dat[-1].data / 10000. dat[-1].dataid = bname dat[-1].nullvalue = nval dat[-1].extent_from_gtr(dataset.GetGeoTransform()) dat[-1].wkt = dataset.GetProjectionRef() dat[-1].filename = ifile dat[-1].units = 'Reflectance' dat[-1].metadata.update(bmeta) if 'SOLAR_IRRADIANCE_UNIT' in bmeta: dat[-1].units = bmeta['SOLAR_IRRADIANCE_UNIT'] if dat == []: dat = None return dat def get_aster_zip(ifile, piter=iter, showprocesslog=print): """ Get ASTER zip Data. Parameters ---------- ifile : str filename to import piter : iter, optional Progress bar iterable. Default is iter showprogresslog : print, optional Routine for displaying messages. 
Default is print Returns ------- dat : PyGMI raster Data dataset imported """ satbands = {'1': [520, 600], '2': [630, 690], '3N': [780, 860], '3B': [780, 860], '4': [1600, 1700], '5': [2145, 2185], '6': [2185, 2225], '7': [2235, 2285], '8': [2295, 2365], '9': [2360, 2430], '10': [8125, 8475], '11': [8475, 8825], '12': [8925, 9275], '13': [10250, 10950], '14': [10950, 11650]} if 'AST_07' in ifile: scalefactor = 0.001 units = 'Surface Reflectance' elif 'AST_05' in ifile: scalefactor = 0.001 units = 'Surface Emissivity' elif 'AST_08' in ifile: scalefactor = 0.1 units = 'Surface Kinetic Temperature' # elif 'AST_09' in ifile: # scalefactor = None else: return None showprocesslog('Extracting zip...') idir = os.path.dirname(ifile) zfile = zipfile.ZipFile(ifile) zipnames = zfile.namelist() zfile.extractall(idir) zfile.close() dat = [] nval = 0 for zfile in piter(zipnames): if zfile.lower()[-4:] != '.tif': continue dataset = gdal.Open(os.path.join(idir, zfile), gdal.GA_ReadOnly) if dataset is None: showprocesslog('Problem with '+zfile) continue dataset = gdal.AutoCreateWarpedVRT(dataset) rtmp = dataset.GetRasterBand(1) dat.append(Data()) dat[-1].data = rtmp.ReadAsArray() dat[-1].data = np.ma.masked_invalid(dat[-1].data)*scalefactor dat[-1].data.mask = dat[-1].data.mask | (dat[-1].data == nval) if dat[-1].data.mask.size == 1: dat[-1].mask = np.ma.getmaskarray(dat[-1].data) dat[-1].extent_from_gtr(dataset.GetGeoTransform()) dat[-1].dataid = zfile[zfile.index('Band'):zfile.index('.tif')] dat[-1].nullvalue = nval dat[-1].wkt = dataset.GetProjectionRef() dat[-1].filename = ifile dat[-1].units = units bmeta = dat[-1].metadata if satbands is not None: fext = dat[-1].dataid[4:] bmeta['WavelengthMin'] = satbands[fext][0] bmeta['WavelengthMax'] = satbands[fext][1] dataset = None showprocesslog('Cleaning Extracted zip files...') for zfile in zipnames: os.remove(os.path.join(idir, zfile)) return dat def get_aster_hdf(ifile, piter=iter): """ Get ASTER hdf Data. Parameters ---------- ifile : str filename to import piter : iter, optional Progress bar iterable. 
Default is iter Returns ------- dat : PyGMI raster Data dataset imported """ satbands = {'1': [520, 600], '2': [630, 690], '3N': [780, 860], '3B': [780, 860], '4': [1600, 1700], '5': [2145, 2185], '6': [2185, 2225], '7': [2235, 2285], '8': [2295, 2365], '9': [2360, 2430], '10': [8125, 8475], '11': [8475, 8825], '12': [8925, 9275], '13': [10250, 10950], '14': [10950, 11650]} ifile = ifile[:] if 'AST_07' in ifile: ptype = '07' elif 'AST_L1T' in ifile: ptype = 'L1T' elif 'AST_05' in ifile: ptype = '05' elif 'AST_08' in ifile: ptype = '08' else: return None dataset = gdal.Open(ifile, gdal.GA_ReadOnly) meta = dataset.GetMetadata() if ptype == 'L1T': ucc = {'ImageData1': float(meta['INCL1']), 'ImageData2': float(meta['INCL2']), 'ImageData3N': float(meta['INCL3N']), 'ImageData4': float(meta['INCL4']), 'ImageData5': float(meta['INCL5']), 'ImageData6': float(meta['INCL6']), 'ImageData7': float(meta['INCL7']), 'ImageData8': float(meta['INCL8']), 'ImageData9': float(meta['INCL9']), 'ImageData10': float(meta['INCL10']), 'ImageData11': float(meta['INCL11']), 'ImageData12': float(meta['INCL12']), 'ImageData13': float(meta['INCL13']), 'ImageData14': float(meta['INCL14'])} solarelev = float(meta['SOLARDIRECTION'].split()[1]) cdate = meta['CALENDARDATE'] if len(cdate) == 8: fmt = '%Y%m%d' else: fmt = '%Y-%m-%d' dte = datetime.datetime.strptime(cdate, fmt) jdate = dte.timetuple().tm_yday subdata = dataset.GetSubDatasets() if ptype == '07': subdata = [i for i in subdata if 'SurfaceReflectance' in i[0]] scalefactor = 0.001 units = 'Surface Reflectance' elif ptype == '05': subdata = [i for i in subdata if 'SurfaceEmissivity' in i[0]] scalefactor = 0.001 units = 'Surface Emissivity' elif ptype == '08': scalefactor = 0.1 units = 'Surface Kinetic Temperature' elif ptype == 'L1T': subdata = [i for i in subdata if 'ImageData' in i[0]] scalefactor = 1 units = '' else: return None dat = [] nval = 0 calctoa = False for bfile, bandid in piter(subdata): if 'QA' in bfile: continue if ptype == 'L1T' and 'ImageData3B' in bfile: continue bandid2 = bandid[bandid.lower().index(']')+1: bandid.lower().index('(')].strip() dataset = gdal.Open(bfile, gdal.GA_ReadOnly) tmpds = gdal.AutoCreateWarpedVRT(dataset) if tmpds is None: continue dat.append(Data()) dat[-1].data = tmpds.ReadAsArray() if ptype == '08': dat[-1].data[dat[-1].data == 2000] = nval dat[-1].data = np.ma.masked_invalid(dat[-1].data)*scalefactor dat[-1].data.mask = dat[-1].data.mask | (dat[-1].data == nval) if dat[-1].data.mask.size == 1: dat[-1].mask = np.ma.getmaskarray(dat[-1].data) dat[-1].dataid = bandid2 dat[-1].nullvalue = nval dat[-1].extent_from_gtr(tmpds.GetGeoTransform()) dat[-1].wkt = tmpds.GetProjectionRef() dat[-1].metadata['SolarElev'] = solarelev dat[-1].metadata['JulianDay'] = jdate dat[-1].metadata['CalendarDate'] = cdate dat[-1].metadata['ShortName'] = meta['SHORTNAME'] dat[-1].filename = ifile dat[-1].units = units bmeta = dat[-1].metadata if satbands is not None: fext = dat[-1].dataid[4:].split()[0] bmeta['WavelengthMin'] = satbands[fext][0] bmeta['WavelengthMax'] = satbands[fext][1] if ptype == 'L1T' and 'ImageData' in ifile: dat[-1].metadata['Gain'] = ucc[ifile[ifile.rindex('ImageData'):]] calctoa = True if dat == []: dat = None if ptype == 'L1T' and calctoa is True: dat = calculate_toa(dat) return dat def get_aster_ged(ifile): """ Get ASTER GED data. 
Parameters ---------- ifile : str filename to import Returns ------- dat : PyGMI raster Data dataset imported """ dat = [] ifile = ifile[:] dataset = gdal.Open(ifile, gdal.GA_ReadOnly) subdata = dataset.GetSubDatasets() i = -1 for ifile2, bandid2 in subdata: dataset = gdal.Open(ifile2, gdal.GA_ReadOnly) bandid = bandid2 units = '' if 'ASTER_GDEM' in bandid2: bandid = 'ASTER GDEM' units = 'meters' if 'Land_Water_Map' in bandid2: bandid = 'Land_water_map' if 'Observations' in bandid2: bandid = 'Observations' units = 'number per pixel' rtmp2 = dataset.ReadAsArray() if rtmp2.shape[-1] == min(rtmp2.shape) and rtmp2.ndim == 3: rtmp2 = np.transpose(rtmp2, (2, 0, 1)) nbands = 1 if rtmp2.ndim == 3: nbands = rtmp2.shape[0] for i2 in range(nbands): nval = -9999 i += 1 dat.append(Data()) if rtmp2.ndim == 3: dat[i].data = rtmp2[i2] else: dat[i].data = rtmp2 dat[i].data = np.ma.masked_invalid(dat[i].data) dat[i].data.mask = (np.ma.getmaskarray(dat[i].data) | (dat[i].data == nval)) if dat[i].data.mask.size == 1: dat[-1].mask = np.ma.getmaskarray(dat[-1].data) dat[i].data = dat[i].data * 1.0 if 'Emissivity/Mean' in bandid2: bandid = 'Emissivity_mean_band_'+str(10+i2) dat[i].data = dat[i].data * 0.001 if 'Emissivity/SDev' in bandid2: bandid = 'Emissivity_std_dev_band_'+str(10+i2) dat[i].data = dat[i].data * 0.0001 if 'NDVI/Mean' in bandid2: bandid = 'NDVI_mean' dat[i].data = dat[i].data * 0.01 if 'NDVI/SDev' in bandid2: bandid = 'NDVI_std_dev' dat[i].data = dat[i].data * 0.01 if 'Temperature/Mean' in bandid2: bandid = 'Temperature_mean' units = 'Kelvin' dat[i].data = dat[i].data * 0.01 if 'Temperature/SDev' in bandid2: bandid = 'Temperature_std_dev' units = 'Kelvin' dat[i].data = dat[i].data * 0.01 dat[i].dataid = bandid dat[i].nullvalue = nval dat[i].extent_from_gtr(dataset.GetGeoTransform()) dat[i].units = units dat[i].wkt = dataset.GetProjectionRef() dataset = None return dat def get_aster_ged_bin(ifile): """ Get ASTER GED binary format. Emissivity_Mean_Description: Mean Emissivity for each pixel on grid-box using all ASTER data from 2000-2010 Emissivity_SDev_Description: Emissivity Standard Deviation for each pixel on grid-box using all ASTER data from 2000-2010 Temperature_Mean_Description: Mean Temperature (K) for each pixel on grid-box using all ASTER data from 2000-2010 Temperature_SDev_Description: Temperature Standard Deviation for each pixel on grid-box using all ASTER data from 2000-2010 NDVI_Mean_Description: Mean NDVI for each pixel on grid-box using all ASTER data from 2000-2010 NDVI_SDev_Description: NDVI Standard Deviation for each pixel on grid-box using all ASTER data from 2000-2010 Land_Water_Map_LWmap_Description: Land Water Map using ASTER visible bands Observations_NumObs_Description: Number of values used in computing mean and standard deviation for each pixel. 
Geolocation_Latitude_Description: Latitude Geolocation_Longitude_Description: Longitude ASTER_GDEM_ASTGDEM_Description: ASTER GDEM resampled to NAALSED Parameters ---------- ifile : str filename to import Returns ------- dat : PyGMI raster Data dataset imported """ dat = [] nval = -9999 bandid = {} bandid[0] = 'Emissivity_mean_band_10' bandid[1] = 'Emissivity_mean_band_11' bandid[2] = 'Emissivity_mean_band_12' bandid[3] = 'Emissivity_mean_band_13' bandid[4] = 'Emissivity_mean_band_14' bandid[5] = 'Emissivity_std_dev_band_10' bandid[6] = 'Emissivity_std_dev_band_11' bandid[7] = 'Emissivity_std_dev_band_12' bandid[8] = 'Emissivity_std_dev_band_13' bandid[9] = 'Emissivity_std_dev_band_14' bandid[10] = 'Temperature_mean' bandid[11] = 'Temperature_std_dev' bandid[12] = 'NDVI_mean' bandid[13] = 'NDVI_std_dev' bandid[14] = 'Land_water_map' bandid[15] = 'Observations' bandid[16] = 'Latitude' bandid[17] = 'Longitude' bandid[18] = 'ASTER GDEM' scale = [0.001, 0.001, 0.001, 0.001, 0.001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.01, 0.01, 0.01, 0.01, 1, 1, 0.001, 0.001, 1] units = ['', '', '', '', '', '', '', '', '', '', 'Kelvin', 'Kelvin', '', '', '', 'Number per pixel', 'degrees', 'degrees', 'meters'] data = np.fromfile(ifile, dtype=np.int32) rows_cols = int((data.size/19)**0.5) data.shape = (19, rows_cols, rows_cols) lats = data[16]*scale[16] lons = data[17]*scale[17] latsdim = (lats.max()-lats.min())/(lats.shape[0]-1) lonsdim = (lons.max()-lons.min())/(lons.shape[0]-1) tlx = lons.min()-abs(lonsdim/2) tly = lats.max()+abs(latsdim/2) for i in range(19): dat.append(Data()) dat[i].data = data[i]*scale[i] dat[i].dataid = bandid[i] dat[i].nullvalue = nval*scale[i] dat[i].xdim = lonsdim dat[i].ydim = latsdim dat[i].units = units[i] rows, cols = dat[i].data.shape xmin = tlx ymax = tly ymin = ymax - rows*dat[i].ydim xmax = xmin + cols*dat[i].xdim dat[i].extent = [xmin, xmax, ymin, ymax] dat.pop(17) dat.pop(16) return dat def _testfn(): """Test routine.""" ifile = r'D:/Workdata/Remote Sensing/Landsat/LC08_L1TP_176080_20190820_20190903_01_T1.tar.gz' # ifile = r'D:/Workdata/Remote Sensing/Landsat/LE071700782002070201T1-SC20200519113053.tar.gz' # ifile = r'D:/Workdata/Remote Sensing/Landsat/LT051700781997071201T1-SC20200519120230.tar.gz' ifile = r'D:\Workdata\Remote Sensing\ASTER\old\AST_07XT_00305282005083844_20180604061623_15509.hdf' # ifile = r'D:\Workdata\Remote Sensing\ASTER\old\AST_07XT_00309042002082052_20200518021739_29313.zip' dat = get_data(ifile) # ifile = r'C:/Work/Workdata/Remote Sensing/Modis/MOD11A2.A2013073.h20v11.006.2016155170529.hdf' # dat = get_modisv6(ifile) breakpoint() if __name__ == "__main__": _testfn()
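calculate_toa above converts ASTER L1T digital numbers to at-sensor radiance and then either to top-of-atmosphere reflectance (bands 1-9) or to brightness temperature via the K1/K2 constants (TIR bands). A single-band sketch of those two formulas follows; it assumes the EDIST, ESUN, K1 and K2 tables defined at the top of iodefs.py are in scope.

import numpy as np

def dn_to_toa(dn, gain, sun_elev_deg, jday, band_index):
    """Single-band equivalent of the loop body in calculate_toa().

    dn         : array of digital numbers for one band
    band_index : 0-8 -> VNIR/SWIR TOA reflectance, 9+ -> TIR brightness temperature (K)
    """
    lrad = (dn - 1.0) * gain                     # DN -> at-sensor radiance
    if band_index < 9:
        theta = np.deg2rad(90.0 - sun_elev_deg)  # solar zenith angle
        return np.pi * lrad * EDIST[jday] ** 2 / (ESUN[band_index] * np.cos(theta))
    return K2[band_index - 9] / np.log(K1[band_index - 9] / lrad + 1.0)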
gpl-3.0
nhejazi/scikit-learn
examples/linear_model/plot_robust_fit.py
147
3050
""" Robust linear estimator fitting =============================== Here a sine function is fit with a polynomial of order 3, for values close to zero. Robust fitting is demoed in different situations: - No measurement errors, only modelling errors (fitting a sine with a polynomial) - Measurement errors in X - Measurement errors in y The median absolute deviation to non corrupt new data is used to judge the quality of the prediction. What we can see that: - RANSAC is good for strong outliers in the y direction - TheilSen is good for small outliers, both in direction X and y, but has a break point above which it performs worse than OLS. - The scores of HuberRegressor may not be compared directly to both TheilSen and RANSAC because it does not attempt to completely filter the outliers but lessen their effect. """ from matplotlib import pyplot as plt import numpy as np from sklearn.linear_model import ( LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor) from sklearn.metrics import mean_squared_error from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline np.random.seed(42) X = np.random.normal(size=400) y = np.sin(X) # Make sure that it X is 2D X = X[:, np.newaxis] X_test = np.random.normal(size=200) y_test = np.sin(X_test) X_test = X_test[:, np.newaxis] y_errors = y.copy() y_errors[::3] = 3 X_errors = X.copy() X_errors[::3] = 3 y_errors_large = y.copy() y_errors_large[::3] = 10 X_errors_large = X.copy() X_errors_large[::3] = 10 estimators = [('OLS', LinearRegression()), ('Theil-Sen', TheilSenRegressor(random_state=42)), ('RANSAC', RANSACRegressor(random_state=42)), ('HuberRegressor', HuberRegressor())] colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'} linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'} lw = 3 x_plot = np.linspace(X.min(), X.max()) for title, this_X, this_y in [ ('Modeling Errors Only', X, y), ('Corrupt X, Small Deviants', X_errors, y), ('Corrupt y, Small Deviants', X, y_errors), ('Corrupt X, Large Deviants', X_errors_large, y), ('Corrupt y, Large Deviants', X, y_errors_large)]: plt.figure(figsize=(5, 4)) plt.plot(this_X[:, 0], this_y, 'b+') for name, estimator in estimators: model = make_pipeline(PolynomialFeatures(3), estimator) model.fit(this_X, this_y) mse = mean_squared_error(model.predict(X_test), y_test) y_plot = model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name], linewidth=lw, label='%s: error = %.3f' % (name, mse)) legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data' legend = plt.legend(loc='upper right', frameon=False, title=legend_title, prop=dict(size='x-small')) plt.xlim(-4, 10.2) plt.ylim(-2, 10.2) plt.title(title) plt.show()
bsd-3-clause
keras-team/autokeras
autokeras/adapters/output_adapters.py
1
1737
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import tensorflow as tf from autokeras.engine import adapter as adapter_module class HeadAdapter(adapter_module.Adapter): def __init__(self, name, **kwargs): super().__init__(**kwargs) self.name = name def check(self, dataset): supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series) if not isinstance(dataset, supported_types): raise TypeError( "Expect the target data of {name} to be tf.data.Dataset," " np.ndarray, pd.DataFrame or pd.Series, but got {type}.".format( name=self.name, type=type(dataset) ) ) def convert_to_dataset(self, dataset, batch_size): if isinstance(dataset, pd.DataFrame): dataset = dataset.values if isinstance(dataset, pd.Series): dataset = dataset.values return super().convert_to_dataset(dataset, batch_size) class ClassificationAdapter(HeadAdapter): pass class RegressionAdapter(HeadAdapter): pass class SegmentationHeadAdapter(ClassificationAdapter): pass
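HeadAdapter.convert_to_dataset above normalizes pandas targets to NumPy before handing off to the base Adapter, whose implementation is not shown here. The sketch below reproduces just that normalization plus a plausible tf.data batching step; the from_tensor_slices path is an assumption, not AutoKeras's actual base-class code.

import numpy as np
import pandas as pd
import tensorflow as tf

def targets_to_dataset(targets, batch_size):
    """Mirror the DataFrame/Series -> ndarray step of HeadAdapter.convert_to_dataset."""
    if isinstance(targets, (pd.DataFrame, pd.Series)):
        targets = targets.values
    if isinstance(targets, np.ndarray):
        return tf.data.Dataset.from_tensor_slices(targets).batch(batch_size)
    return targets  # assumed to already be a tf.data.Dataset

ds = targets_to_dataset(pd.Series([0, 1, 0, 1]), batch_size=2)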
apache-2.0
Jimmy-Morzaria/scikit-learn
sklearn/feature_selection/__init__.py
244
1088
""" The :mod:`sklearn.feature_selection` module implements feature selection algorithms. It currently includes univariate filter selection methods and the recursive feature elimination algorithm. """ from .univariate_selection import chi2 from .univariate_selection import f_classif from .univariate_selection import f_oneway from .univariate_selection import f_regression from .univariate_selection import SelectPercentile from .univariate_selection import SelectKBest from .univariate_selection import SelectFpr from .univariate_selection import SelectFdr from .univariate_selection import SelectFwe from .univariate_selection import GenericUnivariateSelect from .variance_threshold import VarianceThreshold from .rfe import RFE from .rfe import RFECV __all__ = ['GenericUnivariateSelect', 'RFE', 'RFECV', 'SelectFdr', 'SelectFpr', 'SelectFwe', 'SelectKBest', 'SelectPercentile', 'VarianceThreshold', 'chi2', 'f_classif', 'f_oneway', 'f_regression']
bsd-3-clause
start-jsk/jsk_apc
demos/grasp_data_generator/grasp_data_generator/datasets/semantic_dataset.py
1
4600
import argparse import matplotlib.pyplot as plt import numpy as np import os import os.path as osp import yaml import chainer import imgaug.augmenters as iaa from imgaug.parameters import Deterministic import scipy.misc from sklearn.model_selection import train_test_split from chainercv.visualizations import vis_semantic_segmentation filepath = osp.dirname(osp.realpath(__file__)) class SemanticRealAnnotatedDataset(chainer.dataset.DatasetMixin): def __init__( self, split='train', random_state=1234, imgaug=True, test_size=0.1, ): self.split = split ids = sorted(os.listdir(self.data_dir)) ids = [d for d in ids if osp.isdir(osp.join(self.data_dir, d))] ids_train, ids_val = train_test_split( ids, test_size=test_size, random_state=random_state) ids_train = sorted(ids_train) ids_val = sorted(ids_val) self._ids = {'all': ids, 'train': ids_train, 'val': ids_val} with open(osp.join(self.data_dir, 'label_names.yaml')) as f: self.label_names = yaml.load(f) self.label_names = ['background'] + self.label_names st = lambda x: iaa.Sometimes(0.3, x) # NOQA if imgaug: self.color_aug = iaa.Sequential( [ st(iaa.InColorspace( 'HSV', children=iaa.WithChannels([1, 2], iaa.Multiply([0.5, 2])))), iaa.WithChannels([0, 1], iaa.Multiply([1, 1.5])), ], random_order=False, random_state=random_state) self.aug = iaa.Sequential( [ iaa.Affine( cval=0, order=0, rotate=0, mode='constant'), iaa.Fliplr(0) ], random_order=False, random_state=random_state) else: self.aug = None def __len__(self): return len(self._ids[self.split]) def get_example(self, i): data_id = self._ids[self.split][i] datum_dir = osp.join(self.data_dir, data_id) img = scipy.misc.imread(osp.join(datum_dir, 'rgb.png')) ins_label = np.load(osp.join(datum_dir, 'ins_imgs.npz'))['ins_imgs'] cls_ids = np.array( yaml.load(open(osp.join(datum_dir, 'labels.yaml'))), dtype=np.int32) H, W, _ = img.shape label = - np.ones((H, W), dtype=np.int32) for ins_lbl, cls_id in zip(ins_label, cls_ids): label[ins_lbl == 1] = cls_id if self.aug: aug_rotate_angle = np.random.uniform(-180, 180) fliplr = np.random.uniform() > 0.5 img = self.color_aug.augment_image(img) aug = self.aug.to_deterministic() aug[0].order = Deterministic(1) aug[0].rotate = Deterministic(-1 * aug_rotate_angle) aug[1].p = Deterministic(1 if fliplr else 0) img = aug.augment_image(img) aug[0].order = Deterministic(0) label = aug.augment_image(label) img = img.astype(np.float32) label = label + 1 return img, label def visualize(self, i): img, label = self.get_example(i) img = img.transpose((2, 0, 1)) ax, legend_handles = vis_semantic_segmentation( img, label, label_names=self.label_names, alpha=0.8) ax.legend(handles=legend_handles, bbox_to_anchor=(1, 1), loc=2) plt.show() class SemanticRealAnnotatedDatasetV1(SemanticRealAnnotatedDataset): data_dir = osp.join(filepath, '../../data/evaluation_data/20181231_194442') class SemanticRealAnnotatedDatasetV2(SemanticRealAnnotatedDataset): data_dir = osp.join(filepath, '../../data/evaluation_data/20190107_142843') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--aug', '-a', action='store_true') parser.add_argument('--dataset', choices=['v1', 'v2'], default='v1', help='Dataset version') args = parser.parse_args() if args.dataset == 'v1': dataset = SemanticRealAnnotatedDatasetV1(split='all', imgaug=args.aug) elif args.dataset == 'v2': dataset = SemanticRealAnnotatedDatasetV2(split='all', imgaug=args.aug) else: raise ValueError( 'Given dataset is not supported: {}'.format(args.dataset)) for i in range(0, len(dataset)): dataset.visualize(i)
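get_example above paints each instance mask into a dense label image with its class id and then shifts everything by one so 'background' occupies index 0. Below is a NumPy-only sketch of that step, with made-up mask shapes for illustration.

import numpy as np

def instances_to_semantic(ins_label, cls_ids, height, width):
    """Collapse (n_instances, H, W) binary masks into one int32 semantic label image."""
    label = -np.ones((height, width), dtype=np.int32)
    for mask, cls_id in zip(ins_label, cls_ids):
        label[mask == 1] = cls_id
    return label + 1  # unlabelled pixels (-1) become 0 = background

masks = np.zeros((2, 4, 4), dtype=np.int32)
masks[0, :2, :2] = 1
masks[1, 2:, 2:] = 1
print(instances_to_semantic(masks, np.array([3, 7], dtype=np.int32), 4, 4))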
bsd-3-clause
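A minimal usage sketch for the dataset classes above; the import name dataset_module is a placeholder, and it assumes the evaluation_data directories referenced by data_dir exist on disk.

# Minimal usage sketch for the dataset classes above; 'dataset_module' is a
# placeholder import name, and the evaluation_data directories are assumed
# to exist as laid out by data_dir.
from dataset_module import SemanticRealAnnotatedDatasetV1

dataset = SemanticRealAnnotatedDatasetV1(split='val', imgaug=False)
print('val split size:', len(dataset))

img, label = dataset.get_example(0)
print(img.shape, img.dtype)      # (H, W, 3) float32 RGB image
print(label.shape, label.dtype)  # (H, W) int32 labels, 0 = background
dataset.visualize(0)             # chainercv overlay with a class legend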
jhlch/sparklingpandas
sparklingpandas/__init__.py
1
2793
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SparklingPandas provides support for a Pandas-like API on Spark. Spark
DataFrames do not provide the API that Pandas users are used to in data
frames. In order to provide an API similar to Pandas' DataFrames or R's
DataFrames we provide the classes [[prdd]]
"""
"""from sparklingpandas.dataframe import DataFrame
from sparklingpandas.pcontext import PSparkContext
__all__ = ["DataFrame", "PSparkContext"]
"""
import os
import sys

if 'IS_TEST' not in os.environ and "JARS" not in os.environ:
    VERSION = '0.0.4'
    JAR_FILE = 'sparklingpandas-assembly-' + VERSION + '-SNAPSHOT.jar'
    my_location = os.path.dirname(os.path.realpath(__file__))
    prefixes = [
        # For development, use the sbt target scala-2.10 first
        # since the init script is in sparklingpandas move up one dir
        os.path.join(my_location, '../target/scala-2.10/'),
        # Also try the present working directory
        os.path.join(os.getcwd(), '../target/scala-2.10/'),
        os.path.join(os.getcwd(), 'target/scala-2.10/'),
        # For "production" look at some places where we might be installed
        # sys.prefix gives us /usr on many systems and our jars end up in
        # /usr/local
        os.path.join(my_location, '../current-release/'),
        os.path.join(sys.prefix, "local/jars/"),
        # For virtual env our jars end up directly under sys.prefix
        os.path.join(sys.prefix, "jars/"),
        # Since we are installed in a libs/python-version/sparkling pandas
        # directory, jump three up and go to the jars file.
        os.path.join(my_location, '../../../jars/')]
    # list() so the candidates can be indexed (and reported) on Python 3 too.
    jars = list(map(lambda prefix: os.path.join(prefix, JAR_FILE), prefixes))
    try:
        jar = list(filter(lambda path: os.path.exists(path), jars))[0]
    except IndexError:
        # Report every searched location; 'jar' is unbound in this branch.
        raise IOError("Failed to find jars " + str(jars))
    os.environ["JARS"] = jar
    os.environ["PYSPARK_SUBMIT_ARGS"] = ("--jars %s --driver-class-path %s" +
                                         " pyspark-shell") % (jar, jar)
apache-2.0
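The __init__.py above only runs its jar search when neither IS_TEST nor JARS is set, so the discovery can be bypassed by exporting JARS up front; a small sketch of that, with a placeholder jar path:

# Point SparklingPandas at an explicit assembly jar so the prefix search in
# the __init__.py above never runs (it is skipped whenever JARS is set).
import os

jar = "/opt/jars/sparklingpandas-assembly-0.0.4-SNAPSHOT.jar"  # placeholder path
os.environ["JARS"] = jar
os.environ["PYSPARK_SUBMIT_ARGS"] = (
    "--jars %s --driver-class-path %s pyspark-shell" % (jar, jar))

import sparklingpandas  # noqa: E402  (imports without touching the search prefixes)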
ichuang/sympy
examples/intermediate/sample.py
3
3446
""" Utility functions for plotting sympy functions. See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d graphing functions using matplotlib. """ from numpy import repeat, arange, empty, ndarray, array from sympy import Symbol, Basic, Rational, I, sympify def sample2d(f, x_args): """ Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x**2. x_args is an interval given in the form (var, min, max, n) """ try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpretted as a SymPy function") try: x, x_min, x_max, x_n = x_args except AttributeError: raise ValueError("x_args must be a tuple of the form (var, min, max, n)") x_l = float(x_max - x_min) x_d = x_l/float(x_n) X = arange(float(x_min), float(x_max)+x_d, x_d) Y = empty(len(X)) for i in range(len(X)): try: Y[i] = float(f.subs(x, X[i])) except TypeError: Y[i] = None return X, Y def sample3d(f, x_args, y_args): """ Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x**2 + y**2. x_args and y_args are intervals given in the form (var, min, max, n) """ x, x_min, x_max, x_n = None, None, None, None y, y_min, y_max, y_n = None, None, None, None try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args y, y_min, y_max, y_n = y_args except AttributeError: raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)") x_l = float(x_max - x_min) x_d = x_l/float(x_n) x_a = arange(float(x_min), float(x_max)+x_d, x_d) y_l = float(y_max - y_min) y_d = y_l/float(y_n) y_a = arange(float(y_min), float(y_max)+y_d, y_d) def meshgrid(x, y): """ Taken from matplotlib.mlab.meshgrid. """ x = array(x) y = array(y) numRows, numCols = len(y), len(x) x.shape = 1, numCols X = repeat(x, numRows, 0) y.shape = numRows, 1 Y = repeat(y, numCols, 1) return X, Y X, Y = meshgrid(x_a, y_a) Z = ndarray((len(X), len(X[0]))) for j in range(len(X)): for k in range(len(X[0])): try: Z[j][k] = float( f.subs(x, X[j][k]).subs(y, Y[j][k]) ) except (TypeError, NotImplementedError): Z[j][k] = 0 return X, Y, Z def sample(f, *var_args): """ Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with matlab (matplotlib) syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x**2. var_args are intervals for each variable given in the form (var, min, max, n) """ if len(var_args) == 1: return sample2d(f, var_args[0]) elif len(var_args) == 2: return sample3d(f, var_args[0], var_args[1]) else: raise ValueError("Only 2d and 3d sampling are supported at this time.")
bsd-3-clause
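A short usage sketch for the sampling helpers above; importing them as 'from sample import ...' and the chosen symbols and intervals are assumptions made for this example.

# Illustrative use of sample2d/sample3d from the file above; the module name
# 'sample' and the symbols/intervals are assumptions, not part of the file.
import matplotlib.pyplot as plt
from sympy import Symbol

from sample import sample2d, sample3d

x = Symbol('x')
y = Symbol('y')

X, Y = sample2d(x**2, (x, -3, 3, 60))    # 60 steps of x**2 on [-3, 3]
plt.plot(X, Y)
plt.show()

# Z is a 2d grid suitable for a contour or surface plot.
X2, Y2, Z = sample3d(x**2 + y**2, (x, -2, 2, 40), (y, -2, 2, 40))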
jmnybl/keywords_webgui
keywords.py
1
9893
import sys import json import conllutil3 as cu import sklearn.feature_extraction from sklearn.svm import LinearSVC import argparse import requests from collections import defaultdict import jinja2 from random import shuffle import traceback TMPDIR="tmp_dir/" RESDIR="static/results/" #def formulate_query(words,random,lemma): ## words=['"'+w+'"' for w in words] # escape special characters, not able use this if this will be dep_search queries TODO: fix dep_search lemma # if lemma: # q="|".join("L="+w for w in words) # else: # q="|".join(words) # if random: # negate the query # #q="!(_ + "+q.replace("|","&")+")" # q="_ -> !("+q.replace("|","&")+")" # print(q) # return q def collect_data(query,stopwords=set(),case_sensitive=False,lemma=False,adjective=False,max_sent=10000): """ If random=True, use random sentences not containing the given words stopwords is a set of words which should be masked (removed) """ results=[] sent=[] r=requests.get("http://epsilon-it.utu.fi/dep_search_webapi",params={"db":"PBV4", "search":query, "case":case_sensitive, "retmax":max_sent, "shuffle":True},stream=True) #print("Getting",r.url,file=sys.stderr) for hit in r.iter_lines(): # hit is a line hit=hit.decode("utf-8").strip() #print("HIT:::",hit,file=sys.stderr) if not hit: # sentence break if sent: results.append(" ".join(sent)) sent=[] continue if hit.startswith("#"): continue else: hit=hit.split("\t") if adjective==True and hit[cu.CPOS]!="ADJ": continue if lemma: if hit[cu.LEMMA].lower().replace("-","").replace("#","") not in stopwords: # case insensitive lemmas sent.append(hit[cu.LEMMA].lower()) else: if hit[cu.FORM].lower() not in stopwords: sent.append(hit[cu.FORM].lower()) else: if sent: results.append(" ".join(sent)) return results def collect_data_korp(words=[],stopwords=set(),corpus="s24_001,s24_002,s24_003,s24_004,s24_005,s24_006,s24_007,s24_008,s24_009,s24_010",random=False,case_sensitive=False,lemma=False,adjective=False,max_sent=10000): """ If random=True, use random sentences not containing the given words stopwords is a set of words which should be masked (removed) """ if lemma: form="lemma" else: form="word" if case_sensitive: case="" else: case="(?i)" if random: neg="!" else: neg="" expressions=[] for word in words: word=word.replace(":","\:").replace(")","\)").replace("(","\(").replace("|","\|") expressions.append('[{N}({F} = "{C}{W}")]'.format(N=neg,F=form,C=case,W=word)) # '([word = "(?i)kreikka"]|[word = "(?i)kreikkalainen"])' cqp_query="|".join(e for e in expressions) extra='&defaultcontext=1+sentence&defaultwithin=sentence&show=sentence,paragraph,lemma,pos&show_struct=sentence_id&start=0&end={M}'.format(M=max_sent) url="https://korp.csc.fi/cgi-bin/korp.cgi?command={command}{extra_param}&corpus={C}&cqp={cqp}".format(command="query",extra_param=extra,C=corpus,cqp=cqp_query) #print("Getting url:",url,file=sys.stderr) hits=requests.get(url) data=hits.json() # print(data["kwic"][0]['tokens']) sent_ids=set() if "kwic" not in data: #print("No results...") return [] results=[] for sent in data["kwic"]: idx=sent["structs"]["sentence_id"] if idx in sent_ids: continue sent_ids.add(idx) # print(sent) sentence=[] for token in sent['tokens']: if (token["word"] is not None) and (form in token) and (token[form].lower() not in stopwords): if adjective and (("pos" not in token) or (token["pos"]!="A")): continue sentence.append(token[form].lower()) if sentence: results.append(" ".join(sentence)) return results def simple_tokenizer(txt): """ Simple tokenizer, default one splits hyphens and other weirdish stuff. 
""" return txt.split(" ") def train_svm(data,labels): vectorizer=sklearn.feature_extraction.text.TfidfVectorizer(tokenizer=simple_tokenizer,max_df=0.3,sublinear_tf=True,use_idf=False) d=vectorizer.fit_transform(data) classifier = LinearSVC(C=0.1) classifier.fit(d,labels) features=[] f_names=vectorizer.get_feature_names() for i,class_vector in enumerate(classifier.coef_): sorted_by_weight=sorted(zip(class_vector,f_names), reverse=True) features.append([]) for f_weight,f_name in sorted_by_weight[:50]: features[-1].append((f_name,"{:.3}".format(f_weight))) if len(classifier.coef_)==1: # use negative features sorted_by_weight=sorted(zip(classifier.coef_[0],f_names)) features.insert(0,[]) # these are features for first class for f_weight,f_name in sorted_by_weight[:50]: features[0].append((f_name,"{:.3}".format(f_weight*-1))) return features def generate_html(fname,path,messages=[],features=[],ready=False): if len(features)<=6: fcol=2 emptydiv=12-len(features)*fcol elif len(features)<=12: fcol=1 emptydiv=12-len(features)*fcol else: fcol=1 emptydiv=0 with open(fname,"wt",encoding="utf-8") as f: template=jinja2.Environment(loader=jinja2.FileSystemLoader("./templates/")).get_template("result_tbl.html") rendered=template.render({"path":path,"messages":messages,"features":features,"ready":ready,"fcol":fcol,"emptydiv":emptydiv}) print(rendered,file=f) korpdef={"PB":"PB","S24":"s24_001,s24_002,s24_003,s24_004,s24_005,s24_006,s24_007,s24_008,s24_009,s24_010"} def main(hashed_json,path): # read json to get correct settings with open(TMPDIR+hashed_json+".json","rt") as f: d=json.load(f) fname=u"".join((RESDIR,hashed_json,d["date"],d["time"],".html")) info=[] info.append(d["date"]+" "+d["time"].replace("-",":")) if d["corpus"]=="PB": info.append("Keywords: "+str(d["keywords"])) print(d["keywords"]) else: info.append("Keywords: "+u" & ".join(",".join(klist) for klist in d["keywords"])) info.append("Random:"+str(d["random"])+" Case sensitive:"+str(d["case_sensitive"])+" Lemma:"+str(d["lemma"])+" Only adjectives:"+str(d["adjective"])) generate_html(fname,path,messages=info) class_names=[] labels=[] dataset=[] if d["corpus"]!="PB": uniq_words=set([w.lower() for sublist in d["keywords"] for w in sublist]) # set of unique words to use in masking else: uniq_words=set() try: # collect data for wordlist in d["keywords"]: if d["corpus"]=="PB": data=collect_data(wordlist,stopwords=uniq_words,case_sensitive=d["case_sensitive"],lemma=d["lemma"],adjective=d["adjective"]) else: data=collect_data_korp(words=wordlist,stopwords=uniq_words,corpus=korpdef[d["corpus"]],random=False,case_sensitive=d["case_sensitive"],lemma=d["lemma"],adjective=d["adjective"]) shuffle(data) random=data[:5000] if d["corpus"]=="PB": info.append(wordlist+" dataset size: {r}/{a}".format(r=str(len(random)),a=str(len(data)))) else: info.append(u",".join(wordlist)+" dataset size: {r}/{a}".format(r=str(len(random)),a=str(len(data)))) generate_html(fname,path,messages=info) if data: if isinstance(wordlist,list): class_names.append(u",".join(wordlist)) else: class_names.append(wordlist) dataset+=random labels+=[len(class_names)-1]*len(random) if len(class_names)==1 and d["random"]==True: data=collect_data_korp(words=d["keywords"][0],stopwords=uniq_words,corpus=korpdef[d["corpus"]],random=True,case_sensitive=d["case_sensitive"],lemma=d["lemma"],adjective=d["adjective"]) shuffle(data) random=data[:5000] info.append(u"Contrastive dataset size: {r}/{a}".format(r=str(len(random)),a=str(len(data)))) generate_html(fname,path,messages=info) if data: 
class_names.append("Contrastive") dataset+=random labels+=[len(class_names)-1]*len(random) # train svm features=train_svm(dataset,labels) flists=[] for i,feats in enumerate(features): # if class_names[i]=="Contrastive": # query=formulate_query(class_names[0].split(","),True,d["lemma"]) # else: # query=formulate_query(class_names[i].split(","),False,d["lemma"]) # link2query="<a href='http://epsilon-it.utu.fi/dep_search_webgui/query?db=S24&search={q}'>{text}</a>".format(q=query,text=class_names[i]) link2query=class_names[i] print(link2query) flists.append((link2query,feats)) except Exception as e: print(e) traceback.print_exc() info.append("Error: "+str(e)) flists=[] info.append("Done. This page will stay static, you can save the link to access the results also later.") generate_html(fname,path,messages=info,features=flists,ready=True) if __name__=="__main__": parser = argparse.ArgumentParser(description='') parser.add_argument('--hash', type=str, help='Hash of the jsoned settings') parser.add_argument('--path', type=str, help='Path for style files') args = parser.parse_args() main(args.hash,args.path)
gpl-3.0
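The keyword extraction in train_svm above reduces to fitting a LinearSVC on tf-idf vectors and reading off the highest-weighted features per class; a self-contained toy version of that idea, with an invented corpus and labels, might look like this:

# Toy illustration of the tf-idf + LinearSVC keyword idea used by train_svm
# above; the corpus and class labels here are made up for the example.
import sklearn.feature_extraction
from sklearn.svm import LinearSVC

data = ["the cat sat on the mat", "a cat chased a mouse",
        "the dog barked loudly", "a dog fetched the ball"]
labels = [0, 0, 1, 1]

vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(
    tokenizer=lambda txt: txt.split(" "), use_idf=False)
X = vectorizer.fit_transform(data)

classifier = LinearSVC(C=0.1)
classifier.fit(X, labels)

# With two classes coef_ has a single row: positive weights point to class 1,
# negative weights to class 0 (get_feature_names_out() on newer sklearn).
names = vectorizer.get_feature_names()
weights = classifier.coef_[0]
print(sorted(zip(weights, names), reverse=True)[:3])  # strongest features for class 1
print(sorted(zip(weights, names))[:3])                # strongest features for class 0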
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/pandas/formats/format.py
7
96434
# -*- coding: utf-8 -*- """ Internal module for formatting output data in csv, html, and latex files. This module also applies to display formatting. """ from __future__ import print_function from distutils.version import LooseVersion # pylint: disable=W0141 import sys from pandas.types.missing import isnull, notnull from pandas.types.common import (is_categorical_dtype, is_float_dtype, is_period_arraylike, is_integer_dtype, is_datetimetz, is_integer, is_float, is_numeric_dtype, is_datetime64_dtype, is_timedelta64_dtype) from pandas.types.generic import ABCSparseArray from pandas.core.base import PandasObject from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat from pandas.compat import (StringIO, lzip, range, map, zip, reduce, u, OrderedDict, unichr) from pandas.util.terminal import get_terminal_size from pandas.core.config import get_option, set_option from pandas.io.common import _get_handle, UnicodeWriter, _expand_user from pandas.formats.printing import adjoin, justify, pprint_thing import pandas.core.common as com import pandas.lib as lib from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex import pandas as pd import numpy as np import itertools import csv common_docstring = """ Parameters ---------- buf : StringIO-like, optional buffer to write to columns : sequence, optional the subset of columns to write; default None writes all columns col_space : int, optional the minimum width of each column header : bool, optional whether to print column labels, default True index : bool, optional whether to print index (row) labels, default True na_rep : string, optional string representation of NAN to use, default 'NaN' formatters : list or dict of one-parameter functions, optional formatter functions to apply to columns' elements by position or name, default None. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats, default None. The result of this function must be a unicode string. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row, default True index_names : bool, optional Prints the names of the indexes, default True line_width : int, optional Width to wrap a line in characters, default no wrap""" justify_docstring = """ justify : {'left', 'right'}, default None Left or right-justify the column labels. 
If None uses the option from the print configuration (controlled by set_option), 'right' out of the box.""" return_docstring = """ Returns ------- formatted : string (or unicode, depending on data and options)""" docstring_to_string = common_docstring + justify_docstring + return_docstring class CategoricalFormatter(object): def __init__(self, categorical, buf=None, length=True, na_rep='NaN', footer=True): self.categorical = categorical self.buf = buf if buf is not None else StringIO(u("")) self.na_rep = na_rep self.length = length self.footer = footer def _get_footer(self): footer = '' if self.length: if footer: footer += ', ' footer += "Length: %d" % len(self.categorical) level_info = self.categorical._repr_categories_info() # Levels are added in a newline if footer: footer += '\n' footer += level_info return compat.text_type(footer) def _get_formatted_values(self): return format_array(self.categorical.get_values(), None, float_format=None, na_rep=self.na_rep) def to_string(self): categorical = self.categorical if len(categorical) == 0: if self.footer: return self._get_footer() else: return u('') fmt_values = self._get_formatted_values() result = ['%s' % i for i in fmt_values] result = [i.strip() for i in result] result = u(', ').join(result) result = [u('[') + result + u(']')] if self.footer: footer = self._get_footer() if footer: result.append(footer) return compat.text_type(u('\n').join(result)) class SeriesFormatter(object): def __init__(self, series, buf=None, length=True, header=True, index=True, na_rep='NaN', name=False, float_format=None, dtype=True, max_rows=None): self.series = series self.buf = buf if buf is not None else StringIO() self.name = name self.na_rep = na_rep self.header = header self.length = length self.index = index self.max_rows = max_rows if float_format is None: float_format = get_option("display.float_format") self.float_format = float_format self.dtype = dtype self.adj = _get_adjustment() self._chk_truncate() def _chk_truncate(self): from pandas.tools.merge import concat max_rows = self.max_rows truncate_v = max_rows and (len(self.series) > max_rows) series = self.series if truncate_v: if max_rows == 1: row_num = max_rows series = series.iloc[:max_rows] else: row_num = max_rows // 2 series = concat((series.iloc[:row_num], series.iloc[-row_num:])) self.tr_row_num = row_num self.tr_series = series self.truncate_v = truncate_v def _get_footer(self): name = self.series.name footer = u('') if getattr(self.series.index, 'freq', None) is not None: footer += 'Freq: %s' % self.series.index.freqstr if self.name is not False and name is not None: if footer: footer += ', ' series_name = pprint_thing(name, escape_chars=('\t', '\r', '\n')) footer += ("Name: %s" % series_name) if name is not None else "" if self.length: if footer: footer += ', ' footer += 'Length: %d' % len(self.series) if self.dtype is not False and self.dtype is not None: name = getattr(self.tr_series.dtype, 'name', None) if name: if footer: footer += ', ' footer += 'dtype: %s' % pprint_thing(name) # level infos are added to the end and in a new line, like it is done # for Categoricals if is_categorical_dtype(self.tr_series.dtype): level_info = self.tr_series._values._repr_categories_info() if footer: footer += "\n" footer += level_info return compat.text_type(footer) def _get_formatted_index(self): index = self.tr_series.index is_multi = isinstance(index, MultiIndex) if is_multi: have_header = any(name for name in index.names) fmt_index = index.format(names=True) else: have_header = index.name is not 
None fmt_index = index.format(name=True) return fmt_index, have_header def _get_formatted_values(self): return format_array(self.tr_series._values, None, float_format=self.float_format, na_rep=self.na_rep) def to_string(self): series = self.tr_series footer = self._get_footer() if len(series) == 0: return 'Series([], ' + footer + ')' fmt_index, have_header = self._get_formatted_index() fmt_values = self._get_formatted_values() if self.truncate_v: n_header_rows = 0 row_num = self.tr_row_num width = self.adj.len(fmt_values[row_num - 1]) if width > 3: dot_str = '...' else: dot_str = '..' # Series uses mode=center because it has single value columns # DataFrame uses mode=left dot_str = self.adj.justify([dot_str], width, mode='center')[0] fmt_values.insert(row_num + n_header_rows, dot_str) fmt_index.insert(row_num + 1, '') if self.index: result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values]) else: result = self.adj.adjoin(3, fmt_values).replace('\n ', '\n').strip() if self.header and have_header: result = fmt_index[0] + '\n' + result if footer: result += '\n' + footer return compat.text_type(u('').join(result)) class TextAdjustment(object): def __init__(self): self.encoding = get_option("display.encoding") def len(self, text): return compat.strlen(text, encoding=self.encoding) def justify(self, texts, max_len, mode='right'): return justify(texts, max_len, mode=mode) def adjoin(self, space, *lists, **kwargs): return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs) class EastAsianTextAdjustment(TextAdjustment): def __init__(self): super(EastAsianTextAdjustment, self).__init__() if get_option("display.unicode.ambiguous_as_wide"): self.ambiguous_width = 2 else: self.ambiguous_width = 1 def len(self, text): return compat.east_asian_len(text, encoding=self.encoding, ambiguous_width=self.ambiguous_width) def justify(self, texts, max_len, mode='right'): # re-calculate padding space per str considering East Asian Width def _get_pad(t): return max_len - self.len(t) + len(t) if mode == 'left': return [x.ljust(_get_pad(x)) for x in texts] elif mode == 'center': return [x.center(_get_pad(x)) for x in texts] else: return [x.rjust(_get_pad(x)) for x in texts] def _get_adjustment(): use_east_asian_width = get_option("display.unicode.east_asian_width") if use_east_asian_width: return EastAsianTextAdjustment() else: return TextAdjustment() class TableFormatter(object): is_truncated = False show_dimensions = None @property def should_show_dimensions(self): return (self.show_dimensions is True or (self.show_dimensions == 'truncate' and self.is_truncated)) def _get_formatter(self, i): if isinstance(self.formatters, (list, tuple)): if is_integer(i): return self.formatters[i] else: return None else: if is_integer(i) and i not in self.columns: i = self.columns[i] return self.formatters.get(i, None) class DataFrameFormatter(TableFormatter): """ Render a DataFrame self.to_string() : console-friendly tabular output self.to_html() : html table self.to_latex() : LaTeX tabular environment table """ __doc__ = __doc__ if __doc__ else '' __doc__ += common_docstring + justify_docstring + return_docstring def __init__(self, frame, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, justify=None, float_format=None, sparsify=None, index_names=True, line_width=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', **kwds): self.frame = frame self.buf = _expand_user(buf) if buf is not None else StringIO() self.show_index_names = index_names if 
sparsify is None: sparsify = get_option("display.multi_sparse") self.sparsify = sparsify self.float_format = float_format self.formatters = formatters if formatters is not None else {} self.na_rep = na_rep self.decimal = decimal self.col_space = col_space self.header = header self.index = index self.line_width = line_width self.max_rows = max_rows self.max_cols = max_cols self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame)) self.show_dimensions = show_dimensions if justify is None: self.justify = get_option("display.colheader_justify") else: self.justify = justify self.kwds = kwds if columns is not None: self.columns = _ensure_index(columns) self.frame = self.frame[self.columns] else: self.columns = frame.columns self._chk_truncate() self.adj = _get_adjustment() def _chk_truncate(self): """ Checks whether the frame should be truncated. If so, slices the frame up. """ from pandas.tools.merge import concat # Column of which first element is used to determine width of a dot col self.tr_size_col = -1 # Cut the data to the information actually printed max_cols = self.max_cols max_rows = self.max_rows if max_cols == 0 or max_rows == 0: # assume we are in the terminal # (why else = 0) (w, h) = get_terminal_size() self.w = w self.h = h if self.max_rows == 0: dot_row = 1 prompt_row = 1 if self.show_dimensions: show_dimension_rows = 3 n_add_rows = (self.header + dot_row + show_dimension_rows + prompt_row) # rows available to fill with actual data max_rows_adj = self.h - n_add_rows self.max_rows_adj = max_rows_adj # Format only rows and columns that could potentially fit the # screen if max_cols == 0 and len(self.frame.columns) > w: max_cols = w if max_rows == 0 and len(self.frame) > h: max_rows = h if not hasattr(self, 'max_rows_adj'): self.max_rows_adj = max_rows if not hasattr(self, 'max_cols_adj'): self.max_cols_adj = max_cols max_cols_adj = self.max_cols_adj max_rows_adj = self.max_rows_adj truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj) truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj) frame = self.frame if truncate_h: if max_cols_adj == 0: col_num = len(frame.columns) elif max_cols_adj == 1: frame = frame.iloc[:, :max_cols] col_num = max_cols else: col_num = (max_cols_adj // 2) frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1) self.tr_col_num = col_num if truncate_v: if max_rows_adj == 0: row_num = len(frame) if max_rows_adj == 1: row_num = max_rows frame = frame.iloc[:max_rows, :] else: row_num = max_rows_adj // 2 frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :])) self.tr_row_num = row_num self.tr_frame = frame self.truncate_h = truncate_h self.truncate_v = truncate_v self.is_truncated = self.truncate_h or self.truncate_v def _to_str_columns(self): """ Render a DataFrame to a list of columns (as lists of strings). 
""" frame = self.tr_frame # may include levels names also str_index = self._get_formatted_index(frame) str_columns = self._get_formatted_column_labels(frame) if self.header: stringified = [] for i, c in enumerate(frame): cheader = str_columns[i] max_colwidth = max(self.col_space or 0, *(self.adj.len(x) for x in cheader)) fmt_values = self._format_col(i) fmt_values = _make_fixed_width(fmt_values, self.justify, minimum=max_colwidth, adj=self.adj) max_len = max(np.max([self.adj.len(x) for x in fmt_values]), max_colwidth) cheader = self.adj.justify(cheader, max_len, mode=self.justify) stringified.append(cheader + fmt_values) else: stringified = [] for i, c in enumerate(frame): fmt_values = self._format_col(i) fmt_values = _make_fixed_width(fmt_values, self.justify, minimum=(self.col_space or 0), adj=self.adj) stringified.append(fmt_values) strcols = stringified if self.index: strcols.insert(0, str_index) # Add ... to signal truncated truncate_h = self.truncate_h truncate_v = self.truncate_v if truncate_h: col_num = self.tr_col_num # infer from column header col_width = self.adj.len(strcols[self.tr_size_col][0]) strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index))) if truncate_v: n_header_rows = len(str_index) - len(frame) row_num = self.tr_row_num for ix, col in enumerate(strcols): # infer from above row cwidth = self.adj.len(strcols[ix][row_num]) is_dot_col = False if truncate_h: is_dot_col = ix == col_num + 1 if cwidth > 3 or is_dot_col: my_str = '...' else: my_str = '..' if ix == 0: dot_mode = 'left' elif is_dot_col: cwidth = self.adj.len(strcols[self.tr_size_col][0]) dot_mode = 'center' else: dot_mode = 'right' dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0] strcols[ix].insert(row_num + n_header_rows, dot_str) return strcols def to_string(self): """ Render a DataFrame to a console-friendly tabular output. """ from pandas import Series frame = self.frame if len(frame.columns) == 0 or len(frame.index) == 0: info_line = (u('Empty %s\nColumns: %s\nIndex: %s') % (type(self.frame).__name__, pprint_thing(frame.columns), pprint_thing(frame.index))) text = info_line else: strcols = self._to_str_columns() if self.line_width is None: # no need to wrap around just print # the whole frame text = self.adj.adjoin(1, *strcols) elif (not isinstance(self.max_cols, int) or self.max_cols > 0): # need to wrap around text = self._join_multiline(*strcols) else: # max_cols == 0. Try to fit frame to terminal text = self.adj.adjoin(1, *strcols).split('\n') row_lens = Series(text).apply(len) max_len_col_ix = np.argmax(row_lens) max_len = row_lens[max_len_col_ix] headers = [ele[0] for ele in strcols] # Size of last col determines dot col size. 
See # `self._to_str_columns size_tr_col = len(headers[self.tr_size_col]) max_len += size_tr_col # Need to make space for largest row # plus truncate dot col dif = max_len - self.w adj_dif = dif col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) n_cols = len(col_lens) counter = 0 while adj_dif > 0 and n_cols > 1: counter += 1 mid = int(round(n_cols / 2.)) mid_ix = col_lens.index[mid] col_len = col_lens[mid_ix] adj_dif -= (col_len + 1) # adjoin adds one col_lens = col_lens.drop(mid_ix) n_cols = len(col_lens) max_cols_adj = n_cols - self.index # subtract index column self.max_cols_adj = max_cols_adj # Call again _chk_truncate to cut frame appropriately # and then generate string representation self._chk_truncate() strcols = self._to_str_columns() text = self.adj.adjoin(1, *strcols) if not self.index: text = text.replace('\n ', '\n').strip() self.buf.writelines(text) if self.should_show_dimensions: self.buf.write("\n\n[%d rows x %d columns]" % (len(frame), len(frame.columns))) def _join_multiline(self, *strcols): lwidth = self.line_width adjoin_width = 1 strcols = list(strcols) if self.index: idx = strcols.pop(0) lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width col_widths = [np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 for col in strcols] col_bins = _binify(col_widths, lwidth) nbins = len(col_bins) if self.truncate_v: nrows = self.max_rows_adj + 1 else: nrows = len(self.frame) str_lst = [] st = 0 for i, ed in enumerate(col_bins): row = strcols[st:ed] if self.index: row.insert(0, idx) if nbins > 1: if ed <= len(strcols) and i < nbins - 1: row.append([' \\'] + [' '] * (nrows - 1)) else: row.append([' '] * nrows) str_lst.append(self.adj.adjoin(adjoin_width, *row)) st = ed return '\n\n'.join(str_lst) def to_latex(self, column_format=None, longtable=False, encoding=None): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ latex_renderer = LatexFormatter(self, column_format=column_format, longtable=longtable) if encoding is None: encoding = 'ascii' if compat.PY2 else 'utf-8' if hasattr(self.buf, 'write'): latex_renderer.write_result(self.buf) elif isinstance(self.buf, compat.string_types): import codecs with codecs.open(self.buf, 'w', encoding=encoding) as f: latex_renderer.write_result(f) else: raise TypeError('buf is not a file name and it has no write ' 'method') def _format_col(self, i): frame = self.tr_frame formatter = self._get_formatter(i) return format_array(frame.iloc[:, i]._values, formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space, decimal=self.decimal) def to_html(self, classes=None, notebook=False, border=None): """ Render a DataFrame to a html table. Parameters ---------- classes : str or list-like classes to include in the `class` attribute of the opening ``<table>`` tag, in addition to the default "dataframe". notebook : {True, False}, optional, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening ``<table>`` tag. Default ``pd.options.html.border``. .. 
versionadded:: 0.19.0 """ html_renderer = HTMLFormatter(self, classes=classes, max_rows=self.max_rows, max_cols=self.max_cols, notebook=notebook, border=border) if hasattr(self.buf, 'write'): html_renderer.write_result(self.buf) elif isinstance(self.buf, compat.string_types): with open(self.buf, 'w') as f: html_renderer.write_result(f) else: raise TypeError('buf is not a file name and it has no write ' ' method') def _get_formatted_column_labels(self, frame): from pandas.core.index import _sparsify def is_numeric_dtype(dtype): return issubclass(dtype.type, np.number) columns = frame.columns if isinstance(columns, MultiIndex): fmt_columns = columns.format(sparsify=False, adjoin=False) fmt_columns = lzip(*fmt_columns) dtypes = self.frame.dtypes._values # if we have a Float level, they don't use leading space at all restrict_formatting = any([l.is_floating for l in columns.levels]) need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) def space_format(x, y): if (y not in self.formatters and need_leadsp[x] and not restrict_formatting): return ' ' + y return y str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns])) if self.sparsify: str_columns = _sparsify(str_columns) str_columns = [list(x) for x in zip(*str_columns)] else: fmt_columns = columns.format() dtypes = self.frame.dtypes need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) str_columns = [[' ' + x if not self._get_formatter(i) and need_leadsp[x] else x] for i, (col, x) in enumerate(zip(columns, fmt_columns))] if self.show_index_names and self.has_index_names: for x in str_columns: x.append('') # self.str_columns = str_columns return str_columns @property def has_index_names(self): return _has_names(self.frame.index) @property def has_column_names(self): return _has_names(self.frame.columns) def _get_formatted_index(self, frame): # Note: this is only used by to_string() and to_latex(), not by # to_html(). index = frame.index columns = frame.columns show_index_names = self.show_index_names and self.has_index_names show_col_names = (self.show_index_names and self.has_column_names) fmt = self._get_formatter('__index__') if isinstance(index, MultiIndex): fmt_index = index.format(sparsify=self.sparsify, adjoin=False, names=show_index_names, formatter=fmt) else: fmt_index = [index.format(name=show_index_names, formatter=fmt)] fmt_index = [tuple(_make_fixed_width(list(x), justify='left', minimum=(self.col_space or 0), adj=self.adj)) for x in fmt_index] adjoined = self.adj.adjoin(1, *fmt_index).split('\n') # empty space for columns if show_col_names: col_header = ['%s' % x for x in self._get_column_name_list()] else: col_header = [''] * columns.nlevels if self.header: return col_header + adjoined else: return adjoined def _get_column_name_list(self): names = [] columns = self.frame.columns if isinstance(columns, MultiIndex): names.extend('' if name is None else name for name in columns.names) else: names.append('' if columns.name is None else columns.name) return names class LatexFormatter(TableFormatter): """ Used to render a DataFrame to a LaTeX tabular/longtable environment output. Parameters ---------- formatter : `DataFrameFormatter` column_format : str, default None The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns longtable : boolean, default False Use a longtable environment instead of tabular. 
See also -------- HTMLFormatter """ def __init__(self, formatter, column_format=None, longtable=False): self.fmt = formatter self.frame = self.fmt.frame self.column_format = column_format self.longtable = longtable def write_result(self, buf): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ # string representation of the columns if len(self.frame.columns) == 0 or len(self.frame.index) == 0: info_line = (u('Empty %s\nColumns: %s\nIndex: %s') % (type(self.frame).__name__, self.frame.columns, self.frame.index)) strcols = [[info_line]] else: strcols = self.fmt._to_str_columns() def get_col_type(dtype): if issubclass(dtype.type, np.number): return 'r' else: return 'l' if self.fmt.index and isinstance(self.frame.index, MultiIndex): clevels = self.frame.columns.nlevels strcols.pop(0) name = any(self.frame.index.names) for i, lev in enumerate(self.frame.index.levels): lev2 = lev.format() blank = ' ' * len(lev2[0]) lev3 = [blank] * clevels if name: lev3.append(lev.name) for level_idx, group in itertools.groupby( self.frame.index.labels[i]): count = len(list(group)) lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) strcols.insert(i, lev3) column_format = self.column_format if column_format is None: dtypes = self.frame.dtypes._values column_format = ''.join(map(get_col_type, dtypes)) if self.fmt.index: index_format = 'l' * self.frame.index.nlevels column_format = index_format + column_format elif not isinstance(column_format, compat.string_types): # pragma: no cover raise AssertionError('column_format must be str or unicode, not %s' % type(column_format)) if not self.longtable: buf.write('\\begin{tabular}{%s}\n' % column_format) buf.write('\\toprule\n') else: buf.write('\\begin{longtable}{%s}\n' % column_format) buf.write('\\toprule\n') nlevels = self.frame.columns.nlevels if any(self.frame.index.names): nlevels += 1 for i, row in enumerate(zip(*strcols)): if i == nlevels and self.fmt.header: buf.write('\\midrule\n') # End of header if self.longtable: buf.write('\\endhead\n') buf.write('\\midrule\n') buf.write('\\multicolumn{3}{r}{{Continued on next ' 'page}} \\\\\n') buf.write('\\midrule\n') buf.write('\\endfoot\n\n') buf.write('\\bottomrule\n') buf.write('\\endlastfoot\n') if self.fmt.kwds.get('escape', True): # escape backslashes first crow = [(x.replace('\\', '\\textbackslash').replace('_', '\\_') .replace('%', '\\%').replace('$', '\\$') .replace('#', '\\#').replace('{', '\\{') .replace('}', '\\}').replace('~', '\\textasciitilde') .replace('^', '\\textasciicircum').replace('&', '\\&') if x else '{}') for x in row] else: crow = [x if x else '{}' for x in row] buf.write(' & '.join(crow)) buf.write(' \\\\\n') if not self.longtable: buf.write('\\bottomrule\n') buf.write('\\end{tabular}\n') else: buf.write('\\end{longtable}\n') class HTMLFormatter(TableFormatter): indent_delta = 2 def __init__(self, formatter, classes=None, max_rows=None, max_cols=None, notebook=False, border=None): self.fmt = formatter self.classes = classes self.frame = self.fmt.frame self.columns = self.fmt.tr_frame.columns self.elements = [] self.bold_rows = self.fmt.kwds.get('bold_rows', False) self.escape = self.fmt.kwds.get('escape', True) self.max_rows = max_rows or len(self.fmt.frame) self.max_cols = max_cols or len(self.fmt.columns) self.show_dimensions = self.fmt.show_dimensions self.is_truncated = (self.max_rows < len(self.fmt.frame) or self.max_cols < len(self.fmt.columns)) self.notebook = notebook if border is None: border = get_option('html.border') self.border = border def write(self, 
s, indent=0): rs = pprint_thing(s) self.elements.append(' ' * indent + rs) def write_th(self, s, indent=0, tags=None): if self.fmt.col_space is not None and self.fmt.col_space > 0: tags = (tags or "") tags += 'style="min-width: %s;"' % self.fmt.col_space return self._write_cell(s, kind='th', indent=indent, tags=tags) def write_td(self, s, indent=0, tags=None): return self._write_cell(s, kind='td', indent=indent, tags=tags) def _write_cell(self, s, kind='td', indent=0, tags=None): if tags is not None: start_tag = '<%s %s>' % (kind, tags) else: start_tag = '<%s>' % kind if self.escape: # escape & first to prevent double escaping of & esc = OrderedDict([('&', r'&amp;'), ('<', r'&lt;'), ('>', r'&gt;')]) else: esc = {} rs = pprint_thing(s, escape_chars=esc).strip() self.write('%s%s</%s>' % (start_tag, rs, kind), indent) def write_tr(self, line, indent=0, indent_delta=4, header=False, align=None, tags=None, nindex_levels=0): if tags is None: tags = {} if align is None: self.write('<tr>', indent) else: self.write('<tr style="text-align: %s;">' % align, indent) indent += indent_delta for i, s in enumerate(line): val_tag = tags.get(i, None) if header or (self.bold_rows and i < nindex_levels): self.write_th(s, indent, tags=val_tag) else: self.write_td(s, indent, tags=val_tag) indent -= indent_delta self.write('</tr>', indent) def write_result(self, buf): indent = 0 frame = self.frame _classes = ['dataframe'] # Default class. if self.classes is not None: if isinstance(self.classes, str): self.classes = self.classes.split() if not isinstance(self.classes, (list, tuple)): raise AssertionError('classes must be list or tuple, ' 'not %s' % type(self.classes)) _classes.extend(self.classes) if self.notebook: div_style = '' try: import IPython if IPython.__version__ < LooseVersion('3.0.0'): div_style = ' style="max-width:1500px;overflow:auto;"' except (ImportError, AttributeError): pass self.write('<div{0}>'.format(div_style)) self.write('<table border="%s" class="%s">' % (self.border, ' '.join(_classes)), indent) indent += self.indent_delta indent = self._write_header(indent) indent = self._write_body(indent) self.write('</table>', indent) if self.should_show_dimensions: by = chr(215) if compat.PY3 else unichr(215) # × self.write(u('<p>%d rows %s %d columns</p>') % (len(frame), by, len(frame.columns))) if self.notebook: self.write('</div>') _put_lines(buf, self.elements) def _write_header(self, indent): truncate_h = self.fmt.truncate_h row_levels = self.frame.index.nlevels if not self.fmt.header: # write nothing return indent def _column_header(): if self.fmt.index: row = [''] * (self.frame.index.nlevels - 1) else: row = [] if isinstance(self.columns, MultiIndex): if self.fmt.has_column_names and self.fmt.index: row.append(single_column_table(self.columns.names)) else: row.append('') style = "text-align: %s;" % self.fmt.justify row.extend([single_column_table(c, self.fmt.justify, style) for c in self.columns]) else: if self.fmt.index: row.append(self.columns.name or '') row.extend(self.columns) return row self.write('<thead>', indent) row = [] indent += self.indent_delta if isinstance(self.columns, MultiIndex): template = 'colspan="%d" halign="left"' if self.fmt.sparsify: # GH3547 sentinel = com.sentinel_factory() else: sentinel = None levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False) level_lengths = _get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 for lnum, (records, values) in enumerate(zip(level_lengths, levels)): if truncate_h: # modify the header lines 
ins_col = self.fmt.tr_col_num if self.fmt.sparsify: recs_new = {} # Increment tags after ... col. for tag, span in list(records.items()): if tag >= ins_col: recs_new[tag + 1] = span elif tag + span > ins_col: recs_new[tag] = span + 1 if lnum == inner_lvl: values = (values[:ins_col] + (u('...'),) + values[ins_col:]) else: # sparse col headers do not receive a ... values = (values[:ins_col] + (values[ins_col - 1], ) + values[ins_col:]) else: recs_new[tag] = span # if ins_col lies between tags, all col headers # get ... if tag + span == ins_col: recs_new[ins_col] = 1 values = (values[:ins_col] + (u('...'),) + values[ins_col:]) records = recs_new inner_lvl = len(level_lengths) - 1 if lnum == inner_lvl: records[ins_col] = 1 else: recs_new = {} for tag, span in list(records.items()): if tag >= ins_col: recs_new[tag + 1] = span else: recs_new[tag] = span recs_new[ins_col] = 1 records = recs_new values = (values[:ins_col] + [u('...')] + values[ins_col:]) name = self.columns.names[lnum] row = [''] * (row_levels - 1) + ['' if name is None else pprint_thing(name)] if row == [""] and self.fmt.index is False: row = [] tags = {} j = len(row) for i, v in enumerate(values): if i in records: if records[i] > 1: tags[j] = template % records[i] else: continue j += 1 row.append(v) self.write_tr(row, indent, self.indent_delta, tags=tags, header=True) else: col_row = _column_header() align = self.fmt.justify if truncate_h: ins_col = row_levels + self.fmt.tr_col_num col_row.insert(ins_col, '...') self.write_tr(col_row, indent, self.indent_delta, header=True, align=align) if self.fmt.has_index_names and self.fmt.index: row = ([x if x is not None else '' for x in self.frame.index.names] + [''] * min(len(self.columns), self.max_cols)) if truncate_h: ins_col = row_levels + self.fmt.tr_col_num row.insert(ins_col, '') self.write_tr(row, indent, self.indent_delta, header=True) indent -= self.indent_delta self.write('</thead>', indent) return indent def _write_body(self, indent): self.write('<tbody>', indent) indent += self.indent_delta fmt_values = {} for i in range(min(len(self.columns), self.max_cols)): fmt_values[i] = self.fmt._format_col(i) # write values if self.fmt.index: if isinstance(self.frame.index, MultiIndex): self._write_hierarchical_rows(fmt_values, indent) else: self._write_regular_rows(fmt_values, indent) else: for i in range(len(self.frame)): row = [fmt_values[j][i] for j in range(len(self.columns))] self.write_tr(row, indent, self.indent_delta, tags=None) indent -= self.indent_delta self.write('</tbody>', indent) indent -= self.indent_delta return indent def _write_regular_rows(self, fmt_values, indent): truncate_h = self.fmt.truncate_h truncate_v = self.fmt.truncate_v ncols = len(self.fmt.tr_frame.columns) nrows = len(self.fmt.tr_frame) fmt = self.fmt._get_formatter('__index__') if fmt is not None: index_values = self.fmt.tr_frame.index.map(fmt) else: index_values = self.fmt.tr_frame.index.format() row = [] for i in range(nrows): if truncate_v and i == (self.fmt.tr_row_num): str_sep_row = ['...' 
for ele in row] self.write_tr(str_sep_row, indent, self.indent_delta, tags=None, nindex_levels=1) row = [] row.append(index_values[i]) row.extend(fmt_values[j][i] for j in range(ncols)) if truncate_h: dot_col_ix = self.fmt.tr_col_num + 1 row.insert(dot_col_ix, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=1) def _write_hierarchical_rows(self, fmt_values, indent): template = 'rowspan="%d" valign="top"' truncate_h = self.fmt.truncate_h truncate_v = self.fmt.truncate_v frame = self.fmt.tr_frame ncols = len(frame.columns) nrows = len(frame) row_levels = self.frame.index.nlevels idx_values = frame.index.format(sparsify=False, adjoin=False, names=False) idx_values = lzip(*idx_values) if self.fmt.sparsify: # GH3547 sentinel = com.sentinel_factory() levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) level_lengths = _get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 if truncate_v: # Insert ... row and adjust idx_values and # level_lengths to take this into account. ins_row = self.fmt.tr_row_num for lnum, records in enumerate(level_lengths): rec_new = {} for tag, span in list(records.items()): if tag >= ins_row: rec_new[tag + 1] = span elif tag + span > ins_row: rec_new[tag] = span + 1 dot_row = list(idx_values[ins_row - 1]) dot_row[-1] = u('...') idx_values.insert(ins_row, tuple(dot_row)) else: rec_new[tag] = span # If ins_row lies between tags, all cols idx cols # receive ... if tag + span == ins_row: rec_new[ins_row] = 1 if lnum == 0: idx_values.insert(ins_row, tuple( [u('...')] * len(level_lengths))) level_lengths[lnum] = rec_new level_lengths[inner_lvl][ins_row] = 1 for ix_col in range(len(fmt_values)): fmt_values[ix_col].insert(ins_row, '...') nrows += 1 for i in range(nrows): row = [] tags = {} sparse_offset = 0 j = 0 for records, v in zip(level_lengths, idx_values[i]): if i in records: if records[i] > 1: tags[j] = template % records[i] else: sparse_offset += 1 continue j += 1 row.append(v) row.extend(fmt_values[j][i] for j in range(ncols)) if truncate_h: row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=tags, nindex_levels=len(levels) - sparse_offset) else: for i in range(len(frame)): idx_values = list(zip(*frame.index.format( sparsify=False, adjoin=False, names=False))) row = [] row.extend(idx_values[i]) row.extend(fmt_values[j][i] for j in range(ncols)) if truncate_h: row.insert(row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=frame.index.nlevels) def _get_level_lengths(levels, sentinel=''): """For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values on for level. sentinel : string, optional Value which states that no new index starts on there. Returns ---------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index). 
""" if len(levels) == 0: return [] control = [True for x in levels[0]] result = [] for level in levels: last_index = 0 lengths = {} for i, key in enumerate(level): if control[i] and key == sentinel: pass else: control[i] = False lengths[last_index] = i - last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result class CSVFormatter(object): def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, cols=None, header=True, index=True, index_label=None, mode='w', nanRep=None, encoding=None, compression=None, quoting=None, line_terminator='\n', chunksize=None, tupleize_cols=False, quotechar='"', date_format=None, doublequote=True, escapechar=None, decimal='.'): self.obj = obj if path_or_buf is None: path_or_buf = StringIO() self.path_or_buf = _expand_user(path_or_buf) self.sep = sep self.na_rep = na_rep self.float_format = float_format self.decimal = decimal self.header = header self.index = index self.index_label = index_label self.mode = mode self.encoding = encoding self.compression = compression if quoting is None: quoting = csv.QUOTE_MINIMAL self.quoting = quoting if quoting == csv.QUOTE_NONE: # prevents crash in _csv quotechar = None self.quotechar = quotechar self.doublequote = doublequote self.escapechar = escapechar self.line_terminator = line_terminator self.date_format = date_format self.tupleize_cols = tupleize_cols self.has_mi_columns = (isinstance(obj.columns, MultiIndex) and not self.tupleize_cols) # validate mi options if self.has_mi_columns: if cols is not None: raise TypeError("cannot specify cols with a MultiIndex on the " "columns") if cols is not None: if isinstance(cols, Index): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, Index): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting) else: cols = list(cols) # save it self.cols = cols # preallocate data 2d list self.blocks = self.obj._data.blocks ncols = sum(b.shape[0] for b in self.blocks) self.data = [None] * ncols if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 self.chunksize = int(chunksize) self.data_index = obj.index if isinstance(obj.index, PeriodIndex): self.data_index = obj.index.to_timestamp() if (isinstance(self.data_index, DatetimeIndex) and date_format is not None): self.data_index = Index([x.strftime(date_format) if notnull(x) else '' for x in self.data_index]) self.nlevels = getattr(self.data_index, 'nlevels', 1) if not index: self.nlevels = 0 def save(self): # create the writer & save if hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) close = True try: writer_kwargs = dict(lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) if self.encoding is not None: writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) else: self.writer = csv.writer(f, **writer_kwargs) self._save() finally: if close: f.close() def _save_header(self): writer = self.writer obj = self.obj index_label = self.index_label cols = self.cols 
has_mi_columns = self.has_mi_columns header = self.header encoded_labels = [] has_aliases = isinstance(header, (tuple, list, np.ndarray, Index)) if not (has_aliases or self.header): return if has_aliases: if len(header) != len(cols): raise ValueError(('Writing %d cols but got %d aliases' % (len(cols), len(header)))) else: write_cols = header else: write_cols = cols if self.index: # should write something for index label if index_label is not False: if index_label is None: if isinstance(obj.index, MultiIndex): index_label = [] for i, name in enumerate(obj.index.names): if name is None: name = '' index_label.append(name) else: index_label = obj.index.name if index_label is None: index_label = [''] else: index_label = [index_label] elif not isinstance(index_label, (list, tuple, np.ndarray, Index)): # given a string for a DF with Index index_label = [index_label] encoded_labels = list(index_label) else: encoded_labels = [] if not has_mi_columns: encoded_labels += list(write_cols) writer.writerow(encoded_labels) else: # write out the mi columns = obj.columns # write out the names for each level, then ALL of the values for # each level for i in range(columns.nlevels): # we need at least 1 index column to write our col names col_line = [] if self.index: # name is the first column col_line.append(columns.names[i]) if isinstance(index_label, list) and len(index_label) > 1: col_line.extend([''] * (len(index_label) - 1)) col_line.extend(columns.get_level_values(i)) writer.writerow(col_line) # Write out the index line if it's not empty. # Otherwise, we will print out an extraneous # blank line between the mi and the data rows. if encoded_labels and set(encoded_labels) != set(['']): encoded_labels.extend([''] * len(columns)) writer.writerow(encoded_labels) def _save(self): self._save_header() nrows = len(self.data_index) # write in chunksize bites chunksize = self.chunksize chunks = int(nrows / chunksize) + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break self._save_chunk(start_i, end_i) def _save_chunk(self, start_i, end_i): data_index = self.data_index # create the data for a chunk slicer = slice(start_i, end_i) for i in range(len(self.blocks)): b = self.blocks[i] d = b.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format, decimal=self.decimal, date_format=self.date_format, quoting=self.quoting) for col_loc, col in zip(b.mgr_locs, d): # self.data is a preallocated list self.data[col_loc] = col ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format, decimal=self.decimal, date_format=self.date_format, quoting=self.quoting) lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) # from collections import namedtuple # ExcelCell = namedtuple("ExcelCell", # 'row, col, val, style, mergestart, mergeend') class ExcelCell(object): __fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend') __slots__ = __fields__ def __init__(self, row, col, val, style=None, mergestart=None, mergeend=None): self.row = row self.col = col self.val = val self.style = style self.mergestart = mergestart self.mergeend = mergeend header_style = {"font": {"bold": True}, "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} class ExcelFormatter(object): """ Class for formatting a DataFrame to a list of ExcelCells, Parameters ---------- df : dataframe na_rep: na representation float_format : 
string, default None Format string for floating point numbers cols : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True output row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. merge_cells : boolean, default False Format MultiIndex and Hierarchical Rows as merged cells. inf_rep : string, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf. """ def __init__(self, df, na_rep='', float_format=None, cols=None, header=True, index=True, index_label=None, merge_cells=False, inf_rep='inf'): self.rowcounter = 0 self.na_rep = na_rep self.df = df if cols is not None: self.df = df.loc[:, cols] self.columns = self.df.columns self.float_format = float_format self.index = index self.index_label = index_label self.header = header self.merge_cells = merge_cells self.inf_rep = inf_rep def _format_value(self, val): if lib.checknull(val): val = self.na_rep elif is_float(val): if lib.isposinf_scalar(val): val = self.inf_rep elif lib.isneginf_scalar(val): val = '-%s' % self.inf_rep elif self.float_format is not None: val = float(self.float_format % val) return val def _format_header_mi(self): if self.columns.nlevels > 1: if not self.index: raise NotImplementedError("Writing to Excel with MultiIndex" " columns and no index " "('index'=False) is not yet " "implemented.") has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if not (has_aliases or self.header): return columns = self.columns level_strs = columns.format(sparsify=self.merge_cells, adjoin=False, names=False) level_lengths = _get_level_lengths(level_strs) coloffset = 0 lnum = 0 if self.index and isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index[0]) - 1 if self.merge_cells: # Format multi-index as a merged cells. for lnum in range(len(level_lengths)): name = columns.names[lnum] yield ExcelCell(lnum, coloffset, name, header_style) for lnum, (spans, levels, labels) in enumerate(zip( level_lengths, columns.levels, columns.labels)): values = levels.take(labels) for i in spans: if spans[i] > 1: yield ExcelCell(lnum, coloffset + i + 1, values[i], header_style, lnum, coloffset + i + spans[i]) else: yield ExcelCell(lnum, coloffset + i + 1, values[i], header_style) else: # Format in legacy format with dots to indicate levels. 
for i, values in enumerate(zip(*level_strs)): v = ".".join(map(pprint_thing, values)) yield ExcelCell(lnum, coloffset + i + 1, v, header_style) self.rowcounter = lnum def _format_header_regular(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if has_aliases or self.header: coloffset = 0 if self.index: coloffset = 1 if isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index[0]) colnames = self.columns if has_aliases: if len(self.header) != len(self.columns): raise ValueError('Writing %d cols but got %d aliases' % (len(self.columns), len(self.header))) else: colnames = self.header for colindex, colname in enumerate(colnames): yield ExcelCell(self.rowcounter, colindex + coloffset, colname, header_style) def _format_header(self): if isinstance(self.columns, MultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() gen2 = () if self.df.index.names: row = [x if x is not None else '' for x in self.df.index.names] + [''] * len(self.columns) if reduce(lambda x, y: x and y, map(lambda x: x != '', row)): gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style) for colindex, val in enumerate(row)) self.rowcounter += 1 return itertools.chain(gen, gen2) def _format_body(self): if isinstance(self.df.index, MultiIndex): return self._format_hierarchical_rows() else: return self._format_regular_rows() def _format_regular_rows(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if has_aliases or self.header: self.rowcounter += 1 coloffset = 0 # output index and index_label? if self.index: # chek aliases # if list only take first as this is not a MultiIndex if (self.index_label and isinstance(self.index_label, (list, tuple, np.ndarray, Index))): index_label = self.index_label[0] # if string good to go elif self.index_label and isinstance(self.index_label, str): index_label = self.index_label else: index_label = self.df.index.names[0] if isinstance(self.columns, MultiIndex): self.rowcounter += 1 if index_label and self.header is not False: yield ExcelCell(self.rowcounter - 1, 0, index_label, header_style) # write index_values index_values = self.df.index if isinstance(self.df.index, PeriodIndex): index_values = self.df.index.to_timestamp() coloffset = 1 for idx, idxval in enumerate(index_values): yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style) # Write the body of the frame data series by series. for colidx in range(len(self.columns)): series = self.df.iloc[:, colidx] for i, val in enumerate(series): yield ExcelCell(self.rowcounter + i, colidx + coloffset, val) def _format_hierarchical_rows(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if has_aliases or self.header: self.rowcounter += 1 gcolidx = 0 if self.index: index_labels = self.df.index.names # check for aliases if (self.index_label and isinstance(self.index_label, (list, tuple, np.ndarray, Index))): index_labels = self.index_label # MultiIndex columns require an extra row # with index names (blank if None) for # unambigous round-trip, unless not merging, # in which case the names all go on one row Issue #11328 if isinstance(self.columns, MultiIndex) and self.merge_cells: self.rowcounter += 1 # if index labels are not empty go ahead and dump if (any(x is not None for x in index_labels) and self.header is not False): for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, header_style) if self.merge_cells: # Format hierarchical rows as merged cells. 
level_strs = self.df.index.format(sparsify=True, adjoin=False, names=False) level_lengths = _get_level_lengths(level_strs) for spans, levels, labels in zip(level_lengths, self.df.index.levels, self.df.index.labels): values = levels.take(labels, allow_fill=levels._can_hold_na, fill_value=True) for i in spans: if spans[i] > 1: yield ExcelCell(self.rowcounter + i, gcolidx, values[i], header_style, self.rowcounter + i + spans[i] - 1, gcolidx) else: yield ExcelCell(self.rowcounter + i, gcolidx, values[i], header_style) gcolidx += 1 else: # Format hierarchical rows with non-merged values. for indexcolvals in zip(*self.df.index): for idx, indexcolval in enumerate(indexcolvals): yield ExcelCell(self.rowcounter + idx, gcolidx, indexcolval, header_style) gcolidx += 1 # Write the body of the frame data series by series. for colidx in range(len(self.columns)): series = self.df.iloc[:, colidx] for i, val in enumerate(series): yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val) def get_formatted_cells(self): for cell in itertools.chain(self._format_header(), self._format_body()): cell.val = self._format_value(cell.val) yield cell # ---------------------------------------------------------------------- # Array formatters def format_array(values, formatter, float_format=None, na_rep='NaN', digits=None, space=None, justify='right', decimal='.'): if is_categorical_dtype(values): fmt_klass = CategoricalArrayFormatter elif is_float_dtype(values.dtype): fmt_klass = FloatArrayFormatter elif is_period_arraylike(values): fmt_klass = PeriodArrayFormatter elif is_integer_dtype(values.dtype): fmt_klass = IntArrayFormatter elif is_datetimetz(values): fmt_klass = Datetime64TZFormatter elif is_datetime64_dtype(values.dtype): fmt_klass = Datetime64Formatter elif is_timedelta64_dtype(values.dtype): fmt_klass = Timedelta64Formatter else: fmt_klass = GenericArrayFormatter if space is None: space = get_option("display.column_space") if float_format is None: float_format = get_option("display.float_format") if digits is None: digits = get_option("display.precision") fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep, float_format=float_format, formatter=formatter, space=space, justify=justify, decimal=decimal) return fmt_obj.get_result() class GenericArrayFormatter(object): def __init__(self, values, digits=7, formatter=None, na_rep='NaN', space=12, float_format=None, justify='right', decimal='.', quoting=None, fixed_width=True): self.values = values self.digits = digits self.na_rep = na_rep self.space = space self.formatter = formatter self.float_format = float_format self.justify = justify self.decimal = decimal self.quoting = quoting self.fixed_width = fixed_width def get_result(self): fmt_values = self._format_strings() return _make_fixed_width(fmt_values, self.justify) def _format_strings(self): if self.float_format is None: float_format = get_option("display.float_format") if float_format is None: fmt_str = '%% .%dg' % get_option("display.precision") float_format = lambda x: fmt_str % x else: float_format = self.float_format formatter = ( self.formatter if self.formatter is not None else (lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n')))) def _format(x): if self.na_rep is not None and lib.checknull(x): if x is None: return 'None' elif x is pd.NaT: return 'NaT' return self.na_rep elif isinstance(x, PandasObject): return '%s' % x else: # object dtype return '%s' % formatter(x) vals = self.values if isinstance(vals, Index): vals = vals._values elif isinstance(vals, ABCSparseArray): vals = 
vals.values is_float_type = lib.map_infer(vals, is_float) & notnull(vals) leading_space = is_float_type.any() fmt_values = [] for i, v in enumerate(vals): if not is_float_type[i] and leading_space: fmt_values.append(' %s' % _format(v)) elif is_float_type[i]: fmt_values.append(float_format(v)) else: fmt_values.append(' %s' % _format(v)) return fmt_values class FloatArrayFormatter(GenericArrayFormatter): """ """ def __init__(self, *args, **kwargs): GenericArrayFormatter.__init__(self, *args, **kwargs) # float_format is expected to be a string # formatter should be used to pass a function if self.float_format is not None and self.formatter is None: if callable(self.float_format): self.formatter = self.float_format self.float_format = None def _value_formatter(self, float_format=None, threshold=None): """Returns a function to be applied on each value to format it """ # the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format # we are going to compose different functions, to first convert to # a string, then replace the decimal symbol, and finally chop according # to the threshold # when there is no float_format, we use str instead of '%g' # because str(0.0) = '0.0' while '%g' % 0.0 = '0' if float_format: def base_formatter(v): return (float_format % v) if notnull(v) else self.na_rep else: def base_formatter(v): return str(v) if notnull(v) else self.na_rep if self.decimal != '.': def decimal_formatter(v): return base_formatter(v).replace('.', self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notnull(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter def get_result_as_array(self): """ Returns the float values converted into strings using the parameters given at initalisation, as a numpy array """ if self.formatter is not None: return np.array([self.formatter(x) for x in self.values]) if self.fixed_width: threshold = get_option("display.chop_threshold") else: threshold = None # if we have a fixed_width, we'll need to try different float_format def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) # separate the wheat from the chaff values = self.values mask = isnull(values) if hasattr(values, 'to_dense'): # sparse numpy ndarray values = values.to_dense() values = np.array(values, dtype='object') values[mask] = self.na_rep imask = (~mask).ravel() values.flat[imask] = np.array([formatter(val) for val in values.ravel()[imask]]) if self.fixed_width: return _trim_zeros(values, self.na_rep) return values # There is a special default string when we are fixed-width # The default is otherwise to use str instead of a formatting string if self.float_format is None and self.fixed_width: float_format = '%% .%df' % self.digits else: float_format = self.float_format formatted_values = format_values_with(float_format) if not self.fixed_width: return formatted_values # we need do convert to engineering format if some values are too small # and would appear as 0, or if some values are too big and take too # much space if len(formatted_values) > 0: maxlen = max(len(x) for x in formatted_values) too_long = maxlen > self.digits + 6 else: too_long = False with np.errstate(invalid='ignore'): abs_vals = np.abs(self.values) # this is pretty arbitrary for now # large values: more that 8 characters including decimal symbol # and first digit, hence > 1e6 
has_large_values = (abs_vals > 1e6).any() has_small_values = ((abs_vals < 10**(-self.digits)) & (abs_vals > 0)).any() if has_small_values or (too_long and has_large_values): float_format = '%% .%de' % self.digits formatted_values = format_values_with(float_format) return formatted_values def _format_strings(self): # shortcut if self.formatter is not None: return [self.formatter(x) for x in self.values] return list(self.get_result_as_array()) class IntArrayFormatter(GenericArrayFormatter): def _format_strings(self): formatter = self.formatter or (lambda x: '% d' % x) fmt_values = [formatter(x) for x in self.values] return fmt_values class Datetime64Formatter(GenericArrayFormatter): def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs): super(Datetime64Formatter, self).__init__(values, **kwargs) self.nat_rep = nat_rep self.date_format = date_format def _format_strings(self): """ we by definition have DO NOT have a TZ """ values = self.values if not isinstance(values, DatetimeIndex): values = DatetimeIndex(values) if self.formatter is not None and callable(self.formatter): return [self.formatter(x) for x in values] fmt_values = format_array_from_datetime( values.asi8.ravel(), format=_get_format_datetime64_from_values(values, self.date_format), na_rep=self.nat_rep).reshape(values.shape) return fmt_values.tolist() class PeriodArrayFormatter(IntArrayFormatter): def _format_strings(self): from pandas.tseries.period import IncompatibleFrequency try: values = PeriodIndex(self.values).to_native_types() except IncompatibleFrequency: # periods may contains different freq values = Index(self.values, dtype='object').to_native_types() formatter = self.formatter or (lambda x: '%s' % x) fmt_values = [formatter(x) for x in values] return fmt_values class CategoricalArrayFormatter(GenericArrayFormatter): def __init__(self, values, *args, **kwargs): GenericArrayFormatter.__init__(self, values, *args, **kwargs) def _format_strings(self): fmt_values = format_array(self.values.get_values(), self.formatter, float_format=self.float_format, na_rep=self.na_rep, digits=self.digits, space=self.space, justify=self.justify) return fmt_values def format_percentiles(percentiles): """ Outputs rounded and formatted percentiles. Parameters ---------- percentiles : list-like, containing floats from interval [0,1] Returns ------- formatted : list of strings Notes ----- Rounding precision is chosen so that: (1) if any two elements of ``percentiles`` differ, they remain different after rounding (2) no entry is *rounded* to 0% or 100%. Any non-integer is always rounded to at least 1 decimal place. Examples -------- Keeps all entries different after rounding: >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] No element is rounded to 0% or 100% (unless already equal to it). 
Duplicates are allowed: >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] """ percentiles = np.asarray(percentiles) # It checks for np.NaN as well with np.errstate(invalid='ignore'): if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \ or not np.all(percentiles <= 1): raise ValueError("percentiles should all be in the interval [0,1]") percentiles = 100 * percentiles int_idx = (percentiles.astype(int) == percentiles) if np.all(int_idx): out = percentiles.astype(int).astype(str) return [i + '%' for i in out] unique_pcts = np.unique(percentiles) to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None # Least precision that keeps percentiles unique after rounding prec = -np.floor(np.log10(np.min( np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end) ))).astype(int) prec = max(1, prec) out = np.empty_like(percentiles, dtype=object) out[int_idx] = percentiles[int_idx].astype(int).astype(str) out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) return [i + '%' for i in out] def _is_dates_only(values): # return a boolean if we are only dates (and don't have a timezone) values = DatetimeIndex(values) if values.tz is not None: return False values_int = values.asi8 consider_values = values_int != iNaT one_day_nanos = (86400 * 1e9) even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 if even_days: return True return False def _format_datetime64(x, tz=None, nat_rep='NaT'): if x is None or lib.checknull(x): return nat_rep if tz is not None or not isinstance(x, Timestamp): x = Timestamp(x, tz=tz) return str(x) def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None): if x is None or lib.checknull(x): return nat_rep if not isinstance(x, Timestamp): x = Timestamp(x) if date_format: return x.strftime(date_format) else: return x._date_repr def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None): if is_dates_only: return lambda x, tz=None: _format_datetime64_dateonly( x, nat_rep=nat_rep, date_format=date_format) else: return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep) def _get_format_datetime64_from_values(values, date_format): """ given values and a date_format, return a string format """ is_dates_only = _is_dates_only(values) if is_dates_only: return date_format or "%Y-%m-%d" return date_format class Datetime64TZFormatter(Datetime64Formatter): def _format_strings(self): """ we by definition have a TZ """ values = self.values.asobject is_dates_only = _is_dates_only(values) formatter = (self.formatter or _get_format_datetime64(is_dates_only, date_format=self.date_format)) fmt_values = [formatter(x) for x in values] return fmt_values class Timedelta64Formatter(GenericArrayFormatter): def __init__(self, values, nat_rep='NaT', box=False, **kwargs): super(Timedelta64Formatter, self).__init__(values, **kwargs) self.nat_rep = nat_rep self.box = box def _format_strings(self): formatter = (self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep, box=self.box)) fmt_values = np.array([formatter(x) for x in self.values]) return fmt_values def _get_format_timedelta64(values, nat_rep='NaT', box=False): """ Return a formatter function for a range of timedeltas. 
These will all have the same format argument If box, then show the return in quotes """ values_int = values.astype(np.int64) consider_values = values_int != iNaT one_day_nanos = (86400 * 1e9) even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 all_sub_day = np.logical_and( consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0 if even_days: format = 'even_day' elif all_sub_day: format = 'sub_day' else: format = 'long' def _formatter(x): if x is None or lib.checknull(x): return nat_rep if not isinstance(x, Timedelta): x = Timedelta(x) result = x._repr_base(format=format) if box: result = "'{0}'".format(result) return result return _formatter def _make_fixed_width(strings, justify='right', minimum=None, adj=None): if len(strings) == 0 or justify == 'all': return strings if adj is None: adj = _get_adjustment() max_len = np.max([adj.len(x) for x in strings]) if minimum is not None: max_len = max(minimum, max_len) conf_max = get_option("display.max_colwidth") if conf_max is not None and max_len > conf_max: max_len = conf_max def just(x): if conf_max is not None: if (conf_max > 3) & (adj.len(x) > max_len): x = x[:max_len - 3] + '...' return x strings = [just(x) for x in strings] result = adj.justify(strings, max_len, mode=justify) return result def _trim_zeros(str_floats, na_rep='NaN'): """ Trims zeros, leaving just one before the decimal points if need be. """ trimmed = str_floats def _cond(values): non_na = [x for x in values if x != na_rep] return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and not (any([('e' in x) or ('E' in x) for x in non_na]))) while _cond(trimmed): trimmed = [x[:-1] if x != na_rep else x for x in trimmed] # leave one 0 after the decimal points if need be. return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed] def single_column_table(column, align=None, style=None): table = '<table' if align is not None: table += (' align="%s"' % align) if style is not None: table += (' style="%s"' % style) table += '><tbody>' for i in column: table += ('<tr><td>%s</td></tr>' % str(i)) table += '</tbody></table>' return table def single_row_table(row): # pragma: no cover table = '<table><tbody><tr>' for i in row: table += ('<td>%s</td>' % str(i)) table += '</tr></tbody></table>' return table def _has_names(index): if isinstance(index, MultiIndex): return any([x is not None for x in index.names]) else: return index.name is not None # ----------------------------------------------------------------------------- # Global formatting options _initial_defencoding = None def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. slighly modified from the way IPython handles the same issue. """ import locale global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except AttributeError: pass # try again for something better if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except Exception: pass # when all else fails. this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() # GH3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding def get_console_size(): """Return console size as tuple = (width, height). Returns (None,None) in non-interactive session. 
""" display_width = get_option('display.width') # deprecated. display_height = get_option('display.height', silent=True) # Consider # interactive shell terminal, can detect term size # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term # size non-interactive script, should disregard term size # in addition # width,height have default values, but setting to 'None' signals # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. if com.in_interactive_session(): if com.in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas.core.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.height') else: # pure terminal terminal_width, terminal_height = get_terminal_size() else: terminal_width, terminal_height = None, None # Note if the User sets width/Height to None (auto-detection) # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height) class EngFormatter(object): """ Formats float values according to engineering format. Based on matplotlib.ticker.EngFormatter """ # The SI engineering prefixes ENG_PREFIXES = { -24: "y", -21: "z", -18: "a", -15: "f", -12: "p", -9: "n", -6: "u", -3: "m", 0: "", 3: "k", 6: "M", 9: "G", 12: "T", 15: "P", 18: "E", 21: "Z", 24: "Y" } def __init__(self, accuracy=None, use_eng_prefix=False): self.accuracy = accuracy self.use_eng_prefix = use_eng_prefix def __call__(self, num): """ Formats a number in engineering notation, appending a letter representing the power of 1000 of the original number. Some examples: >>> format_eng(0) # for self.accuracy = 0 ' 0' >>> format_eng(1000000) # for self.accuracy = 1, # self.use_eng_prefix = True ' 1.0M' >>> format_eng("-1e-6") # for self.accuracy = 2 # self.use_eng_prefix = False '-1.00E-06' @param num: the value to represent @type num: either a numeric value or a string that can be converted to a numeric value (as per decimal.Decimal constructor) @return: engineering formatted string """ import decimal import math dnum = decimal.Decimal(str(num)) if decimal.Decimal.is_nan(dnum): return 'NaN' if decimal.Decimal.is_infinite(dnum): return 'inf' sign = 1 if dnum < 0: # pragma: no cover sign = -1 dnum = -dnum if dnum != 0: pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3)) else: pow10 = decimal.Decimal(0) pow10 = pow10.min(max(self.ENG_PREFIXES.keys())) pow10 = pow10.max(min(self.ENG_PREFIXES.keys())) int_pow10 = int(pow10) if self.use_eng_prefix: prefix = self.ENG_PREFIXES[int_pow10] else: if int_pow10 < 0: prefix = 'E-%02d' % (-int_pow10) else: prefix = 'E+%02d' % int_pow10 mant = sign * dnum / (10**pow10) if self.accuracy is None: # pragma: no cover format_str = u("% g%s") else: format_str = (u("%% .%if%%s") % self.accuracy) formatted = format_str % (mant, prefix) return formatted # .strip() def set_eng_float_format(accuracy=3, use_eng_prefix=False): """ Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of decimal digits after the floating point. See also EngFormatter. 
""" set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9)) def _put_lines(buf, lines): if any(isinstance(x, compat.text_type) for x in lines): lines = [compat.text_type(x) for x in lines] buf.write('\n'.join(lines)) def _binify(cols, line_width): adjoin_width = 1 bins = [] curr_width = 0 i_last_column = len(cols) - 1 for i, w in enumerate(cols): w_adjoined = w + adjoin_width curr_width += w_adjoined if i_last_column == i: wrap = curr_width + 1 > line_width and i > 0 else: wrap = curr_width + 2 > line_width and i > 0 if wrap: bins.append(i) curr_width = w_adjoined bins.append(len(cols)) return bins if __name__ == '__main__': arr = np.array([746.03, 0.00, 5620.00, 1592.36]) # arr = np.array([11111111.1, 1.55]) # arr = [314200.0034, 1.4125678] arr = np.array( [327763.3119, 345040.9076, 364460.9915, 398226.8688, 383800.5172, 433442.9262, 539415.0568, 568590.4108, 599502.4276, 620921.8593, 620898.5294, 552427.1093, 555221.2193, 519639.7059, 388175.7, 379199.5854, 614898.25, 504833.3333, 560600., 941214.2857, 1134250., 1219550., 855736.85, 1042615.4286, 722621.3043, 698167.1818, 803750.]) fmt = FloatArrayFormatter(arr, digits=7) print(fmt.get_result())
mit
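The EngFormatter class in the record above maps a float onto a mantissa plus an SI prefix by snapping the exponent of ten to a multiple of 3. A minimal standalone sketch of that idea, assuming nothing from pandas (the names PREFIXES and eng_format are illustrative, not pandas API):

import math

PREFIXES = {-9: "n", -6: "u", -3: "m", 0: "", 3: "k", 6: "M", 9: "G"}

def eng_format(value, accuracy=1):
    # Snap the power of ten to a multiple of 3, clamp to the known
    # prefixes, then print mantissa + prefix.
    if value == 0:
        return "0"
    sign = "-" if value < 0 else ""
    value = abs(value)
    pow10 = int(math.floor(math.log10(value) / 3.0)) * 3
    pow10 = max(min(pow10, max(PREFIXES)), min(PREFIXES))
    mant = value / 10.0 ** pow10
    return "%s%.*f%s" % (sign, accuracy, mant, PREFIXES[pow10])

print(eng_format(1000000))      # 1.0M
print(eng_format(0.00042, 2))   # 420.00u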
fulmicoton/pylearn2
pylearn2/train_extensions/tests/test_wmape_channel.py
32
2531
""" Tests for WMAPE. """ from pylearn2.config import yaml_parse from pylearn2.testing.skip import skip_if_no_sklearn from theano.compile import function import numpy as np from numpy.testing import assert_allclose def test_wmape(): """Test WMapeChannel.""" skip_if_no_sklearn() trainer = yaml_parse.load(test_yaml) trainer.main_loop() X = trainer.model.get_input_space().make_theano_batch() Y = trainer.model.fprop(X) f = function([X], Y, allow_input_downcast=True) y_hat = f(trainer.dataset.X) wmape_num_exp = abs(trainer.dataset.y - y_hat).sum() wmape_den_exp = abs(trainer.dataset.y).sum() exp_array = np.asarray([wmape_num_exp, wmape_den_exp]) wmape_num_real = trainer.model.monitor.channels['train_wmape_num'].\ val_record wmape_den_real = trainer.model.monitor.channels['train_wmape_den'].\ val_record real_array = np.asarray([wmape_num_real[-1], wmape_den_real[-1]]) assert_allclose(exp_array, real_array) test_yaml = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.testing.datasets.\ random_dense_design_matrix_for_regression { rng: !obj:numpy.random.RandomState { seed: 1 }, num_examples: 10, dim: 10, reg_min: 1, reg_max: 1000 }, model: !obj:pylearn2.models.mlp.MLP { nvis: 10, layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 10, irange: 0.05, }, !obj:pylearn2.models.mlp.Linear { layer_name: y, dim: 1, irange: 0., } ], }, algorithm: !obj:pylearn2.training_algorithms.bgd.BGD { monitoring_dataset: { 'train': *train, }, batches_per_iter: 1, monitoring_batches: 1, termination_criterion: !obj:pylearn2.termination_criteria.And { criteria: [ !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, !obj:pylearn2.termination_criteria.MonitorBased { channel_name: train_wmape_num, prop_decrease: 0., N: 1, }, ], }, }, extensions: [ !obj:pylearn2.train_extensions.wmape_channel.WMapeNumeratorChannel {}, !obj:pylearn2.train_extensions.wmape_channel.\ WMapeDenominatorChannel {}, ], } """
bsd-3-clause
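The test above checks the WMAPE numerator and denominator channels against a direct computation on the training data; the same quantity written as a plain numpy function (a sketch, the name wmape is made up):

import numpy as np

def wmape(y_true, y_pred):
    # Weighted mean absolute percentage error: sum of absolute errors
    # normalised by the sum of absolute targets.
    num = np.abs(y_true - y_pred).sum()
    den = np.abs(y_true).sum()
    return num / den

y_true = np.array([10.0, 200.0, 35.0])
y_pred = np.array([12.0, 190.0, 30.0])
print(wmape(y_true, y_pred))   # 17 / 245 ~= 0.069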
sinkap/trappy
trappy/plotter/Constraint.py
1
12189
# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This module provides the Constraint class for handling filters and pivots in a modular fashion. This enable easy constraint application. An implementation of :mod:`trappy.plotter.AbstractDataPlotter` is expected to use the :mod:`trappy.plotter.Constraint.ConstraintManager` class to pivot and filter data and handle multiple column, trace and event inputs. The underlying object that encapsulates a unique set of a data column, data event and the requisite filters is :mod:`trappy.plotter.Constraint.Constraint` """ # pylint: disable=R0913 from trappy.plotter.Utils import decolonize, normalize_list from trappy.utils import listify from trappy.plotter import AttrConf class Constraint(object): """ What is a Constraint? It is collection of data based on two rules: - A Pivot - A Set of Filters - A Data Column For Example a :mod:`pandas.DataFrame` ===== ======== ========= Time CPU Latency ===== ======== ========= 1 x <val> 2 y <val> 3 z <val> 4 a <val> ===== ======== ========= The resultant data will be split for each unique pivot value with the filters applied :: result["x"] = pd.Series.filtered() result["y"] = pd.Series.filtered() result["z"] = pd.Series.filtered() result["a"] = pd.Series.filtered() :param trappy_trace: Input Data :type trappy_trace: :mod:`pandas.DataFrame` or a class derived from :mod:`trappy.trace.BareTrace` :param column: The data column :type column: str :param template: TRAPpy Event :type template: :mod:`trappy.base.Base` event :param trace_index: The index of the trace/data in the overall constraint data :type trace_index: int :param filters: A dictionary of filter values :type filters: dict :param window: A time window to apply to the constraint. E.g. window=(5, 20) will constraint to events that happened between Time=5 to Time=20. :type window: tuple of two ints """ def __init__(self, trappy_trace, pivot, column, template, trace_index, filters, window): self._trappy_trace = trappy_trace self._filters = filters self._pivot = pivot self.column = column self._template = template self._dup_resolved = False self._data = self.populate_data_frame() if window: # We want to include the previous value before the window # and the next after the window in the dataset min_idx = self._data.loc[:window[0]].index.max() max_idx = self._data.loc[window[1]:].index.min() self._data = self._data.loc[min_idx:max_idx] self.result = self._apply() self.trace_index = trace_index def _apply(self): """This method applies the filter on the resultant data on the input column. 
""" data = self._data result = {} try: values = data[self.column] except KeyError: return result if self._pivot == AttrConf.PIVOT: pivot_vals = [AttrConf.PIVOT_VAL] else: pivot_vals = self.pivot_vals(data) for pivot_val in pivot_vals: criterion = values.map(lambda x: True) for key in self._filters.keys(): if key != self._pivot and key in data.columns: criterion = criterion & data[key].map( lambda x: x in self._filters[key]) if pivot_val != AttrConf.PIVOT_VAL: criterion &= data[self._pivot] == pivot_val val_series = values[criterion] if len(val_series) != 0: result[pivot_val] = val_series return result def _uses_trappy_trace(self): if not self._template: return False else: return True def populate_data_frame(self): """Return the populated :mod:`pandas.DataFrame`""" if not self._uses_trappy_trace(): return self._trappy_trace data_container = getattr( self._trappy_trace, decolonize(self._template.name)) return data_container.data_frame def pivot_vals(self, data): """This method returns the unique pivot values for the Constraint's pivot and the column :param data: Input Data :type data: :mod:`pandas.DataFrame` """ if self._pivot == AttrConf.PIVOT: return AttrConf.PIVOT_VAL if self._pivot not in data.columns: return [] pivot_vals = set(data[self._pivot]) if self._pivot in self._filters: pivot_vals = pivot_vals & set(self._filters[self._pivot]) return list(pivot_vals) def __str__(self): name = self.get_data_name() if not self._uses_trappy_trace(): return name + ":" + self.column return name + ":" + \ self._template.name + ":" + self.column def get_data_name(self): """Get name for the data member. This method relies on the "name" attribute for the name. If the name attribute is absent, it associates a numeric name to the respective data element :returns: The name of the data member """ if self._uses_trappy_trace(): if self._trappy_trace.name != "": return self._trappy_trace.name else: return "Trace {}".format(self.trace_index) else: return "DataFrame {}".format(self.trace_index) class ConstraintManager(object): """A class responsible for converting inputs to constraints and also ensuring sanity :param traces: Input Trace data :type traces: :mod:`trappy.trace.BareTrace`, list(:mod:`trappy.trace.BareTrace`) (or a class derived from :mod:`trappy.trace.BareTrace`) :param columns: The column values from the corresponding :mod:`pandas.DataFrame` :type columns: str, list(str) :param pivot: The column around which the data will be pivoted: :type pivot: str :param templates: TRAPpy events :type templates: :mod:`trappy.base.Base` :param filters: A dictionary of values to be applied on the respective columns :type filters: dict :param window: A time window to apply to the constraints :type window: tuple of ints :param zip_constraints: Permutes the columns and traces instead of a one-to-one correspondence :type zip_constraints: bool """ def __init__(self, traces, columns, templates, pivot, filters, window=None, zip_constraints=True): self._ip_vec = [] self._ip_vec.append(listify(traces)) self._ip_vec.append(listify(columns)) self._ip_vec.append(listify(templates)) self._lens = map(len, self._ip_vec) self._max_len = max(self._lens) self._pivot = pivot self._filters = filters self.window = window self._constraints = [] self._trace_expanded = False self._expand() if zip_constraints: self._populate_zip_constraints() else: self._populate_constraints() def _expand(self): """This is really important. 
We need to meet the following criteria for constraint expansion: :: Len[traces] == Len[columns] == Len[templates] Or: :: Permute( Len[traces] = 1 Len[columns] = 1 Len[templates] != 1 ) Permute( Len[traces] = 1 Len[columns] != 1 Len[templates] != 1 ) """ min_len = min(self._lens) max_pos_comp = [ i for i, j in enumerate( self._lens) if j != self._max_len] if self._max_len == 1 and min_len != 1: raise RuntimeError("Essential Arg Missing") if self._max_len > 1: # Are they all equal? if len(set(self._lens)) == 1: return if min_len > 1: raise RuntimeError("Cannot Expand a list of Constraints") for val in max_pos_comp: if val == 0: self._trace_expanded = True self._ip_vec[val] = normalize_list(self._max_len, self._ip_vec[val]) def _populate_constraints(self): """Populate the constraints creating one for each column in each trace In a multi-trace, multicolumn scenario, constraints are created for all the columns in each of the traces. _populate_constraints() creates one constraint for the first trace and first column, the next for the second trace and second column,... This function creates a constraint for every combination of traces and columns possible. """ for trace_idx, trace in enumerate(self._ip_vec[0]): for col in self._ip_vec[1]: template = self._ip_vec[2][trace_idx] constraint = Constraint(trace, self._pivot, col, template, trace_idx, self._filters, self.window) self._constraints.append(constraint) def get_column_index(self, constraint): return self._ip_vec[1].index(constraint.column) def _populate_zip_constraints(self): """Populate the expanded constraints In a multitrace, multicolumn scenario, create constraints for the first trace and the first column, second trace and second column,... that is, as if you run zip(traces, columns) """ for idx in range(self._max_len): if self._trace_expanded: trace_idx = 0 else: trace_idx = idx trace = self._ip_vec[0][idx] col = self._ip_vec[1][idx] template = self._ip_vec[2][idx] self._constraints.append( Constraint(trace, self._pivot, col, template, trace_idx, self._filters, self.window)) def generate_pivots(self, permute=False): """Return a union of the pivot values :param permute: Permute the Traces and Columns :type permute: bool """ pivot_vals = [] for constraint in self._constraints: pivot_vals += constraint.result.keys() p_list = list(set(pivot_vals)) traces = range(self._lens[0]) try: sorted_plist = sorted(p_list, key=int) except (ValueError, TypeError): try: sorted_plist = sorted(p_list, key=lambda x: int(x, 16)) except (ValueError, TypeError): sorted_plist = sorted(p_list) if permute: pivot_gen = ((trace_idx, pivot) for trace_idx in traces for pivot in sorted_plist) return pivot_gen, len(sorted_plist) * self._lens[0] else: return sorted_plist, len(sorted_plist) def constraint_labels(self): """ :return: string to represent the set of Constraints """ return map(str, self._constraints) def __len__(self): return len(self._constraints) def __iter__(self): return iter(self._constraints)
apache-2.0
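The Constraint class above splits one data column into a filtered series per unique pivot value; a small pandas-only sketch of that behaviour (the column names are invented for illustration):

import pandas as pd

df = pd.DataFrame({"Time": [1, 2, 3, 4],
                   "CPU": [0, 1, 0, 1],
                   "Latency": [5.0, 7.0, 6.0, 9.0]}).set_index("Time")

filters = {"CPU": [0, 1]}      # keep only these pivot values
result = {}
for pivot_val, group in df.groupby("CPU"):
    if pivot_val in filters["CPU"]:
        result[pivot_val] = group["Latency"]

print(result[0].tolist())      # [5.0, 6.0]
print(result[1].tolist())      # [7.0, 9.0]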
saiwing-yeung/scikit-learn
examples/calibration/plot_calibration.py
66
4795
""" ====================================== Probability calibration of classifiers ====================================== When performing classification you often want to predict not only the class label, but also the associated probability. This probability gives you some kind of confidence on the prediction. However, not all classifiers provide well-calibrated probabilities, some being over-confident while others being under-confident. Thus, a separate calibration of predicted probabilities is often desirable as a postprocessing. This example illustrates two different methods for this calibration and evaluates the quality of the returned probabilities using Brier's score (see https://en.wikipedia.org/wiki/Brier_score). Compared are the estimated probability using a Gaussian naive Bayes classifier without calibration, with a sigmoid calibration, and with a non-parametric isotonic calibration. One can observe that only the non-parametric model is able to provide a probability calibration that returns probabilities close to the expected 0.5 for most of the samples belonging to the middle cluster with heterogeneous labels. This results in a significantly improved Brier score. """ print(__doc__) # Author: Mathieu Blondel <mathieu@mblondel.org> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Balazs Kegl <balazs.kegl@gmail.com> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD Style. import numpy as np import matplotlib.pyplot as plt from matplotlib import cm from sklearn.datasets import make_blobs from sklearn.naive_bayes import GaussianNB from sklearn.metrics import brier_score_loss from sklearn.calibration import CalibratedClassifierCV from sklearn.model_selection import train_test_split n_samples = 50000 n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here # Generate 3 blobs with 2 classes where the second blob contains # half positive samples and half negative samples. Probability in this # blob is therefore 0.5. 
centers = [(-5, -5), (0, 0), (5, 5)] X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0, centers=centers, shuffle=False, random_state=42) y[:n_samples // 2] = 0 y[n_samples // 2:] = 1 sample_weight = np.random.RandomState(42).rand(y.shape[0]) # split train, test for calibration X_train, X_test, y_train, y_test, sw_train, sw_test = \ train_test_split(X, y, sample_weight, test_size=0.9, random_state=42) # Gaussian Naive-Bayes with no calibration clf = GaussianNB() clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights prob_pos_clf = clf.predict_proba(X_test)[:, 1] # Gaussian Naive-Bayes with isotonic calibration clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic') clf_isotonic.fit(X_train, y_train, sw_train) prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1] # Gaussian Naive-Bayes with sigmoid calibration clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid') clf_sigmoid.fit(X_train, y_train, sw_train) prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1] print("Brier scores: (the smaller the better)") clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test) print("No calibration: %1.3f" % clf_score) clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test) print("With isotonic calibration: %1.3f" % clf_isotonic_score) clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test) print("With sigmoid calibration: %1.3f" % clf_sigmoid_score) ############################################################################### # Plot the data and the predicted probabilities plt.figure() y_unique = np.unique(y) colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size)) for this_y, color in zip(y_unique, colors): this_X = X_train[y_train == this_y] this_sw = sw_train[y_train == this_y] plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5, label="Class %s" % this_y) plt.legend(loc="best") plt.title("Data") plt.figure() order = np.lexsort((prob_pos_clf, )) plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score) plt.plot(prob_pos_isotonic[order], 'g', linewidth=3, label='Isotonic calibration (%1.3f)' % clf_isotonic_score) plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3, label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score) plt.plot(np.linspace(0, y_test.size, 51)[1::2], y_test[order].reshape(25, -1).mean(1), 'k', linewidth=3, label=r'Empirical') plt.ylim([-0.05, 1.05]) plt.xlabel("Instances sorted according to predicted probability " "(uncalibrated GNB)") plt.ylabel("P(y=1)") plt.legend(loc="upper left") plt.title("Gaussian naive Bayes probabilities") plt.show()
bsd-3-clause
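The calibration example above ranks the classifiers by Brier score; the metric itself is just the mean squared difference between predicted probabilities and the binary outcomes, which is easy to verify by hand:

import numpy as np
from sklearn.metrics import brier_score_loss

y_true = np.array([0, 1, 1, 0])
p_hat = np.array([0.1, 0.8, 0.6, 0.4])

manual = np.mean((p_hat - y_true) ** 2)
print(manual)                           # 0.0925
print(brier_score_loss(y_true, p_hat))  # same value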
abhitopia/tensorflow
tensorflow/examples/learn/boston.py
33
1981
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of DNNRegressor for Housing dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from sklearn import datasets from sklearn import metrics from sklearn import model_selection from sklearn import preprocessing import tensorflow as tf def main(unused_argv): # Load dataset boston = datasets.load_boston() x, y = boston.data, boston.target # Split dataset into train / test x_train, x_test, y_train, y_test = model_selection.train_test_split( x, y, test_size=0.2, random_state=42) # Scale data (training set) to 0 mean and unit standard deviation. scaler = preprocessing.StandardScaler() x_train = scaler.fit_transform(x_train) # Build 2 layer fully connected DNN with 10, 10 units respectively. feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input( x_train) regressor = tf.contrib.learn.DNNRegressor( feature_columns=feature_columns, hidden_units=[10, 10]) # Fit regressor.fit(x_train, y_train, steps=5000, batch_size=1) # Transform x_transformed = scaler.transform(x_test) # Predict and score y_predicted = list(regressor.predict(x_transformed, as_iterable=True)) score = metrics.mean_squared_error(y_predicted, y_test) print('MSE: {0:f}'.format(score)) if __name__ == '__main__': tf.app.run()
apache-2.0
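One detail worth noting in the script above: the StandardScaler is fit on the training split only and then reused on the test split, so both are scaled with the same mean and standard deviation. A toy illustration of that fit/transform split:

import numpy as np
from sklearn.preprocessing import StandardScaler

x_train = np.array([[1.0], [2.0], [3.0]])
x_test = np.array([[4.0]])

scaler = StandardScaler()
x_train_s = scaler.fit_transform(x_train)   # statistics learned from train
x_test_s = scaler.transform(x_test)         # same statistics applied to test

print(scaler.mean_)        # [2.]
print(x_test_s.ravel())    # [2.449...] == (4 - 2) / std(train)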
droundy/deft
papers/fuzzy-fmt/compute-isotherm.py
1
3963
#!/usr/bin/python2 #This program runs figs/new-melting.cpp for many different densities #ie. figs/new-melting.mkdat --kT 0.5 --n 1.06 --gwstart 0.01 --gwend 0.2 --gwstep 0.01 --fv 0.01 --dx 0.5 --mc-error 0.001 --mc-constant 5 --mc-prefactor 50000 --filename isotherm-kT-0.5_tensor.dat --tensor #NOTE: Run this plot script from directory deft/papers/fuzzy-fmt #with comand ./compute-isotherm.py --kT [temp] --nmin [starting density] --nmax [ending density] --tensor(optional) #For list of the many other options enter ./compute-isotherm.py --help import numpy as np import matplotlib.mlab as mlab import matplotlib.pyplot as plt import os import argparse parser = argparse.ArgumentParser(description='Creates data for plots of gw vs n and FE vs n.') parser.add_argument('--kT', metavar='temperature', type=float, help='reduced temperature - REQUIRED') parser.add_argument('--nmin', type=float, help='min density - REQUIRED') parser.add_argument('--nmax', type=float, help='max density - REQUIRED') parser.add_argument('--dn', type=float, help='change of density', default=0.01) parser.add_argument('--dgw', type=float, help='change in gw', default=0.01) parser.add_argument('--maxgw', type=float, help='max gw', default=0.2) parser.add_argument('--mingw', type=float, help='min gw', default=0.01) parser.add_argument('--fv', metavar='vacancies', type=float, help='fraction of vacancies - Default 0') parser.add_argument('--gw', metavar='width', type=float, help='width of Gaussian - Default 0.01') parser.add_argument('--dx', metavar='dx', type=float, help='scaling dx - Default 0.5') parser.add_argument('--mcerror', metavar='mc_error', type=float, help='monte carlo mc_error - Default 0.001') parser.add_argument('--mcconstant', metavar='const', type=int, help='monte carlo integration mc_constant - Default 5') parser.add_argument('--mcprefactor', metavar='prefac', type=int, help='monte carlo integration mc_prefactor - Default 50000') parser.add_argument('--tensor', action='store_true', help='use tensor weight') args=parser.parse_args() kT=args.kT if args.dn: dn=args.dn else : dn=0.01 if args.dgw: dgw=args.dgw else : dgw=0.01 if args.maxgw: maxgw=args.maxgw else : maxgw=0.2 if args.mingw: mingw=args.mingw else : mingw=0.01 if args.fv: fv=args.fv else : fv=0 if args.dx: dx=args.dx else : dx=.5 if args.mcerror: mcerror=args.mcerror else : mcerror=0.001 if args.mcconstant: mcconstant=args.mcconstant else : mcconstant=5 if args.mcprefactor: mcprefactor=args.mcprefactor else : mcprefactor=50000 if args.tensor: for n in np.arange(args.nmin, args.nmax, dn): cmd = 'rq run -J isotherm-kT%g-n%g-tensor' % (kT, n) cmd += ' figs/new-melting.mkdat --kT %g --n %g' % (kT, n) cmd += ' --gwstart %g --gwend %g --gwstep %g' % (mingw, maxgw, dgw) cmd += ' --fv %g --dx %g' % (fv, dx) cmd += ' --mc-error %g --mc-constant %g --mc-prefactor %g' % (mcerror, mcconstant, mcprefactor) cmd += ' --filename isotherm-kT-%g_tensor.dat' % kT cmd += ' --tensor' print(cmd) os.system(cmd) else : for n in np.arange(args.nmin, args.nmax, dn): cmd = 'rq run -J isotherm-kT%g-n%g' % (kT, n) cmd += ' figs/new-melting.mkdat --kT %g --n %g' % (kT, n) cmd += ' --gwstart %g --gwend %g --gwstep %g' % (mingw, maxgw, dgw) cmd += ' --fv %g --dx %g' % (fv, dx) cmd += ' --mc-error %g --mc-constant %g --mc-prefactor %g' % (mcerror, mcconstant, mcprefactor) cmd += ' --filename isotherm-kT-%g.dat' % kT print(cmd) os.system(cmd)
gpl-2.0
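The script above builds each new-melting.mkdat invocation as a string and launches it with os.system; a hedged sketch of the same launch loop using subprocess with an argument list instead, which sidesteps shell quoting (flag names copied from the script, the helper name run_melting is made up, and only a subset of the flags is shown):

import subprocess

def run_melting(kT, n, dry_run=True):
    cmd = ["figs/new-melting.mkdat",
           "--kT", "%g" % kT, "--n", "%g" % n,
           "--filename", "isotherm-kT-%g.dat" % kT]
    if dry_run:
        print(" ".join(cmd))
    else:
        subprocess.call(cmd)

run_melting(0.5, 1.06)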
dendisuhubdy/tensorflow
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
137
2219
# encoding: utf-8 # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Categorical tests.""" # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS from tensorflow.contrib.learn.python.learn.preprocessing import categorical from tensorflow.python.platform import test class CategoricalTest(test.TestCase): """Categorical tests.""" def testSingleCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor(min_frequency=1) x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"], [1], ["0"], [np.nan], [3]]) self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]]) def testSingleCategoricalProcessorPandasSingleDF(self): if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top cat_processor = categorical.CategoricalProcessor() data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]}) x = list(cat_processor.fit_transform(data)) self.assertAllEqual(list(x), [[1], [2], [1]]) def testMultiCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor( min_frequency=0, share=False) x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]]) self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]]) if __name__ == "__main__": test.main()
apache-2.0
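The tests above exercise a processor that maps categorical values to small integer ids, reserving 0 for missing or too-rare values. A standalone sketch of that idea (fit_vocab and transform are illustrative names; the exact id assignment of the contrib class may differ):

from collections import Counter

def fit_vocab(column, min_frequency=1):
    # Count non-missing values and give an id > 0 to anything seen more
    # than min_frequency times; 0 stays reserved for missing/rare values.
    values = [str(v) for v in column if v is not None and str(v) != "nan"]
    counts = Counter(values)
    vocab = {}
    for value, count in counts.most_common():
        if count > min_frequency:
            vocab[value] = len(vocab) + 1
    return vocab

def transform(column, vocab):
    return [vocab.get(str(v), 0) for v in column]

data = ["0", 1, float("nan"), "C", "C", 1, "0", float("nan"), 3]
vocab = fit_vocab(data, min_frequency=1)
print(transform(data, vocab))   # e.g. [1, 2, 0, 3, 3, 2, 1, 0, 0]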
avmarchenko/exatomic
exatomic/gaussian/output.py
3
30472
# -*- coding: utf-8 -*- # Copyright (c) 2015-2018, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 """ Gaussian Output Editor ######################### Editor classes for various types of Gaussian output files """ from __future__ import absolute_import from __future__ import print_function from __future__ import division import re import six import numpy as np import pandas as pd from collections import defaultdict from exa import TypedMeta from exa.util.units import Length, Energy from .editor import Editor from exatomic.base import z2sym from exatomic.core.frame import compute_frame_from_atom from exatomic.core.frame import Frame from exatomic.core.atom import Atom, Frequency from exatomic.core.basis import (BasisSet, BasisSetOrder, Overlap, deduplicate_basis_sets) from exatomic.core.orbital import Orbital, MOMatrix, Excitation from exatomic.algorithms.basis import lmap, lorder from numba import jit @jit(nopython=True, nogil=True, cache=True) def _triangular_indices(ncol, nbas): dim = nbas * (nbas + 1) // 2 idx = np.empty((dim, 3), dtype=np.int64) cnt = 0 for i in range(ncol): for j in range(i, nbas, ncol): for k in range(j, nbas): idx[cnt,0] = j idx[cnt,1] = k idx[cnt,2] = 0 cnt += 1 return idx class GauMeta(TypedMeta): atom = Atom basis_set = BasisSet orbital = Orbital momatrix = MOMatrix basis_set_order = BasisSetOrder frame = Frame excitation = Excitation frequency = Frequency overlap = Overlap multipole = pd.DataFrame class Output(six.with_metaclass(GauMeta, Editor)): def _parse_triangular_matrix(self, regex, column='coef', values_only=False): _rebas01 = r'basis functions,' found = self.find_next(_rebas01, keys_only=True) nbas = int(self[found].split()[0]) found = self.find_next(regex, keys_only=True) if not found: return ncol = len(self[found + 1].split()) start = found + 2 rmdr = nbas % ncol skips = np.array(list(reversed(range(rmdr, nbas + max(1, rmdr), ncol)))) skips = np.cumsum(skips) + np.arange(len(skips)) stop = start + skips[-1] matrix = self.pandas_dataframe(start, stop, ncol + 1, index_col=0, skiprows=skips, ).unstack().dropna().apply( lambda x: x.replace('D', 'E') ).astype(np.float64).values if values_only: return matrix idxs = _triangular_indices(ncol, nbas) return pd.DataFrame.from_dict({'chi0': idxs[:,0], 'chi1': idxs[:,1], 'frame': idxs[:,2], column: matrix}) def parse_atom(self): # Atom flags _regeom01 = 'Input orientation' _regeom02 = 'Standard orientation' # Find our data found = self.find(_regeom01, _regeom02, keys_only=True) # Check if nosymm was specified key = _regeom02 if found[_regeom02] else _regeom01 starts = np.array(found[key]) + 5 # Prints converged geometry twice but only need it once starts = starts[:-1] if len(starts) > 1 else starts stop = starts[0] # Find where the data stops while '-------' not in self[stop]: stop += 1 # But it should be same sized array each time stops = starts + (stop - starts[0]) dfs = [] # Iterate over frames for i, (start, stop) in enumerate(zip(starts, stops)): atom = self.pandas_dataframe(start, stop, 6) atom['frame'] = i dfs.append(atom) atom = pd.concat(dfs).reset_index(drop=True) # Drop the column of atomic type (whatever that is) atom.drop([2], axis=1, inplace=True) # Name the data atom.columns = ['set', 'Z', 'x', 'y', 'z', 'frame'] # Zero-based indexing atom['set'] -= 1 # Convert to atomic units atom['x'] *= Length['Angstrom', 'au'] atom['y'] *= Length['Angstrom', 'au'] atom['z'] *= Length['Angstrom', 'au'] # Map atomic symbols onto Z numbers atom['symbol'] = atom['Z'].map(z2sym) 
self.atom = atom def parse_basis_set(self): # Basis flags _rebas02 = 'AO basis set in the form of general basis input' _rebas03 = ' (Standard|General) basis' _basrep = {'D 0': 'D0 ', 'F 0': 'F0 ', 'G 0': 'G0 ', 'H 0': 'H0 ', 'I 0': 'I0 '} _rebaspat = re.compile('|'.join(_basrep.keys())) # Find the basis set found = self.regex(_rebas02, _rebas03, keys_only=True) if not found[_rebas02]: return start = stop = found[_rebas02][0] + 1 while self[stop].strip(): stop += 1 # Raw data df = self.pandas_dataframe(start, stop, 4) def _padx(srs): return [0] + srs.tolist() + [df.shape[0]] # Get some indices for appropriate columns setdx = _padx(df[0][df[0] == '****'].index) shldx = _padx(df[3][~np.isnan(df[3])].index) lindx = df[0][df[0].str.lower().isin(lorder + ['sp'])] # Populate the df df['L'] = lindx.str.lower().map(lmap) df['L'] = df['L'].fillna(method='ffill').fillna( method='bfill').astype(np.int64) df['center'] = np.concatenate([np.repeat(i, stop - start) for i, (start, stop) in enumerate(zip(setdx, setdx[1:]))]) df['shell'] = np.concatenate([np.repeat(i-1, stop - start) for i, (start, stop) in enumerate(zip(shldx, shldx[1:]))]) # Complicated way to get shells but it is flat maxshl = df.groupby('center').apply(lambda x: x.shell.max() + 1) maxshl.index += 1 maxshl[0] = 0 df['shell'] = df['shell'] - df['center'].map(maxshl) # Drop all the garbage todrop = setdx[:-1] + [i+1 for i in setdx[:-2]] + lindx.index.tolist() df.drop(todrop, inplace=True) # Keep cleaning if df[0].dtype == 'object': df[0] = df[0].str.replace('D', 'E').astype(np.float64) if df[1].dtype == 'object': df[1] = df[1].str.replace('D', 'E').astype(np.float64) try: sp = np.isnan(df[2]).sum() == df.shape[0] except TypeError: df[2] = df[2].str.replace('D', 'E').astype(np.float64) sp = True df.rename(columns={0: 'alpha', 1: 'd'}, inplace=True) # Deduplicate basis sets and expand 'SP' shells if present df, setmap = deduplicate_basis_sets(df, sp=sp) spherical = '5D' in self[found[_rebas03][0]] if df['L'].max() < 2: spherical = True self.basis_set = BasisSet(df) self.meta['spherical'] = spherical self.atom['set'] = self.atom['set'].map(setmap) def parse_orbital(self): _rebas01 = r'basis functions,' # Orbital flags _realphaelec = 'alpha electrons' _reorb01 = '(?=Alpha|Beta).*(?=occ|virt)' _reorb02 = 'Orbital symmetries' _orbslice = [slice(10 * i, 10 * i + 9) for i in range(5)] _symrep = {'Occupied': '', 'Virtual': '', 'Alpha Orbitals:': '', 'Beta Orbitals:': '', '\(': '', '\)': ''} _resympat = re.compile('|'.join(_symrep.keys())) _symrep['('] = '' _symrep[')'] = '' # Find where our data is found = self.regex(_reorb01, _reorb02, _rebas01, _realphaelec) # If no orbital energies, quit if not found[_reorb01]: return # Check if open shell os = any(('Beta' in ln for lno, ln in found[_reorb01])) #UNUSED? 
#occ = 1 if os else 2 # Find number of electrons ae, x, x, be, x, x = found[_realphaelec][0][1].split() ae, be = int(ae), int(be) # Get orbital energies ens = '\n'.join([ln.split('-- ')[1] for i, ln in found[_reorb01]]) ens = pd.read_fwf(six.StringIO(ens), header=None, widths=np.repeat(10, 5)).stack().values # Other arrays orbital = Orbital.from_energies(ens, ae, be, os=os) # Symmetry labels if found[_reorb02]: # Gaussian seems to print out a lot of these blocks # maybe a better way to deal with this allsyms = [] match = ['(', 'Orbitals'] for i, (start, ln) in enumerate(found[_reorb02]): # Find the start, stop indices for each block while match[0] not in self[start]: start += 1 stop = start + 1 while any((i in self[stop] for i in match)): stop += 1 # Clean up the text block so it is just symmetries syms = _resympat.sub(lambda m: _symrep[m.group(0)], ' '.join([i.strip() for i in self[start:stop]])).split() # cat the syms for each block together allsyms += syms # Add it to our dataframe orbital['symmetry'] = allsyms[-orbital.shape[0]:] self.orbital = orbital def parse_momatrix(self): """ Parses the MO matrix if asked for in the input. Note: Requires specification of pop(full) or pop(no) or the like. """ if hasattr(self, '_momatrix'): return _rebas01 = r'basis functions,' # MOMatrix flags _remomat01 = r'pop.*(?=full|no)' _remomat02 = 'Orbital Coefficients' _basrep = {'D 0': 'D0 ', 'F 0': 'F0 ', 'G 0': 'G0 ', 'H 0': 'H0 ', 'I 0': 'I0 '} _rebaspat = re.compile('|'.join(_basrep.keys())) # Check if a full MO matrix was specified in the input check = self.regex(_remomat01, stop=1000, flags=re.IGNORECASE) if not check: return # Find approximately where our data is found = self.find(_remomat02, _rebas01) # Get some dimensions ndim = len(found[_remomat02]) # If something goes wrong if not ndim: return nbas = int(found[_rebas01][0][1].split()[0]) nblocks = np.int64(np.ceil(nbas / 5)) # Allocate a big ol' array coefs = np.empty((nbas ** 2, ndim), dtype=np.float64) # Dynamic column generation hasn't been worked out yet colnames = ['coef'] + ['coef' + str(i) for i in range(1, ndim)] # Iterate over where the data was found # c counts the column in the resulting momatrix table _csv_args = {'delim_whitespace': True, 'header': None} for c, (lno, ln) in enumerate(found[_remomat02]): gap = 0 while not 'eigenvalues' in self[lno + gap].lower(): gap += 1 start = lno + gap + 1 stop = start + nbas # The basis set order is printed with every chunk of eigenvectors if not c: mapr = self.basis_set.groupby(['set', 'L']).apply( lambda x: x['shell'].unique()).to_dict() self.basis_set_order = _basis_set_order(self[start:stop], mapr, self.atom['set']) # Some fudge factors due to extra lines being printed space = start - lno - 1 fnbas = nbas + space span = start + fnbas * nblocks # Finally get where our chunks are starts = np.arange(start, span, fnbas) stops = np.arange(stop, span, fnbas) stride = 0 # b counts the blocks of eigenvectors per column in momatrix for b, (start, stop) in enumerate(zip(starts, stops)): # Number of eigenvectors in this block ncol = len(self[start][21:].split()) step = nbas * ncol _csv_args['names'] = range(ncol) # Massage the text so that we can read csv block = '\n'.join([ln[21:] for ln in self[start:stop]]) block = _rebaspat.sub(lambda m: _basrep[m.group(0)], block) # Enplacen the resultant unstacked values coefs[stride:stride + nbas * ncol, c] = pd.read_fwf( six.StringIO(block), header=None, widths=np.repeat(10, 5)).unstack().dropna().values stride += step # Index chi, phi chis = 
np.tile(range(nbas), nbas) orbs = np.repeat(range(nbas), nbas) momatrix = pd.DataFrame(coefs, columns=colnames) momatrix['chi'] = chis momatrix['orbital'] = orbs # Frame not really implemented for momatrix momatrix['frame'] = 0 self.momatrix = momatrix def parse_basis_set_order(self): if hasattr(self, '_basis_set_order'): return self.parse_momatrix() def parse_frame(self): # Frame flags _retoten = 'SCF Done:' _realphaelec = 'alpha electrons' _reelecstate = 'The electronic state' # Get the default frame from the atom table self.frame = compute_frame_from_atom(self.atom) # Find our data found = self.find(_retoten, _realphaelec, _reelecstate) # Extract just the total SCF energies ens = [float(ln.split()[4]) for lno, ln in found[_retoten]] # If 'SCF Done' prints out more times than frames try: ens = ens if len(self.frame) == len(ens) else ens[-len(self.frame):] self.frame['E_tot'] = ens except ValueError: pass # We will assume number of electrons doesn't change per frame ae, x, x, be, x, x = found[_realphaelec][0][1].split() self.frame['N_e'] = int(ae) + int(be) self.frame['N_a'] = int(ae) self.frame['N_b'] = int(be) # Try to get the electronic state but don't try too hard try: states = [] #for lno, ln in found[_reelecstate]: for _, ln in found[_reelecstate]: if 'initial' in ln: continue states.append(ln.split()[4].replace('.', '')) self.frame['state'] = states except (IndexError, ValueError): pass def parse_excitation(self): # TDDFT flags _retddft = 'TD' _reexcst = 'Excited State' chk = self.find(_retddft, stop=1000, keys_only=True) if not chk: return # Find the data found = self.find(_reexcst) keeps, maps, summ = [], [] ,[] for i, (lno, ln) in enumerate(found): summ.append(ln) lno += 1 while '->' in self[lno]: keeps.append(lno) maps.append(i) lno += 1 cols = [0, 1, 2, 'kind', 'eV', 3, 'nm', 4, 'osc', 's2'] summ = pd.read_csv(six.StringIO('\n'.join([ln for lno, ln in found])), delim_whitespace=True, header=None, names=cols, usecols=[c for c in cols if type(c) == str]) summ['s2'] = summ['s2'].str[7:].astype(np.float64) summ['osc'] = summ['osc'].str[2:].astype(np.float64) cols = ['occ', 0, 'virt', 'cont'] conts = pd.read_csv(six.StringIO('\n'.join([self[i] for i in keeps])), delim_whitespace=True, header=None, names=cols, usecols=[c for c in cols if type(c) == str]) conts['map'] = maps for col in summ.columns: conts[col] = conts['map'].map(summ[col]) conts['energy'] = conts['eV'] * Energy['eV', 'Ha'] conts['frame'] = conts['group'] = 0 self.excitation = conts def parse_frequency(self): # Frequency flags _refreq = 'Freq' found = self.regex(_refreq, stop=1000, flags=re.IGNORECASE) # Don't need the input deck or 2 from the summary at the end found = self.find(_refreq)[1:-2] if not found: return # Total lines per block minus the unnecessary ones span = found[1][0] - found[0][0] - 7 dfs, fdx = [], 0 # Iterate over what we found for lno, ln in found: # Get the frequencies first freqs = ln[15:].split() nfreqs = len(freqs) # Get just the atom displacement vectors start = lno + 5 stop = start + span cols = range(2 + 3 * nfreqs) df = self.pandas_dataframe(start, stop, ncol=cols) # Split up the df and unstack it slices = [list(range(2 + i, 2 + 3 * nfreqs, 3)) for i in range(nfreqs)] dx, dy, dz = [df[i].unstack().values for i in slices] # Generate the appropriate dimensions of other columns labels = np.tile(df[0].values, nfreqs) zs = np.tile(df[1].values, nfreqs) freqdxs = np.repeat(range(fdx, fdx + nfreqs), df.shape[0]) freqs = np.repeat(freqs, df.shape[0]) fdx += nfreqs # Put it all together stacked = 
pd.DataFrame.from_dict({'Z': zs, 'label': labels, 'dx': dx, 'dy': dy, 'dz': dz, 'frequency': freqs, 'freqdx': freqdxs}) stacked['symbol'] = stacked['Z'].map(z2sym) dfs.append(stacked) # Now put all our frequencies together frequency = pd.concat(dfs).reset_index(drop=True) # Pretty sure displacements are in cartesian angstroms # TODO: verify with an external program that vibrational # modes look the same as the ones generated with # this methodology. frequency['dx'] *= Length['Angstrom', 'au'] frequency['dy'] *= Length['Angstrom', 'au'] frequency['dz'] *= Length['Angstrom', 'au'] # Frame not really implemented here either frequency['frame'] = 0 self.frequency = frequency # Below are triangular matrices -- One electron integrals def parse_overlap(self): _reovl01 = '*** Overlap ***' overlap = self._parse_triangular_matrix(_reovl01, 'coef') if overlap is not None: self.overlap = overlap def parse_multipole(self): _reixn = 'IX= {}' mltpl = self._parse_triangular_matrix(_reixn.format(1), 'ix1') if mltpl is not None: mltpl['ix2'] = self._parse_triangular_matrix(_reixn.format(2), 'ix2', True) mltpl['ix3'] = self._parse_triangular_matrix(_reixn.format(3), 'ix3', True) self.multipole = mltpl def __init__(self, *args, **kwargs): super(Output, self).__init__(*args, **kwargs) class Fchk(six.with_metaclass(GauMeta, Editor)): def _intme(self, fitem, idx=0): """Helper gets an integer of interest.""" return int(self[fitem[idx]].split()[-1]) def _dfme(self, fitem, dim, idx=0): """Helper gets an array of interest.""" start = fitem[idx] + 1 col = min(len(self[start].split()), dim) stop = np.ceil(start + dim / col).astype(np.int64) return self.pandas_dataframe(start, stop, col).stack().values def parse_atom(self): # Atom regex _renat = 'Number of atoms' _reznum = 'Atomic numbers' _rezeff = 'Nuclear charges' _reposition = 'Current cartesian coordinates' # Find line numbers of interest found = self.find(_renat, _reznum, _rezeff, _reposition, stop=100, keys_only=True) # Number of atoms in current geometry nat = self._intme(found[_renat]) # Atom identifiers znums = self._dfme(found[_reznum], nat) # Atomic symbols symbols = list(map(lambda x: z2sym[x], znums)) # Z effective if ECPs are used zeffs = self._dfme(found[_rezeff], nat).astype(np.int64) # Atomic positions pos = self._dfme(found[_reposition], nat * 3).reshape(nat, 3) frame = np.zeros(len(symbols), dtype=np.int64) self.atom = pd.DataFrame.from_dict({'symbol': symbols, 'Zeff': zeffs, 'frame': frame, 'x': pos[:,0], 'y': pos[:,1], 'z': pos[:,2], 'set': range(1, len(symbols) + 1)}) def parse_basis_set(self): # Basis set regex _rebasdim = 'Number of basis functions' _recontdim = 'Number of contracted shells' _reprimdim = 'Number of primitive shells' _reshelltype = 'Shell types' _reprimpershell = 'Number of primitives per shell' _reshelltoatom = 'Shell to atom map' _reprimexp = 'Primitive exponents' _recontcoef = 'Contraction coefficients' _repcontcoef = 'P(S=P) Contraction coefficients' found = self.find(_rebasdim, _reshelltype, _reprimpershell, _reshelltoatom, _reprimexp, _recontcoef, _repcontcoef, keys_only=True) # Number of basis functions - UNUSED #nbas = self._intme(found[_rebasdim]) # Number of 'shell to atom' mappings dim1 = self._intme(found[_reshelltype]) # Number of primitive exponents dim2 = self._intme(found[_reprimexp]) # Handle cartesian vs. 
spherical here # only spherical for now shelltypes = self._dfme(found[_reshelltype], dim1).astype(np.int64) primpershell = self._dfme(found[_reprimpershell], dim1).astype(np.int64) shelltoatom = self._dfme(found[_reshelltoatom], dim1).astype(np.int64) primexps = self._dfme(found[_reprimexp], dim2) contcoefs = self._dfme(found[_recontcoef], dim2) if found[_repcontcoef]: pcontcoefs = self._dfme(found[_repcontcoef], dim2) # Keep track of some things ptr, prevatom, shell, sp = 0, 0, 0, False # Temporary storage of basis set data shldx = defaultdict(int) ddict = defaultdict(list) for atom, nprim, shelltype in zip(shelltoatom, primpershell, shelltypes): if atom != prevatom: prevatom, shldx = atom, defaultdict(int) # Collect the data for this basis set if shelltype == -1: shelltype, sp = 0, True L = np.abs(shelltype) step = ptr + nprim ddict[1].extend(contcoefs[ptr:step].tolist()) ddict[0].extend(primexps[ptr:step].tolist()) ddict['center'].extend([atom] * nprim) ddict['shell'].extend([shldx[L]] * nprim) ddict['L'].extend([L] * nprim) shldx[L] += 1 if sp: shldx[1] = shldx[0] + 1 ddict[1].extend(pcontcoefs[ptr:step].tolist()) ddict[0].extend(primexps[ptr:step].tolist()) ddict['center'].extend([atom] * nprim) ddict['shell'].extend([shldx[1]] * nprim) ddict['L'].extend([1] * nprim) shldx[1] += 1 ptr += nprim sp = False df = pd.DataFrame.from_dict(ddict) df.rename(columns={0: 'alpha', 1: 'd'}, inplace=True) sets, setmap = deduplicate_basis_sets(df) self.basis_set = sets self.meta['spherical'] = True self.atom['set'] = self.atom['set'].map(setmap) # cnts = {key: -1 for key in range(10)} # pcen, pl, pn, shfns = 0, 0, 1, [] # for cen, n, l, seht in zip(df['center'], df['N'], df['L'], # df['center'].map(sets)): # if not pcen == cen: cnts = {key: -1 for key in range(10)} # if (pl != l) or (pn != n) or (pcen != cen): cnts[l] += 1 # shfns.append(mapr[(seht, l)][cnts[l]]) # pcen, pl, pn = cen, l, n # df['shell'] = shfns def parse_basis_set_order(self): # Unique basis sets sets = self.basis_set.groupby('set') data = [] # Gaussian orders basis functions strangely # Will likely need an additional mapping for cartesian lamp = {0: [0], 1: [1, -1, 0], 2: [0, 1, -1, 2, -2], 3: [0, 1, -1, 2, -2, 3, -3], 4: [0, 1, -1, 2, -2, 3, -3, 4, -4], 5: [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5]} # What was tag column for in basis set order? 
key = 'tag' if 'tag' in self.atom.columns else 'symbol' # Iterate over atoms for cent, bset, tag in zip(self.atom.index.values, self.atom['set'], self.atom[key]): seht = sets.get_group(bset) # Iterate over basis set pL, psh = -1, -1 for L, sh in zip(seht['L'], seht['shell']): if (pL == L) and (psh == sh): continue for ml in lamp[L]: data.append((tag, cent, L, ml, sh, 0)) pL = L psh = sh columns = ('tag', 'center', 'L', 'ml', 'shell', 'frame') self.basis_set_order = pd.DataFrame(data, columns=columns) def parse_momatrix(self): # MOMatrix regex _rebasdim = 'Number of basis functions' _reindepdim = 'Number of independant functions' _realphaen = 'Alpha Orbital Energies' _reamomatrix = 'Alpha MO coefficients' _rebmomatrix = 'Beta MO coefficients' found = self.find(_rebasdim, _reindepdim, _reamomatrix, _rebmomatrix, keys_only=True) # Again number of basis functions nbas = self._intme(found[_rebasdim]) try: ninp = self._intme(found[_reindepdim]) except IndexError: ninp = nbas ncoef = self._intme(found[_reamomatrix]) # Alpha or closed shell MO coefficients coefs = self._dfme(found[_reamomatrix], ncoef) # Beta MO coefficients if they exist bcoefs = self._dfme(found[_rebmomatrix], ncoef) \ if found[_rebmomatrix] else None # Indexing chis = np.tile(range(nbas), ninp) orbitals = np.repeat(range(ninp), nbas) frame = np.zeros(ncoef, dtype=np.int64) self.momatrix = pd.DataFrame.from_dict({'chi': chis, 'orbital': orbitals, 'coef': coefs, 'frame': frame}) if bcoefs is not None: self.momatrix['coef1'] = bcoefs def parse_orbital(self): # Orbital regex _reorboc = 'Number of .*electrons' _reorben = 'Orbital Energies' found = self.regex(_reorben, _reorboc, keys_only=True) ae = self._intme(found[_reorboc], idx=1) be = self._intme(found[_reorboc], idx=2) nbas = self._intme(found[_reorben]) ens = np.concatenate([self._dfme(found[_reorben], nbas, idx=i) for i, start in enumerate(found[_reorben])]) os = nbas != len(ens) self.orbital = Orbital.from_energies(ens, ae, be, os=os) def __init__(self, *args, **kwargs): super(Fchk, self).__init__(*args, **kwargs) # def _dedup(sets, sp=False): # unique, setmap, cnt = [], {}, 0 # sets = sets.groupby('center') # chk = [0, 1] # for center, seht in sets: # for i, other in enumerate(unique): # if other.shape != seht.shape: continue # if np.allclose(other[chk], seht[chk]): # setmap[center] = i # break # else: # unique.append(seht) # setmap[center] = cnt # cnt += 1 # if sp: unique = _expand_sp(unique) # sets = pd.concat(unique).reset_index(drop=True) # try: sets.drop([2, 3], axis=1, inplace=True) # except ValueError: pass # sets.rename(columns={'center': 'set', 0: 'alpha', 1: 'd'}, inplace=True) # sets['set'] = sets['set'].map(setmap) # sets['frame'] = 0 # return sets, setmap # # # def _expand_sp(unique): # expand = [] # for seht in unique: # if np.isnan(seht[2]).sum() == seht.shape[0]: # expand.append(seht) # continue # sps = seht[2][~np.isnan(seht[2])].index # shls = len(seht.ix[sps]['shell'].unique()) # dupl = seht.ix[sps[0]:sps[-1]].copy() # dupl[1] = dupl[2] # dupl['L'] = 1 # dupl['shell'] += shls # last = seht.ix[sps[-1] + 1:].copy() # last['shell'] += shls # expand.append(pd.concat([seht.ix[:sps[0] - 1], # seht.ix[sps[0]:sps[-1]], # dupl, last])) # return expand def _basis_set_order(chunk, mapr, sets): # Gaussian only prints the atom center # and label once for all basis functions first = len(chunk[0]) - len(chunk[0].lstrip(' ')) + 1 df = pd.read_fwf(six.StringIO('\n'.join(chunk)), widths=[first, 4, 3, 2, 4], header=None) df[1].fillna(method='ffill', inplace=True) df[1] = 
df[1].astype(np.int64) - 1 df[2].fillna(method='ffill', inplace=True) df.rename(columns={1: 'center', 3: 'N', 4: 'ang'}, inplace=True) df['N'] = df['N'].astype(np.int64) - 1 if 'XX' in df['ang'].values: df[['L', 'l', 'm', 'n']] = df['ang'].map({'S': [0, 0, 0, 0], 'XX': [2, 2, 0, 0], 'XY': [2, 1, 1, 0], 'XZ': [2, 1, 0, 1], 'YY': [2, 0, 2, 0], 'YZ': [2, 0, 1, 1], 'ZZ': [2, 0, 0, 2], 'PX': [1, 1, 0, 0], 'PY': [1, 0, 1, 0], 'PZ': [1, 0, 0, 1], }).apply(tuple).apply(pd.Series) else: df['L'] = df['ang'].str[:1].str.lower().map(lmap).astype(np.int64) df['ml'] = df['ang'].str[1:] df['ml'].update(df['ml'].map({'': 0, 'X': 1, 'Y': -1, 'Z': 0})) df['ml'] = df['ml'].astype(np.int64) cnts = {key: -1 for key in range(10)} pcen, pl, pn, shfns = 0, 0, 1, [] for cen, n, l, seht in zip(df['center'], df['N'], df['L'], df['center'].map(sets)): if not pcen == cen: cnts = {key: -1 for key in range(10)} if (pl != l) or (pn != n) or (pcen != cen): cnts[l] += 1 shfns.append(mapr[(seht, l)][cnts[l]]) pcen, pl, pn = cen, l, n df['shell'] = shfns df.drop([0, 2, 'N', 'ang'], axis=1, inplace=True) df['frame'] = 0 return df
apache-2.0
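The Output.parse_overlap method in the parser above stores Gaussian's lower-triangular one-electron integral printouts as a flat column of coefficients. A minimal sketch, assuming row-major lower-triangular ordering, of how such a flat array can be expanded back into a full symmetric matrix; the helper name and the 3x3 example values are illustrative, not taken from the parser.

import numpy as np

def triangular_to_square(tri, nbas):
    """Expand nbas*(nbas+1)//2 lower-triangular values (row-major:
    a11, a21, a22, a31, ...) into an (nbas, nbas) symmetric array."""
    mat = np.zeros((nbas, nbas))
    mat[np.tril_indices(nbas)] = tri
    # Mirror the lower triangle onto the upper one without
    # double-counting the diagonal.
    return mat + mat.T - np.diag(np.diag(mat))

# Example: a 3x3 overlap-like matrix rebuilt from its 6 unique elements.
s = triangular_to_square([1.0, 0.2, 1.0, 0.1, 0.3, 1.0], 3)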
zfrenchee/pandas
pandas/tests/indexing/test_floats.py
1
27853
# -*- coding: utf-8 -*- import pytest from warnings import catch_warnings import numpy as np from pandas import Series, DataFrame, Index, Float64Index from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm class TestFloatIndexers(object): def check(self, result, original, indexer, getitem): """ comparator for results we need to take care if we are indexing on a Series or a frame """ if isinstance(original, Series): expected = original.iloc[indexer] else: if getitem: expected = original.iloc[:, indexer] else: expected = original.iloc[indexer] assert_almost_equal(result, expected) def test_scalar_error(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors # this duplicates the code below # but is spefically testing for the error # message for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeCategoricalIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex, tm.makeIntIndex, tm.makeRangeIndex]: i = index(5) s = Series(np.arange(len(i)), index=i) def f(): s.iloc[3.0] tm.assert_raises_regex(TypeError, 'cannot do positional indexing', f) def f(): s.iloc[3.0] = 0 pytest.raises(TypeError, f) def test_scalar_non_numeric(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeCategoricalIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex]: i = index(5) for s in [Series( np.arange(len(i)), index=i), DataFrame( np.random.randn( len(i), len(i)), index=i, columns=i)]: # getting for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.iloc, False), (lambda x: x, True)]: def f(): with catch_warnings(record=True): idxr(s)[3.0] # gettitem on a DataFrame is a KeyError as it is indexing # via labels on the columns if getitem and isinstance(s, DataFrame): error = KeyError else: error = TypeError pytest.raises(error, f) # label based can be a TypeError or KeyError def f(): s.loc[3.0] if s.index.inferred_type in ['string', 'unicode', 'mixed']: error = KeyError else: error = TypeError pytest.raises(error, f) # contains assert 3.0 not in s # setting with a float fails with iloc def f(): s.iloc[3.0] = 0 pytest.raises(TypeError, f) # setting with an indexer if s.index.inferred_type in ['categorical']: # Value or Type Error pass elif s.index.inferred_type in ['datetime64', 'timedelta64', 'period']: # these should prob work # and are inconsisten between series/dataframe ATM # for idxr in [lambda x: x.ix, # lambda x: x]: # s2 = s.copy() # def f(): # idxr(s2)[3.0] = 0 # pytest.raises(TypeError, f) pass else: s2 = s.copy() s2.loc[3.0] = 10 assert s2.index.is_object() for idxr in [lambda x: x.ix, lambda x: x]: s2 = s.copy() with catch_warnings(record=True): idxr(s2)[3.0] = 0 assert s2.index.is_object() # fallsback to position selection, series only s = Series(np.arange(len(i)), index=i) s[3] pytest.raises(TypeError, lambda: s[3.0]) def test_scalar_with_mixed(self): s2 = Series([1, 2, 3], index=['a', 'b', 'c']) s3 = Series([1, 2, 3], index=['a', 'b', 1.5]) # lookup in a pure string index # with an invalid indexer for idxr in [lambda x: x.ix, lambda x: x, lambda x: x.iloc]: def f(): with catch_warnings(record=True): idxr(s2)[1.0] pytest.raises(TypeError, f) pytest.raises(KeyError, lambda: s2.loc[1.0]) result = s2.loc['b'] expected = 2 assert result == expected # mixed index so we have label # indexing for idxr in [lambda x: x]: def f(): idxr(s3)[1.0] pytest.raises(TypeError, f) result = 
idxr(s3)[1] expected = 2 assert result == expected # mixed index so we have label # indexing for idxr in [lambda x: x.ix]: with catch_warnings(record=True): def f(): idxr(s3)[1.0] pytest.raises(TypeError, f) result = idxr(s3)[1] expected = 2 assert result == expected pytest.raises(TypeError, lambda: s3.iloc[1.0]) pytest.raises(KeyError, lambda: s3.loc[1.0]) result = s3.loc[1.5] expected = 3 assert result == expected def test_scalar_integer(self): # test how scalar float indexers work on int indexes # integer index for index in [tm.makeIntIndex, tm.makeRangeIndex]: i = index(5) for s in [Series(np.arange(len(i))), DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)]: # coerce to equal int for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: with catch_warnings(record=True): result = idxr(s)[3.0] self.check(result, s, 3, getitem) # coerce to equal int for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: if isinstance(s, Series): def compare(x, y): assert x == y expected = 100 else: compare = tm.assert_series_equal if getitem: expected = Series(100, index=range(len(s)), name=3) else: expected = Series(100., index=range(len(s)), name=3) s2 = s.copy() with catch_warnings(record=True): idxr(s2)[3.0] = 100 result = idxr(s2)[3.0] compare(result, expected) result = idxr(s2)[3] compare(result, expected) # contains # coerce to equal int assert 3.0 in s def test_scalar_float(self): # scalar float indexers work on a float index index = Index(np.arange(5.)) for s in [Series(np.arange(len(index)), index=index), DataFrame(np.random.randn(len(index), len(index)), index=index, columns=index)]: # assert all operations except for iloc are ok indexer = index[3] for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: # getting with catch_warnings(record=True): result = idxr(s)[indexer] self.check(result, s, 3, getitem) # setting s2 = s.copy() def f(): with catch_warnings(record=True): idxr(s2)[indexer] = expected with catch_warnings(record=True): result = idxr(s2)[indexer] self.check(result, s, 3, getitem) # random integer is a KeyError with catch_warnings(record=True): pytest.raises(KeyError, lambda: idxr(s)[3.5]) # contains assert 3.0 in s # iloc succeeds with an integer expected = s.iloc[3] s2 = s.copy() s2.iloc[3] = expected result = s2.iloc[3] self.check(result, s, 3, False) # iloc raises with a float pytest.raises(TypeError, lambda: s.iloc[3.0]) def g(): s2.iloc[3.0] = 0 pytest.raises(TypeError, g) def test_slice_non_numeric(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex]: index = index(5) for s in [Series(range(5), index=index), DataFrame(np.random.randn(5, 2), index=index)]: # getitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: def f(): s.iloc[l] pytest.raises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: def f(): with catch_warnings(record=True): idxr(s)[l] pytest.raises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: def f(): s.iloc[l] = 0 pytest.raises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: def f(): with catch_warnings(record=True): idxr(s)[l] = 0 pytest.raises(TypeError, f) def test_slice_integer(self): # same as above, but for Integer based indexes # these 
coerce to a like integer # oob indicates if we are out of bounds # of positional indexing for index, oob in [(tm.makeIntIndex(5), False), (tm.makeRangeIndex(5), False), (tm.makeIntIndex(5) + 10, True)]: # s is an in-range index s = Series(range(5), index=index) # getitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: with catch_warnings(record=True): result = idxr(s)[l] # these are all label indexing # except getitem which is positional # empty if oob: indexer = slice(0, 0) else: indexer = slice(3, 5) self.check(result, s, indexer, False) # positional indexing def f(): s[l] pytest.raises(TypeError, f) # getitem out-of-bounds for l in [slice(-6, 6), slice(-6.0, 6.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: with catch_warnings(record=True): result = idxr(s)[l] # these are all label indexing # except getitem which is positional # empty if oob: indexer = slice(0, 0) else: indexer = slice(-6, 6) self.check(result, s, indexer, False) # positional indexing def f(): s[slice(-6.0, 6.0)] pytest.raises(TypeError, f) # getitem odd floats for l, res1 in [(slice(2.5, 4), slice(3, 5)), (slice(2, 3.5), slice(2, 4)), (slice(2.5, 3.5), slice(3, 4))]: for idxr in [lambda x: x.loc, lambda x: x.ix]: with catch_warnings(record=True): result = idxr(s)[l] if oob: res = slice(0, 0) else: res = res1 self.check(result, s, res, False) # positional indexing def f(): s[l] pytest.raises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: sc = s.copy() with catch_warnings(record=True): idxr(sc)[l] = 0 result = idxr(sc)[l].values.ravel() assert (result == 0).all() # positional indexing def f(): s[l] = 0 pytest.raises(TypeError, f) def test_integer_positional_indexing(self): """ make sure that we are raising on positional indexing w.r.t. 
an integer index """ s = Series(range(2, 6), index=range(2, 6)) result = s[2:4] expected = s.iloc[2:4] assert_series_equal(result, expected) for idxr in [lambda x: x, lambda x: x.iloc]: for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]: def f(): idxr(s)[l] pytest.raises(TypeError, f) def test_slice_integer_frame_getitem(self): # similar to above, but on the getitem dim (of a DataFrame) for index in [tm.makeIntIndex, tm.makeRangeIndex]: index = index(5) s = DataFrame(np.random.randn(5, 2), index=index) def f(idxr): # getitem for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]: result = idxr(s)[l] indexer = slice(0, 2) self.check(result, s, indexer, False) # positional indexing def f(): s[l] pytest.raises(TypeError, f) # getitem out-of-bounds for l in [slice(-10, 10), slice(-10.0, 10.0)]: result = idxr(s)[l] self.check(result, s, slice(-10, 10), True) # positional indexing def f(): s[slice(-10.0, 10.0)] pytest.raises(TypeError, f) # getitem odd floats for l, res in [(slice(0.5, 1), slice(1, 2)), (slice(0, 0.5), slice(0, 1)), (slice(0.5, 1.5), slice(1, 2))]: result = idxr(s)[l] self.check(result, s, res, False) # positional indexing def f(): s[l] pytest.raises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: sc = s.copy() idxr(sc)[l] = 0 result = idxr(sc)[l].values.ravel() assert (result == 0).all() # positional indexing def f(): s[l] = 0 pytest.raises(TypeError, f) f(lambda x: x.loc) with catch_warnings(record=True): f(lambda x: x.ix) def test_slice_float(self): # same as above, but for floats index = Index(np.arange(5.)) + 0.1 for s in [Series(range(5), index=index), DataFrame(np.random.randn(5, 2), index=index)]: for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: expected = s.iloc[3:4] for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x]: # getitem with catch_warnings(record=True): result = idxr(s)[l] if isinstance(s, Series): tm.assert_series_equal(result, expected) else: tm.assert_frame_equal(result, expected) # setitem s2 = s.copy() with catch_warnings(record=True): idxr(s2)[l] = 0 result = idxr(s2)[l].values.ravel() assert (result == 0).all() def test_floating_index_doc_example(self): index = Index([1.5, 2, 3, 4.5, 5]) s = Series(range(5), index=index) assert s[3] == 2 assert s.loc[3] == 2 assert s.loc[3] == 2 assert s.iloc[3] == 3 def test_floating_misc(self): # related 236 # scalar/slicing of a float index s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64) # label based slicing result1 = s[1.0:3.0] result2 = s.loc[1.0:3.0] result3 = s.loc[1.0:3.0] assert_series_equal(result1, result2) assert_series_equal(result1, result3) # exact indexing when found result1 = s[5.0] result2 = s.loc[5.0] result3 = s.loc[5.0] assert result1 == result2 assert result1 == result3 result1 = s[5] result2 = s.loc[5] result3 = s.loc[5] assert result1 == result2 assert result1 == result3 assert s[5.0] == s[5] # value not found (and no fallbacking at all) # scalar integers pytest.raises(KeyError, lambda: s.loc[4]) pytest.raises(KeyError, lambda: s.loc[4]) pytest.raises(KeyError, lambda: s[4]) # fancy floats/integers create the correct entry (as nan) # fancy tests expected = Series([2, 0], index=Float64Index([5.0, 0.0])) for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float assert_series_equal(s[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) expected = Series([2, 0], index=Index([5, 0], dtype='int64')) for fancy_idx in [[5, 0], np.array([5, 0])]: # int 
assert_series_equal(s[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) # all should return the same as we are slicing 'the same' result1 = s.loc[2:5] result2 = s.loc[2.0:5.0] result3 = s.loc[2.0:5] result4 = s.loc[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) # previously this did fallback indexing result1 = s[2:5] result2 = s[2.0:5.0] result3 = s[2.0:5] result4 = s[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) result1 = s.loc[2:5] result2 = s.loc[2.0:5.0] result3 = s.loc[2.0:5] result4 = s.loc[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) # combined test result1 = s.loc[2:5] result2 = s.loc[2:5] result3 = s[2:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) # list selection result1 = s[[0.0, 5, 10]] result2 = s.loc[[0.0, 5, 10]] result3 = s.loc[[0.0, 5, 10]] result4 = s.iloc[[0, 2, 4]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) result1 = s[[1.6, 5, 10]] result2 = s.loc[[1.6, 5, 10]] result3 = s.loc[[1.6, 5, 10]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, Series( [np.nan, 2, 4], index=[1.6, 5, 10])) result1 = s[[0, 1, 2]] result2 = s.loc[[0, 1, 2]] result3 = s.loc[[0, 1, 2]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, Series( [0.0, np.nan, np.nan], index=[0, 1, 2])) result1 = s.loc[[2.5, 5]] result2 = s.loc[[2.5, 5]] assert_series_equal(result1, result2) assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0])) result1 = s[[2.5]] result2 = s.loc[[2.5]] result3 = s.loc[[2.5]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, Series([1], index=[2.5])) def test_floating_tuples(self): # see gh-13509 s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name='foo') result = s[0.0] assert result == (1, 1) expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name='foo') s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name='foo') result = s[0.0] tm.assert_series_equal(result, expected) def test_float64index_slicing_bug(self): # GH 5557, related to slicing a float index ser = {256: 2321.0, 1: 78.0, 2: 2716.0, 3: 0.0, 4: 369.0, 5: 0.0, 6: 269.0, 7: 0.0, 8: 0.0, 9: 0.0, 10: 3536.0, 11: 0.0, 12: 24.0, 13: 0.0, 14: 931.0, 15: 0.0, 16: 101.0, 17: 78.0, 18: 9643.0, 19: 0.0, 20: 0.0, 21: 0.0, 22: 63761.0, 23: 0.0, 24: 446.0, 25: 0.0, 26: 34773.0, 27: 0.0, 28: 729.0, 29: 78.0, 30: 0.0, 31: 0.0, 32: 3374.0, 33: 0.0, 34: 1391.0, 35: 0.0, 36: 361.0, 37: 0.0, 38: 61808.0, 39: 0.0, 40: 0.0, 41: 0.0, 42: 6677.0, 43: 0.0, 44: 802.0, 45: 0.0, 46: 2691.0, 47: 0.0, 48: 3582.0, 49: 0.0, 50: 734.0, 51: 0.0, 52: 627.0, 53: 70.0, 54: 2584.0, 55: 0.0, 56: 324.0, 57: 0.0, 58: 605.0, 59: 0.0, 60: 0.0, 61: 0.0, 62: 3989.0, 63: 10.0, 64: 42.0, 65: 0.0, 66: 904.0, 67: 0.0, 68: 88.0, 69: 70.0, 70: 8172.0, 71: 0.0, 72: 0.0, 73: 0.0, 74: 64902.0, 75: 0.0, 76: 347.0, 77: 0.0, 78: 36605.0, 79: 0.0, 80: 379.0, 81: 70.0, 82: 0.0, 83: 0.0, 84: 3001.0, 85: 0.0, 86: 1630.0, 87: 7.0, 88: 364.0, 89: 0.0, 90: 67404.0, 91: 9.0, 92: 0.0, 93: 0.0, 94: 7685.0, 95: 0.0, 96: 1017.0, 97: 0.0, 98: 2831.0, 99: 0.0, 100: 2963.0, 101: 0.0, 102: 854.0, 103: 0.0, 
104: 0.0, 105: 0.0, 106: 0.0, 107: 0.0, 108: 0.0, 109: 0.0, 110: 0.0, 111: 0.0, 112: 0.0, 113: 0.0, 114: 0.0, 115: 0.0, 116: 0.0, 117: 0.0, 118: 0.0, 119: 0.0, 120: 0.0, 121: 0.0, 122: 0.0, 123: 0.0, 124: 0.0, 125: 0.0, 126: 67744.0, 127: 22.0, 128: 264.0, 129: 0.0, 260: 197.0, 268: 0.0, 265: 0.0, 269: 0.0, 261: 0.0, 266: 1198.0, 267: 0.0, 262: 2629.0, 258: 775.0, 257: 0.0, 263: 0.0, 259: 0.0, 264: 163.0, 250: 10326.0, 251: 0.0, 252: 1228.0, 253: 0.0, 254: 2769.0, 255: 0.0} # smoke test for the repr s = Series(ser) result = s.value_counts() str(result)
bsd-3-clause
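The pandas test module above exercises how scalar float indexers behave on float versus integer indexes. A minimal sketch, assuming a reasonably recent pandas, of the core contract those tests encode: plain and .loc lookups are label-based on a float index, while .iloc stays strictly positional and rejects float keys.

import numpy as np
import pandas as pd

s = pd.Series(np.arange(5), index=np.arange(5.0))  # float index 0.0 .. 4.0

assert s[3.0] == 3      # label lookup on the float index
assert s.loc[3] == 3    # integer key matches the float label 3.0
assert s.iloc[3] == 3   # positional lookup with an integer

try:
    s.iloc[3.0]         # float positions are rejected
except TypeError:
    pass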
ukuleleplayer/pureples
pureples/experiments/mountain_car/run_all_mountain_car.py
1
16639
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import neat_mountain_car import hyperneat_mountain_car import es_hyperneat_mountain_car_small import es_hyperneat_mountain_car_medium import es_hyperneat_mountain_car_large import gym import multiprocessing as multi from multiprocessing import Manager # Initialize lists to keep track during run. manager = Manager() neat_stats, hyperneat_stats, es_hyperneat_small_stats = manager.list([]), manager.list([]), manager.list([]) es_hyperneat_medium_stats, es_hyperneat_large_stats = manager.list([]), manager.list([]) neat_run_one_fitnesses, hyperneat_run_one_fitnesses, es_hyperneat_small_run_one_fitnesses = [], [], [] es_hyperneat_medium_run_one_fitnesses, es_hyperneat_large_run_one_fitnesses = [], [] neat_run_ten_fitnesses, hyperneat_run_ten_fitnesses, es_hyperneat_small_run_ten_fitnesses = [], [], [] es_hyperneat_medium_run_ten_fitnesses, es_hyperneat_large_run_ten_fitnesses = [], [] neat_one_solved, hyperneat_one_solved, es_hyperneat_small_one_solved = 0, 0, 0 es_hyperneat_medium_one_solved, es_hyperneat_large_one_solved = 0, 0 neat_ten_solved, hyperneat_ten_solved, es_hyperneat_small_ten_solved = 0, 0, 0 es_hyperneat_medium_ten_solved, es_hyperneat_large_ten_solved = 0, 0 runs = 16 inputs = range(runs) gens = 200 fit_threshold = -110 max_fit = -110 env = gym.make("MountainCar-v0") # Run the experiments. def run(i): print("This is run #" + str(i)) neat_stats.append(neat_mountain_car.run(gens, env)[1]) hyperneat_stats.append(hyperneat_mountain_car.run(gens, env)[1]) es_hyperneat_small_stats.append(es_hyperneat_mountain_car_small.run(gens, env)[1]) es_hyperneat_medium_stats.append(es_hyperneat_mountain_car_medium.run(gens, env)[1]) es_hyperneat_large_stats.append(es_hyperneat_mountain_car_large.run(gens, env)[1]) p = multi.Pool(multi.cpu_count()) p.map(run,range(runs)) # Average the NEAT runs. temp_fit_one = [0.0] * gens temp_fit_ten = [0.0] * gens for (stat_one, stat_ten) in neat_stats: if stat_one.best_genome().fitness > max_fit: neat_run_one_fitnesses.append(max_fit) else: neat_run_one_fitnesses.append(stat_one.best_genome().fitness) if stat_ten.best_genome().fitness > max_fit: neat_run_ten_fitnesses.append(max_fit) else: neat_run_ten_fitnesses.append(stat_one.best_genome().fitness) if stat_one.best_genome().fitness >= fit_threshold: neat_one_solved += 1 if stat_ten.best_genome().fitness >= fit_threshold: neat_ten_solved += 1 for i in range(gens): if i < len(stat_one.most_fit_genomes): if stat_one.most_fit_genomes[i].fitness > max_fit: temp_fit_one[i] += max_fit else: temp_fit_one[i] += stat_one.most_fit_genomes[i].fitness else: temp_fit_one[i] += max_fit if i < len(stat_ten.most_fit_genomes): if stat_ten.most_fit_genomes[i].fitness > max_fit: temp_fit_ten[i] += max_fit else: temp_fit_ten[i] += stat_ten.most_fit_genomes[i].fitness else: temp_fit_ten[i] += max_fit neat_one_average_fit = [x / runs for x in temp_fit_one] neat_ten_average_fit = [x / runs for x in temp_fit_ten] # Average the HyperNEAT runs. 
temp_fit_one = [0.0] * gens temp_fit_ten = [0.0] * gens for (stat_one, stat_ten) in hyperneat_stats: if stat_one.best_genome().fitness > max_fit: hyperneat_run_one_fitnesses.append(max_fit) else: hyperneat_run_one_fitnesses.append(stat_one.best_genome().fitness) if stat_ten.best_genome().fitness > max_fit: hyperneat_run_ten_fitnesses.append(max_fit) else: hyperneat_run_ten_fitnesses.append(stat_one.best_genome().fitness) if stat_one.best_genome().fitness >= fit_threshold: hyperneat_one_solved += 1 if stat_ten.best_genome().fitness >= fit_threshold: hyperneat_ten_solved += 1 for i in range(gens): if i < len(stat_one.most_fit_genomes): if stat_one.most_fit_genomes[i].fitness > max_fit: temp_fit_one[i] += max_fit else: temp_fit_one[i] += stat_one.most_fit_genomes[i].fitness else: temp_fit_one[i] += max_fit if i < len(stat_ten.most_fit_genomes): if stat_ten.most_fit_genomes[i].fitness > max_fit: temp_fit_ten[i] += max_fit else: temp_fit_ten[i] += stat_ten.most_fit_genomes[i].fitness else: temp_fit_ten[i] += max_fit hyperneat_one_average_fit = [x / runs for x in temp_fit_one] hyperneat_ten_average_fit = [x / runs for x in temp_fit_ten] # Average the small ES-HyperNEAT runs. temp_fit_one = [0.0] * gens temp_fit_ten = [0.0] * gens for (stat_one, stat_ten) in es_hyperneat_small_stats: if stat_one.best_genome().fitness > max_fit: es_hyperneat_small_run_one_fitnesses.append(max_fit) else: es_hyperneat_small_run_one_fitnesses.append(stat_one.best_genome().fitness) if stat_ten.best_genome().fitness > max_fit: es_hyperneat_small_run_ten_fitnesses.append(max_fit) else: es_hyperneat_small_run_ten_fitnesses.append(stat_one.best_genome().fitness) if stat_one.best_genome().fitness >= fit_threshold: es_hyperneat_small_one_solved += 1 if stat_ten.best_genome().fitness >= fit_threshold: es_hyperneat_small_ten_solved += 1 for i in range(gens): if i < len(stat_one.most_fit_genomes): if stat_one.most_fit_genomes[i].fitness > max_fit: temp_fit_one[i] += max_fit else: temp_fit_one[i] += stat_one.most_fit_genomes[i].fitness else: temp_fit_one[i] += max_fit if i < len(stat_ten.most_fit_genomes): if stat_ten.most_fit_genomes[i].fitness > max_fit: temp_fit_ten[i] += max_fit else: temp_fit_ten[i] += stat_ten.most_fit_genomes[i].fitness else: temp_fit_ten[i] += max_fit es_hyperneat_small_one_average_fit = [x / runs for x in temp_fit_one] es_hyperneat_small_ten_average_fit = [x / runs for x in temp_fit_ten] # Average the medium ES-HyperNEAT runs. 
temp_fit_one = [0.0] * gens temp_fit_ten = [0.0] * gens for (stat_one, stat_ten) in es_hyperneat_medium_stats: if stat_one.best_genome().fitness > max_fit: es_hyperneat_medium_run_one_fitnesses.append(max_fit) else: es_hyperneat_medium_run_one_fitnesses.append(stat_one.best_genome().fitness) if stat_ten.best_genome().fitness > max_fit: es_hyperneat_medium_run_ten_fitnesses.append(max_fit) else: es_hyperneat_medium_run_ten_fitnesses.append(stat_one.best_genome().fitness) if stat_one.best_genome().fitness >= fit_threshold: es_hyperneat_medium_one_solved += 1 if stat_ten.best_genome().fitness >= fit_threshold: es_hyperneat_medium_ten_solved += 1 for i in range(gens): if i < len(stat_one.most_fit_genomes): if stat_one.most_fit_genomes[i].fitness > max_fit: temp_fit_one[i] += max_fit else: temp_fit_one[i] += stat_one.most_fit_genomes[i].fitness else: temp_fit_one[i] += max_fit if i < len(stat_ten.most_fit_genomes): if stat_ten.most_fit_genomes[i].fitness > max_fit: temp_fit_ten[i] += max_fit else: temp_fit_ten[i] += stat_ten.most_fit_genomes[i].fitness else: temp_fit_ten[i] += max_fit es_hyperneat_medium_one_average_fit = [x / runs for x in temp_fit_one] es_hyperneat_medium_ten_average_fit = [x / runs for x in temp_fit_ten] # Average the large ES-HyperNEAT runs. temp_fit_one = [0.0] * gens temp_fit_ten = [0.0] * gens for (stat_one, stat_ten) in es_hyperneat_large_stats: if stat_one.best_genome().fitness > max_fit: es_hyperneat_large_run_one_fitnesses.append(max_fit) else: es_hyperneat_large_run_one_fitnesses.append(stat_one.best_genome().fitness) if stat_ten.best_genome().fitness > max_fit: es_hyperneat_large_run_ten_fitnesses.append(max_fit) else: es_hyperneat_large_run_ten_fitnesses.append(stat_one.best_genome().fitness) if stat_one.best_genome().fitness >= fit_threshold: es_hyperneat_large_one_solved += 1 if stat_ten.best_genome().fitness >= fit_threshold: es_hyperneat_large_ten_solved += 1 for i in range(gens): if i < len(stat_one.most_fit_genomes): if stat_one.most_fit_genomes[i].fitness > max_fit: temp_fit_one[i] += max_fit else: temp_fit_one[i] += stat_one.most_fit_genomes[i].fitness else: temp_fit_one[i] += max_fit if i < len(stat_ten.most_fit_genomes): if stat_ten.most_fit_genomes[i].fitness > max_fit: temp_fit_ten[i] += max_fit else: temp_fit_ten[i] += stat_ten.most_fit_genomes[i].fitness else: temp_fit_ten[i] += max_fit es_hyperneat_large_one_average_fit = [x / runs for x in temp_fit_one] es_hyperneat_large_ten_average_fit = [x / runs for x in temp_fit_ten] # Write fitnesses to files. # NEAT. thefile = open('neat_mountain_car_run_fitnesses.txt', 'w+') thefile.write("NEAT one\n") for item in neat_run_one_fitnesses: thefile.write("%s\n" % item) if max_fit in neat_one_average_fit: thefile.write("NEAT one solves mountain_car at generation: " + str(neat_one_average_fit.index(max_fit))) else: thefile.write("NEAT one does not solve mountain_car with best fitness: " + str(neat_one_average_fit[gens-1])) thefile.write("\nNEAT one solves mountain_car in " + str(neat_one_solved) + " out of " + str(runs) + " runs.\n") thefile.write("NEAT ten\n") for item in neat_run_ten_fitnesses: thefile.write("%s\n" % item) if max_fit in neat_ten_average_fit: thefile.write("NEAT ten solves mountain_car at generation: " + str(neat_ten_average_fit.index(max_fit))) else: thefile.write("NEAT ten does not solve mountain_car with best fitness: " + str(neat_ten_average_fit[gens-1])) thefile.write("\nNEAT ten solves mountain_car in " + str(neat_ten_solved) + " out of " + str(runs) + " runs.\n") # HyperNEAT. 
thefile = open('hyperneat_mountain_car_run_fitnesses.txt', 'w+') thefile.write("HyperNEAT one\n") for item in hyperneat_run_one_fitnesses: thefile.write("%s\n" % item) if max_fit in hyperneat_one_average_fit: thefile.write("HyperNEAT one solves mountain_car at generation: " + str(hyperneat_one_average_fit.index(max_fit))) else: thefile.write("HyperNEAT one does not solve mountain_car with best fitness: " + str(hyperneat_one_average_fit[gens-1])) thefile.write("\nHyperNEAT one solves mountain_car in " + str(hyperneat_one_solved) + " out of " + str(runs) + " runs.\n") thefile.write("HyperNEAT ten\n") for item in hyperneat_run_ten_fitnesses: thefile.write("%s\n" % item) if max_fit in hyperneat_ten_average_fit: thefile.write("HyperNEAT ten solves mountain_car at generation: " + str(hyperneat_ten_average_fit.index(max_fit))) else: thefile.write("HyperNEAT ten does not solve mountain_car with best fitness: " + str(hyperneat_ten_average_fit[gens-1])) thefile.write("\nHyperNEAT ten solves mountain_car in " + str(hyperneat_ten_solved) + " out of " + str(runs) + " runs.\n") # ES-HyperNEAT small. thefile = open('es_hyperneat_mountain_car_small_run_fitnesses.txt', 'w+') thefile.write("ES-HyperNEAT small one\n") for item in es_hyperneat_small_run_one_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_small_one_average_fit: thefile.write("ES-HyperNEAT small one solves mountain_car at generation: " + str(es_hyperneat_small_one_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT small one does not solve mountain_car with best fitness: " + str(es_hyperneat_small_one_average_fit[gens-1])) thefile.write("\nES-HyperNEAT small one solves mountain_car in " + str(es_hyperneat_small_one_solved) + " out of " + str(runs) + " runs.\n") thefile.write("ES-HyperNEAT small ten\n") for item in es_hyperneat_small_run_ten_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_small_ten_average_fit: thefile.write("ES-HyperNEAT small ten solves mountain_car at generation: " + str(es_hyperneat_small_ten_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT small ten does not solve mountain_car with best fitness: " + str(es_hyperneat_small_ten_average_fit[gens-1])) thefile.write("\nES-HyperNEAT small ten solves mountain_car in " + str(es_hyperneat_small_ten_solved) + " out of " + str(runs) + " runs.\n") # ES-HyperNEAT medium. 
thefile = open('es_hyperneat_mountain_car_medium_run_fitnesses.txt', 'w+') thefile.write("ES-HyperNEAT medium one\n") for item in es_hyperneat_medium_run_one_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_medium_one_average_fit: thefile.write("ES-HyperNEAT medium one solves mountain_car at generation: " + str(es_hyperneat_medium_one_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT medium one does not solve mountain_car with best fitness: " + str(es_hyperneat_medium_one_average_fit[gens-1])) thefile.write("\nES-HyperNEAT medium one solves mountain_car in " + str(es_hyperneat_medium_one_solved) + " out of " + str(runs) + " runs.\n") thefile.write("ES-HyperNEAT medium ten\n") for item in es_hyperneat_medium_run_ten_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_medium_ten_average_fit: thefile.write("ES-HyperNEAT medium ten solves mountain_car at generation: " + str(es_hyperneat_medium_ten_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT medium ten does not solve mountain_car with best fitness: " + str(es_hyperneat_medium_ten_average_fit[gens-1])) thefile.write("\nES-HyperNEAT medium ten solves mountain_car in " + str(es_hyperneat_medium_ten_solved) + " out of " + str(runs) + " runs.\n") # ES-HyperNEAT large. thefile = open('es_hyperneat_mountain_car_large_run_fitnesses.txt', 'w+') thefile.write("ES-HyperNEAT large one\n") for item in es_hyperneat_large_run_one_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_large_one_average_fit: thefile.write("ES-HyperNEAT large one solves mountain_car at generation: " + str(es_hyperneat_large_one_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT large one does not solve mountain_car with best fitness: " + str(es_hyperneat_large_one_average_fit[gens-1])) thefile.write("\nES-HyperNEAT large one solves mountain_car in " + str(es_hyperneat_large_one_solved) + " out of " + str(runs) + " runs.\n") thefile.write("ES-HyperNEAT large ten\n") for item in es_hyperneat_large_run_ten_fitnesses: thefile.write("%s\n" % item) if max_fit in es_hyperneat_large_ten_average_fit: thefile.write("ES-HyperNEAT large ten solves mountain_car at generation: " + str(es_hyperneat_large_ten_average_fit.index(max_fit))) else: thefile.write("ES-HyperNEAT large ten does not solve mountain_car with best fitness: " + str(es_hyperneat_large_ten_average_fit[gens-1])) thefile.write("\nES-HyperNEAT large ten solves mountain_car in " + str(es_hyperneat_large_ten_solved) + " out of " + str(runs) + " runs.\n") # Plot one fitnesses. plt.plot(range(gens), neat_one_average_fit, 'r-', label="NEAT") plt.plot(range(gens), hyperneat_one_average_fit, 'g--', label="HyperNEAT") plt.plot(range(gens), es_hyperneat_small_one_average_fit, 'b-.', label="ES-HyperNEAT small") plt.plot(range(gens), es_hyperneat_medium_one_average_fit, 'c-.', label="ES-HyperNEAT medium") plt.plot(range(gens), es_hyperneat_large_one_average_fit, 'm-.', label="ES-HyperNEAT large") plt.title("Average mountain_car fitnesses one episode") plt.xlabel("Generations") plt.ylabel("Fitness") plt.grid() plt.legend(loc="best") plt.savefig('mountain_car_one_fitnesses.svg') plt.close() # Plot ten fitnesses. 
plt.plot(range(gens), neat_ten_average_fit, 'r-', label="NEAT") plt.plot(range(gens), hyperneat_ten_average_fit, 'g--', label="HyperNEAT") plt.plot(range(gens), es_hyperneat_small_ten_average_fit, 'b-.', label="ES-HyperNEAT small") plt.plot(range(gens), es_hyperneat_medium_ten_average_fit, 'c-.', label="ES-HyperNEAT medium") plt.plot(range(gens), es_hyperneat_large_ten_average_fit, 'm-.', label="ES-HyperNEAT large") plt.title("Average mountain_car fitnesses ten episodes") plt.xlabel("Generations") plt.ylabel("Fitness") plt.grid() plt.legend(loc="best") plt.savefig('mountain_car_ten_fitnesses.svg') plt.close()
mit
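The mountain-car driver above repeats the same clip-and-average block for NEAT, HyperNEAT and the three ES-HyperNEAT variants. A hedged sketch of how those blocks could be folded into one helper; average_runs is an illustrative name, and the only API it assumes is the most_fit_genomes attribute of the neat-python statistics objects already used in the script.

def average_runs(stats, gens, runs, max_fit):
    """Average the per-generation best fitness over `runs` runs,
    clipping at `max_fit` and padding short runs with `max_fit`."""
    totals = [0.0] * gens
    for stat in stats:                       # one statistics object per run
        best = [g.fitness for g in stat.most_fit_genomes]
        for i in range(gens):
            fit = best[i] if i < len(best) else max_fit
            totals[i] += min(fit, max_fit)   # clip above the solve threshold
    return [t / runs for t in totals]

# e.g. neat_one_average_fit = average_runs([one for one, ten in neat_stats],
#                                          gens, runs, max_fit)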
sniemi/EuclidVisibleInstrument
analysis/saturatedArea.py
1
7321
""" Saturated Imaging Area ====================== This scripts can be used to study the imaging area that is impacted by bright stars. :requires: NumPy :requires: matplotlib :requires: VISsim-Python :version: 0.2 :author: Sami-Matias Niemi :contact: s.niemi@ucl.ac.uk """ import matplotlib matplotlib.rc('text', usetex=True) matplotlib.rcParams['font.size'] = 17 matplotlib.rc('xtick', labelsize=14) matplotlib.rc('axes', linewidth=1.1) matplotlib.rcParams['legend.fontsize'] = 11 matplotlib.rcParams['legend.handlelength'] = 3 matplotlib.rcParams['xtick.major.size'] = 5 matplotlib.rcParams['ytick.major.size'] = 5 import matplotlib.pyplot as plt import numpy as np import math, bisect from support.VISinstrumentModel import VISinformation from sources import stellarNumberCounts def pixelsImpacted(magnitude, exptime=565, pixelFractions=(0.65, 0.4, 0.35, 0.18, 0.09, 0.05), star=False, lookup=True): """ This potentially overestimates because does not consider the fact that bleeding is along the column and hence some saturated pixels may be double counted. """ if lookup: #use a lookup table data = [(0, 311609), (1, 251766), (2, 181504), (3, 119165), (4, 75173), (5, 46298), (6, 28439), (7, 18181), (8, 12491), (9, 7552), (10, 4246), (11, 1652), (12, 636), (13, 247), (14, 93), (15, 29), (16, 8), (17, 2), (18, 1), (19, 0), (20, 0)] data.sort() pos = bisect.bisect_left(data, (magnitude - 0.99,)) return data[pos][1] else: #try to calculate info = VISinformation() zp = info['zeropoint'] fw = info['fullwellcapacity'] electrons = 10**(-.4*(magnitude - zp)) * exptime mask = 0 for x in pixelFractions: mask += np.round(electrons * x / fw - 0.4) #0.4 as we don't want to mask if say 175k pixels... if star: mask += (20*20) if mask > 2000**2: mask = 2000**2 return mask def areaImpacted(magnitudes=np.arange(0, 20., 1.), offset=0.5, star=False): """ """ s = 0.1 Nvconst = stellarNumberCounts.integratedCountsVband() print '\n mag b l stars CCD area' for b in [20, 25, 30, 50, 90]: for l in [0, 90, 180]: area = 0 prev = 0 for ml in magnitudes: m = s * math.ceil(float(ml + offset) / s) n = stellarNumberCounts.bahcallSoneira(m, l, b, Nvconst) n -= prev #subtract the number of stars in the previous bin ccd = n * 49.6 / 3600 covering = pixelsImpacted(m, star=star) area += (ccd * covering) / (4096 * 4132.) * 100. prev = n #store the number of stars in the current bin if area > 100: area = 100. txt = '%.1f %2d %3d %.2f %.1f %.3f' % (m, b, l, n, ccd, area) print txt if b == 90: #no need to do more than once... l is irrelevant break print '\n\n\nIntegrated Area Loss:' blow=20 bhigh=90 llow=0 lhigh=180 bnum=71 lnum=181 prev = 0 for i, ml in enumerate(magnitudes): m = s * math.ceil(float(ml + offset) / s) l, b, counts = stellarNumberCounts.skyNumbers(m, blow, bhigh, llow, lhigh, bnum, lnum) counts -= prev prev = counts.copy() #average stars = np.mean(counts) ccd = stars * 49.6 / 3600 covering = pixelsImpacted(m, star=star) area = (ccd * covering) / (4096 * 4132.) * 100. if area > 100: area = 100. print 'magnitude = %.1f, average = %.5f, max = %.5f' % (ml, stars, np.max(counts)) print '%i stars per square degree will mean %i stars per CCD and thus an area loss of %.4f per cent' % \ (stars, ccd, area) if i < 1: z = counts * covering / (4096 * 4132. * (4096 * 0.1 * 4132 * 0.1 / 60. / 60.)) * 100. else: z += counts * covering / (4096 * 4132. * (4096 * 0.1 * 4132 * 0.1 / 60. / 60.)) * 100. msk = z > 100 z[msk] = 100. 
_areaLossPlot(m, b, l, z, blow, bhigh, llow, lhigh, bnum, lnum, 'Masking of Saturated Pixels From All Stars', 'AreaLoss') def _areaLossPlot(maglimit, b, l, z, blow, bhigh, llow, lhigh, bnum, lnum, title, output): """ Generate a plot showing the area loss as a function of galactic coordinates for given magnitude limit. :param maglimit: :param b: :param l: :param z: :param blow: :param bhigh: :param llow: :param lhigh: :param bnum: :param lnum: :return: """ from kapteyn import maputils header = {'NAXIS': 2, 'NAXIS1': len(l), 'NAXIS2': len(b), 'CTYPE1': 'GLON', 'CRVAL1': llow, 'CRPIX1': 0, 'CUNIT1': 'deg', 'CDELT1': float(bhigh-blow)/bnum, 'CTYPE2': 'GLAT', 'CRVAL2': blow, 'CRPIX2': 0, 'CUNIT2': 'deg', 'CDELT2': float(lhigh-llow)/lnum} fig = plt.figure(figsize=(12, 7)) frame1 = fig.add_axes([0.1, 0.1, 0.85, 0.85]) #generate image f = maputils.FITSimage(externalheader=header, externaldata=z) im1 = f.Annotatedimage(frame1) grat1 = im1.Graticule(skyout='Galactic', starty=blow, deltay=10, startx=llow, deltax=20) colorbar = im1.Colorbar(orientation='horizontal') colorbar.set_label(label=r'Imaging Area Lost Because of Saturated Pixels [\%]', fontsize=18) im1.Image() im1.plot() title += r' $V \leq %.1f$' % maglimit frame1.set_title(title, y=1.02) plt.savefig(output + '%i.pdf' % maglimit) plt.close() def _test(): pixels = np.vectorize(pixelsImpacted) mag = np.arange(0, 19., 1) + 0.4 rs = pixels(mag) for m, val in zip(mag, rs): print m, val print '\n\n\n0 mag' n = stellarNumberCounts.bahcallSoneira(1, 0, 20, stellarNumberCounts.integratedCountsVband()) print 'objects perCCD areaLoss' print n, n * 49.6 / 3600 , n * pixelsImpacted(1) * 49.6 / 3600 / (4096 * 4132.) * 100. print '\n10 mag' n = stellarNumberCounts.bahcallSoneira(10, 0, 20, stellarNumberCounts.integratedCountsVband()) print 'objects perCCD areaLoss' print n, n * 49.6 / 3600 , n * pixelsImpacted(10) * 49.6 / 3600 / (4096 * 4132.) * 100. print '\n18 mag' n = stellarNumberCounts.bahcallSoneira(18, 0, 20, stellarNumberCounts.integratedCountsVband()) print 'objects perCCD areaLoss' print n, n * 49.6 / 3600 , n * pixelsImpacted(18) * 49.6 / 3600 / (4096 * 4132.) * 100. for m in mag: n = stellarNumberCounts.bahcallSoneira(m, 0, 20, stellarNumberCounts.integratedCountsVband()) print m, n * 49.6 / 3600 if __name__ == '__main__': _test() #areaImpacted()
bsd-2-clause
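pixelsImpacted in the file above resolves a magnitude to a masked-pixel count by bisecting a sorted lookup table of (magnitude, pixels) tuples. A minimal sketch of that pattern using a truncated copy of the table; the index clamp is an added assumption so magnitudes beyond the table reuse the last bin instead of raising IndexError.

import bisect

table = [(0, 311609), (1, 251766), (2, 181504), (3, 119165), (4, 75173)]

def masked_pixels(magnitude):
    """Return the tabulated pixel count for the magnitude bin."""
    pos = bisect.bisect_left(table, (magnitude - 0.99,))
    pos = min(pos, len(table) - 1)   # clamp: out-of-range magnitudes use the last bin
    return table[pos][1]

print(masked_pixels(2.4))   # -> 181504, the V = 2 bin of the truncated table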
boltunoff/turo
turo_scraping/genr_turo_parse.py
1
13076
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from bs4 import BeautifulSoup
from random import choice
import datetime
import logging
import os.path
import time

import pandas as pd

# Generic log file location and name.
# Logging usage reminder:
#   logging.debug("This is a debug message")
#   logging.info("Informational message")
#   logging.error("An error has happened!")
logging.basicConfig(filename='turotask_minivans.log', filemode="w",
                    level=logging.INFO, format='%(asctime)s %(message)s')

logging.info("Job started")


def driver_set():
    """Create a PhantomJS driver with a random desktop user agent."""
    ua_list = [
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/48.0.2564.82 Chrome/48.0.2564.82 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
        "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
    ]
    dcap = dict(DesiredCapabilities.PHANTOMJS, javascriptEnabled=True)
    dcap["phantomjs.page.settings.resourceTimeout"] = 15
    dcap["phantomjs.page.settings.loadImages"] = True
    dcap["phantomjs.page.settings.userAgent"] = choice(ua_list)
    # The PhantomJS binary is expected to sit in the same directory as this script.
    driver = webdriver.PhantomJS(desired_capabilities=dcap)
    driver.set_window_size(1920, 1080)
    return driver


driver = driver_set()

# car_types: SUVS, MINIVANS, CARS.
# To search only for regular cars (excluding SUVs and minivans) a filter has to be
# applied on the web page instead of just requesting a /cars URL.
# TODO: add URL and HTML objects for a /cars-only search.


def navigate_to_base_url(car_type, city):
    """Open the rentals page for car_type, search for city and return the result URL."""
    url = "https://turo.com/rentals/%s" % car_type
    driver.get(url)
    driver.implicitly_wait(3)
    time.sleep(5)
    # Click the search input form.
    driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[1]/input[1]').click()
    driver.implicitly_wait(3)
    time.sleep(5)
    # Enter the location text into the search box.
    # Searching by the "Chicago" keyword returns more cars than the O'Hare search.
    driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[1]/input[1]').send_keys(city)
    driver.implicitly_wait(3)
    time.sleep(5)
    # Click the search button (defaults to a one-week window starting today).
    try:
        driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[4]/button').click()
        driver.implicitly_wait(3)
        time.sleep(5)
    except Exception:
        driver.save_screenshot('button.png')
        raise
    driver.implicitly_wait(3)
    time.sleep(5)
    url_now = driver.current_url  # URL of the current-week search results
    print(url_now)
    return url_now


# The current URL is later parsed to find the search dates and to build searches for
# the weeks ahead (+7 days) and for 3-day rentals (+3 days). Example result URL:
# https://turo.com/search?type=10&location=Chicago%2C%20IL%2C%20USA&country=US&region=IL&locationType=City&models=&makes=&fromYear=0&startDate=06%2F27%2F2017&startTime=10%3A00&endDate=07%2F04%2F2017&endTime=10%3A00&category=ALL&instantBook=false&customDelivery=false&maximumDistanceInMiles=30&sortType=RELEVANCE&isMapSearch=false&latitude=41.8781136&longitude=-87.6297982&defaultZoomLevel=11
# TODO: consider taking user input for car type (minivans, suvs, cars, trucks), city,
# start date and end date, and use the URL above as a template for those parameters.

url_now = navigate_to_base_url('minivans', 'Chicago')

logging.info("Cleaning and parsing dates from URL")


def cln_dates(url_now):
    """Return the (start, end) search dates from the URL as 'MM-DD-YYYY' strings."""
    startDate = endDate = None
    for s in url_now.split("&"):
        if 'startDate' in s:
            startDate = s
        elif 'endDate' in s:
            endDate = s
    # 'startDate=06%2F27%2F2017' -> '06-27-2017'
    start_dt = "".join(startDate.split('=')[1].replace("2F", "-").split("%"))
    end_dt = "".join(endDate.split('=')[1].replace("2F", "-").split("%"))
    return start_dt, end_dt


# TODO: convert the strings to dates, add 7 (or 3) days and build a new URL with them,
# e.g. for startDate=05%2F11%2F2017 and endDate=05%2F18%2F2017:
#   a1 = datetime.datetime.strptime('06-27-2017', "%m-%d-%Y")
#   a17 = a1 + datetime.timedelta(days=7)
#   a17s = datetime.datetime.strftime(a17, "%m-%d-%Y")


def parse_data():  # TODO: pass url_now (and the driver) in as arguments
    """Scrape the current results page into a DataFrame of cars and prices."""
    soup = BeautifulSoup(driver.page_source, "html.parser")
    # Use the hierarchical nature of the HTML to grab precisely the content of
    # interest: all "li" elements that are members of class "u-baseBottomMargin".
    all_cars = soup.find_all('li', {'class': 'u-baseBottomMargin'})
    # Debugging helpers:
    # print("Example of the 1st element of the HTML soup object:\n", all_cars[0])
    # all_cars[0].span.get_text()
    hrefs_list = []
    car_id_list = []  # parsed from the link
    year_list = []
    make_list = []  # make and model
    for car in all_cars:
        href = car.a["href"]
        hrefs_list.append(href)
        # The 6th element of the link holds the ID node, e.g. /654321/
        car_id_list.append(href.split("/")[6][:6])
        year_list.append(car.span.get_text())
        make_list.append(car.p.get_text())
    # Prices live in a different element; they cannot be read from all_cars.
    prices = soup.find_all('p', {'class': 'vehicleWithDetails-value'})
    price_list = [p.get_text() for p in prices]
    logging.info("Prices conversion to floats started")
    price_list_fl = [float(p) for p in price_list]
    # Quick summary stats (handy when debugging):
    # print("Minimum price for today: ", min(price_list_fl))
    # print("Maximum price for today: ", max(price_list_fl))
    # print("Average price for today: ", sum(price_list_fl) / float(len(price_list_fl)))
    publn_dt = time.strftime("%m/%d/%Y %H:%M:%S")
    print("Search on: ", publn_dt)
    logging.info("Found %d prices for the cars", len(price_list_fl))
    search_start_dt, search_end_dt = cln_dates(url_now)
    data = {
        'search_start_dt': search_start_dt,
        'search_end_dt': search_end_dt,
        'publn_dt': publn_dt,
        'links': hrefs_list,
        'car_id': car_id_list,
        'year': year_list,
        'make': make_list,
        'price': price_list_fl,
    }
    df = pd.DataFrame(data)
    # Fix the column order.
    df = df[['car_id', 'links', 'year', 'make', 'price',
             'search_start_dt', 'search_end_dt', 'publn_dt']]
    print(df)
    return df


fname = 'turo_minivans_data.csv'

df = parse_data()

logging.info("Writing data to CSV file... %s", fname)


def write_file(df):
    """Append df to the CSV file, writing the header only when the file is new."""
    # TODO: may need to check whether a load for the same date already exists.
    try:
        write_header = not os.path.exists(fname)
        with open(fname, 'a') as f:
            df.to_csv(f, header=write_header, index=False)
        print(len(df), "records written to CSV file %s" % fname)
    except IOError:
        logging.error("Can't open %s. Please check whether %s is open elsewhere", fname, fname)
        logging.info("Nothing is written to the file")


write_file(df)
print('Job is done for one iteration')

# Calculating future search dates from url_now.
s, e = cln_dates(url_now)
s_dt = datetime.datetime.strptime(s, "%m-%d-%Y")      # convert string to datetime
e_dt_in6m = s_dt + datetime.timedelta(days=7 * 26)    # last start date, 6 months (26 weeks) out


def timespan(s_dt, e_dt_in6m, delta=datetime.timedelta(days=7)):
    """Yield weekly start dates from one week ahead up to six months ahead."""
    curr_dt = s_dt + datetime.timedelta(days=7)  # first search date, a week ahead
    while curr_dt < e_dt_in6m:
        yield curr_dt
        curr_dt += delta


def date_repl_url():
    # Unfinished: should collect URLs with the future dates substituted in.
    for day in timespan(s_dt, e_dt_in6m, delta=datetime.timedelta(days=7)):
        s_dt_str = datetime.datetime.strftime(day, '%m-%d-%Y')
        s_dt_str_url = s_dt_str.replace('-', '%2F')  # URL-encoded form of the date


def future_url():
    # Unfinished. The plan:
    # 1. add 7 days to the start and end dates;
    # 2. substitute the dates in the URL (startDate=05%2F11%2F2017, endDate=05%2F18%2F2017);
    # 3. run the whole routine again.
    pass


# Good examples of comparable scrapers:
# https://github.com/fankcoder/findtrip/blob/master/findtrip/findtrip/spiders/spider_ctrip.py
# https://github.com/ianibo/SirsiDynixIBistroScraper/blob/master/scraper.py

# TODO: search on different timelines: a week ahead, a month ahead, 6 months ahead.
# TODO: when the future-dates function is ready, decouple the main functions from the
#       iterations (each week, each car type, etc.) and create logs for each iteration.

# TODO: the flow below stopped working; the way pageContainer-content is searched has to change:
# url = "https://turo.com/search?country=US&defaultZoomLevel=11&endDate=12%2F21%2F2018&endTime=10%3A00&international=true&isMapSearch=false&itemsPerPage=200&latitude=41.8781136&location=Chicago%2C%20IL%2C%20USA&locationType=City&longitude=-87.6297982&maximumDistanceInMiles=30&region=IL&sortType=RELEVANCE&startDate=12%2F18%2F2018&startTime=10%3A00"
# driver.get(url)
# soup = BeautifulSoup(driver.page_source, "html.parser")
# pageContainercontent = soup.find_all('div', attrs={'id': 'pageContainer-content'})
#
# On Python 3.6 a newer library can render the JavaScript instead:
# https://html.python-requests.org/
# from requests_html import HTMLSession
# session = HTMLSession()
# r = session.get(url)
# r.html.render(sleep=10)
# //*[@id="pageContainer-content"]/div[3]/div/div/div[2]/div/div[1]/div/div/div[1]/div/div[1]/div/div/a

# New search flow, starting from 'https://turo.com/rentals/minivans':
# driver.find_element_by_xpath('//*[@id="search-input-header"]').click()               # activate the top search bar
# driver.find_element_by_xpath('//*[@id="search-input-header"]').send_keys('chicago')  # type the city/location
# driver.find_element_by_xpath('//*[@id="pageContainer"]/header/div/div[1]/form/span[1]/button').click()  # top bar search button
# driver.current_url:
# 'https://turo.com/search?location=chicago&country=&region=&locationType='
#
# driver.execute_script() can also be used to run JavaScript directly:
# driver.get(url)
# searchbar_el = driver.find_element_by_xpath('//*[@id="search-input-header"]')
# driver.execute_script("arguments[0].value='Chicago';", searchbar_el)

url = 'https://turo.com/'
driver.get(url)
search_form_el = driver.find_element_by_xpath('//*[@id="js-searchFormExpandedLocationInput"]')
driver.save_screenshot('chgo_srch.png')
driver.execute_script("arguments[0].value='Chicago';", search_form_el)  # set the location via JavaScript
driver.save_screenshot('chgo_srch.png')
driver.find_element_by_xpath('//*[@id="js-searchFormExpanded"]/button[2]').click()  # click the search button
driver.save_screenshot('chgo_srch.png')
print(driver.current_url)
# e.g. 'https://turo.com/search?location=Chicago&country=&region=&locationType=&startDate=11%2F12%2F2018&startTime=10%3A00&endDate=11%2F19%2F2018&endTime=10%3A00'

# Drilling down from this URL to the search results:
db_srch_el = driver.find_element_by_xpath('//*[@id="pageContainer"]')
pageContainercontent = db_srch_el.find_element_by_xpath('//*[@id="pageContainer-content"]')
# Still stuck here on the rework: the links to the cars cannot be found from
# pageContainer-content. Options to try: chromedriver (it renders JavaScript), or
# requests_html.HTMLSession to get all the links for the cars.
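The future_url and date_repl_url stubs above only describe the next step (shift the search window forward and rebuild the search URL) without implementing it. Below is a minimal sketch of that step, assuming only the startDate=MM%2FDD%2FYYYY / endDate=MM%2FDD%2FYYYY query parameters already handled by cln_dates; the helper name build_future_urls is hypothetical, and parse_data would still need the current URL passed in (its own TODO) before the loop at the end could be used as-is.

import datetime
import re


def build_future_urls(url_now, weeks_ahead=26, step_days=7):
    """Yield copies of url_now with startDate/endDate pushed forward week by week.

    Hypothetical helper: it assumes url_now carries startDate=MM%2FDD%2FYYYY and
    endDate=MM%2FDD%2FYYYY query parameters, as in the search URLs above.
    """
    def get_date(param):
        encoded = re.search(r"%s=([^&]+)" % param, url_now).group(1)  # e.g. 06%2F27%2F2017
        return datetime.datetime.strptime(encoded.replace("%2F", "-"), "%m-%d-%Y")

    def encode(dt):
        return datetime.datetime.strftime(dt, "%m-%d-%Y").replace("-", "%2F")

    start_dt = get_date("startDate")
    end_dt = get_date("endDate")

    for week in range(1, weeks_ahead + 1):
        offset = datetime.timedelta(days=step_days * week)
        new_url = url_now.replace("startDate=" + encode(start_dt),
                                  "startDate=" + encode(start_dt + offset))
        new_url = new_url.replace("endDate=" + encode(end_dt),
                                  "endDate=" + encode(end_dt + offset))
        yield new_url


# Possible use with the existing functions, one week at a time (parse_data currently
# reads the module-level url_now for its date columns, so it would need that URL
# passed in before this loop is fully correct):
# for future_search_url in build_future_urls(url_now):
#     driver.get(future_search_url)
#     time.sleep(5)
#     df = parse_data()
#     write_file(df)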
gpl-3.0
GuessWhoSamFoo/pandas
pandas/tests/frame/test_timeseries.py
1
32091
# -*- coding: utf-8 -*- from __future__ import print_function from datetime import datetime, time import numpy as np import pytest from pandas.compat import product import pandas as pd from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, Series, Timestamp, date_range, period_range, to_datetime) from pandas.tests.frame.common import TestData import pandas.util.testing as tm from pandas.util.testing import ( assert_frame_equal, assert_index_equal, assert_series_equal) import pandas.tseries.offsets as offsets @pytest.fixture(params=product([True, False], [True, False])) def close_open_fixture(request): return request.param class TestDataFrameTimeSeriesMethods(TestData): def test_diff(self): the_diff = self.tsframe.diff(1) assert_series_equal(the_diff['A'], self.tsframe['A'] - self.tsframe['A'].shift(1)) # int dtype a = 10000000000000000 b = a + 1 s = Series([a, b]) rs = DataFrame({'s': s}).diff() assert rs.s[1] == 1 # mixed numeric tf = self.tsframe.astype('float32') the_diff = tf.diff(1) assert_series_equal(the_diff['A'], tf['A'] - tf['A'].shift(1)) # issue 10907 df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])}) df.insert(0, 'x', 1) result = df.diff(axis=1) expected = pd.DataFrame({'x': np.nan, 'y': pd.Series( 1), 'z': pd.Series(1)}).astype('float64') assert_frame_equal(result, expected) @pytest.mark.parametrize('tz', [None, 'UTC']) def test_diff_datetime_axis0(self, tz): # GH 18578 df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz), 1: date_range('2010', freq='D', periods=2, tz=tz)}) result = df.diff(axis=0) expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']), 1: pd.TimedeltaIndex(['NaT', '1 days'])}) assert_frame_equal(result, expected) @pytest.mark.parametrize('tz', [None, 'UTC']) def test_diff_datetime_axis1(self, tz): # GH 18578 df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz), 1: date_range('2010', freq='D', periods=2, tz=tz)}) if tz is None: result = df.diff(axis=1) expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']), 1: pd.TimedeltaIndex(['0 days', '0 days'])}) assert_frame_equal(result, expected) else: with pytest.raises(NotImplementedError): result = df.diff(axis=1) def test_diff_timedelta(self): # GH 4533 df = DataFrame(dict(time=[Timestamp('20130101 9:01'), Timestamp('20130101 9:02')], value=[1.0, 2.0])) res = df.diff() exp = DataFrame([[pd.NaT, np.nan], [pd.Timedelta('00:01:00'), 1]], columns=['time', 'value']) assert_frame_equal(res, exp) def test_diff_mixed_dtype(self): df = DataFrame(np.random.randn(5, 3)) df['A'] = np.array([1, 2, 3, 4, 5], dtype=object) result = df.diff() assert result[0].dtype == np.float64 def test_diff_neg_n(self): rs = self.tsframe.diff(-1) xp = self.tsframe - self.tsframe.shift(-1) assert_frame_equal(rs, xp) def test_diff_float_n(self): rs = self.tsframe.diff(1.) 
xp = self.tsframe.diff(1) assert_frame_equal(rs, xp) def test_diff_axis(self): # GH 9727 df = DataFrame([[1., 2.], [3., 4.]]) assert_frame_equal(df.diff(axis=1), DataFrame( [[np.nan, 1.], [np.nan, 1.]])) assert_frame_equal(df.diff(axis=0), DataFrame( [[np.nan, np.nan], [2., 2.]])) def test_pct_change(self): rs = self.tsframe.pct_change(fill_method=None) assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1) rs = self.tsframe.pct_change(2) filled = self.tsframe.fillna(method='pad') assert_frame_equal(rs, filled / filled.shift(2) - 1) rs = self.tsframe.pct_change(fill_method='bfill', limit=1) filled = self.tsframe.fillna(method='bfill', limit=1) assert_frame_equal(rs, filled / filled.shift(1) - 1) rs = self.tsframe.pct_change(freq='5D') filled = self.tsframe.fillna(method='pad') assert_frame_equal(rs, (filled / filled.shift(freq='5D') - 1) .reindex_like(filled)) def test_pct_change_shift_over_nas(self): s = Series([1., 1.5, np.nan, 2.5, 3.]) df = DataFrame({'a': s, 'b': s}) chg = df.pct_change() expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2]) edf = DataFrame({'a': expected, 'b': expected}) assert_frame_equal(chg, edf) @pytest.mark.parametrize("freq, periods, fill_method, limit", [('5B', 5, None, None), ('3B', 3, None, None), ('3B', 3, 'bfill', None), ('7B', 7, 'pad', 1), ('7B', 7, 'bfill', 3), ('14B', 14, None, None)]) def test_pct_change_periods_freq(self, freq, periods, fill_method, limit): # GH 7292 rs_freq = self.tsframe.pct_change(freq=freq, fill_method=fill_method, limit=limit) rs_periods = self.tsframe.pct_change(periods, fill_method=fill_method, limit=limit) assert_frame_equal(rs_freq, rs_periods) empty_ts = DataFrame(index=self.tsframe.index, columns=self.tsframe.columns) rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit) rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit) assert_frame_equal(rs_freq, rs_periods) def test_frame_ctor_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') dates = np.asarray(rng) df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates}) assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')) def test_frame_append_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') df = DataFrame(index=np.arange(len(rng))) df['A'] = rng assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')) def test_frame_datetime64_pre1900_repr(self): df = DataFrame({'year': date_range('1/1/1700', periods=50, freq='A-DEC')}) # it works! 
repr(df) def test_frame_append_datetime64_col_other_units(self): n = 100 units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y'] ns_dtype = np.dtype('M8[ns]') for unit in units: dtype = np.dtype('M8[%s]' % unit) vals = np.arange(n, dtype=np.int64).view(dtype) df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) df[unit] = vals ex_vals = to_datetime(vals.astype('O')).values assert df[unit].dtype == ns_dtype assert (df[unit].values == ex_vals).all() # Test insertion into existing datetime64 column df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype) for unit in units: dtype = np.dtype('M8[%s]' % unit) vals = np.arange(n, dtype=np.int64).view(dtype) tmp = df.copy() tmp['dates'] = vals ex_vals = to_datetime(vals.astype('O')).values assert (tmp['dates'].values == ex_vals).all() def test_shift(self): # naive shift shiftedFrame = self.tsframe.shift(5) tm.assert_index_equal(shiftedFrame.index, self.tsframe.index) shiftedSeries = self.tsframe['A'].shift(5) assert_series_equal(shiftedFrame['A'], shiftedSeries) shiftedFrame = self.tsframe.shift(-5) tm.assert_index_equal(shiftedFrame.index, self.tsframe.index) shiftedSeries = self.tsframe['A'].shift(-5) assert_series_equal(shiftedFrame['A'], shiftedSeries) # shift by 0 unshifted = self.tsframe.shift(0) assert_frame_equal(unshifted, self.tsframe) # shift by DateOffset shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay()) assert len(shiftedFrame) == len(self.tsframe) shiftedFrame2 = self.tsframe.shift(5, freq='B') assert_frame_equal(shiftedFrame, shiftedFrame2) d = self.tsframe.index[0] shifted_d = d + offsets.BDay(5) assert_series_equal(self.tsframe.xs(d), shiftedFrame.xs(shifted_d), check_names=False) # shift int frame int_shifted = self.intframe.shift(1) # noqa # Shifting with PeriodIndex ps = tm.makePeriodFrame() shifted = ps.shift(1) unshifted = shifted.shift(-1) tm.assert_index_equal(shifted.index, ps.index) tm.assert_index_equal(unshifted.index, ps.index) tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values) shifted2 = ps.shift(1, 'B') shifted3 = ps.shift(1, offsets.BDay()) assert_frame_equal(shifted2, shifted3) assert_frame_equal(ps, shifted2.shift(-1, 'B')) msg = 'does not match PeriodIndex freq' with pytest.raises(ValueError, match=msg): ps.shift(freq='D') # shift other axis # GH 6371 df = DataFrame(np.random.rand(10, 5)) expected = pd.concat([DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], ignore_index=True, axis=1) result = df.shift(1, axis=1) assert_frame_equal(result, expected) # shift named axis df = DataFrame(np.random.rand(10, 5)) expected = pd.concat([DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], ignore_index=True, axis=1) result = df.shift(1, axis='columns') assert_frame_equal(result, expected) def test_shift_bool(self): df = DataFrame({'high': [True, False], 'low': [False, False]}) rs = df.shift(1) xp = DataFrame(np.array([[np.nan, np.nan], [True, False]], dtype=object), columns=['high', 'low']) assert_frame_equal(rs, xp) def test_shift_categorical(self): # GH 9416 s1 = pd.Series(['a', 'b', 'c'], dtype='category') s2 = pd.Series(['A', 'B', 'C'], dtype='category') df = DataFrame({'one': s1, 'two': s2}) rs = df.shift(1) xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)}) assert_frame_equal(rs, xp) def test_shift_fill_value(self): # GH #24128 df = DataFrame([1, 2, 3, 4, 5], index=date_range('1/1/2000', periods=5, freq='H')) exp = DataFrame([0, 1, 2, 3, 4], index=date_range('1/1/2000', periods=5, 
freq='H')) result = df.shift(1, fill_value=0) assert_frame_equal(result, exp) exp = DataFrame([0, 0, 1, 2, 3], index=date_range('1/1/2000', periods=5, freq='H')) result = df.shift(2, fill_value=0) assert_frame_equal(result, exp) def test_shift_empty(self): # Regression test for #8019 df = DataFrame({'foo': []}) rs = df.shift(-1) assert_frame_equal(df, rs) def test_shift_duplicate_columns(self): # GH 9092; verify that position-based shifting works # in the presence of duplicate columns column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]] data = np.random.randn(20, 5) shifted = [] for columns in column_lists: df = pd.DataFrame(data.copy(), columns=columns) for s in range(5): df.iloc[:, s] = df.iloc[:, s].shift(s + 1) df.columns = range(5) shifted.append(df) # sanity check the base case nulls = shifted[0].isna().sum() assert_series_equal(nulls, Series(range(1, 6), dtype='int64')) # check all answers are the same assert_frame_equal(shifted[0], shifted[1]) assert_frame_equal(shifted[0], shifted[2]) def test_tshift(self): # PeriodIndex ps = tm.makePeriodFrame() shifted = ps.tshift(1) unshifted = shifted.tshift(-1) assert_frame_equal(unshifted, ps) shifted2 = ps.tshift(freq='B') assert_frame_equal(shifted, shifted2) shifted3 = ps.tshift(freq=offsets.BDay()) assert_frame_equal(shifted, shifted3) with pytest.raises(ValueError, match='does not match'): ps.tshift(freq='M') # DatetimeIndex shifted = self.tsframe.tshift(1) unshifted = shifted.tshift(-1) assert_frame_equal(self.tsframe, unshifted) shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq) assert_frame_equal(shifted, shifted2) inferred_ts = DataFrame(self.tsframe.values, Index(np.asarray(self.tsframe.index)), columns=self.tsframe.columns) shifted = inferred_ts.tshift(1) unshifted = shifted.tshift(-1) assert_frame_equal(shifted, self.tsframe.tshift(1)) assert_frame_equal(unshifted, inferred_ts) no_freq = self.tsframe.iloc[[0, 5, 7], :] pytest.raises(ValueError, no_freq.tshift) def test_truncate(self): ts = self.tsframe[::3] start, end = self.tsframe.index[3], self.tsframe.index[6] start_missing = self.tsframe.index[2] end_missing = self.tsframe.index[7] # neither specified truncated = ts.truncate() assert_frame_equal(truncated, ts) # both specified expected = ts[1:3] truncated = ts.truncate(start, end) assert_frame_equal(truncated, expected) truncated = ts.truncate(start_missing, end_missing) assert_frame_equal(truncated, expected) # start specified expected = ts[1:] truncated = ts.truncate(before=start) assert_frame_equal(truncated, expected) truncated = ts.truncate(before=start_missing) assert_frame_equal(truncated, expected) # end specified expected = ts[:3] truncated = ts.truncate(after=end) assert_frame_equal(truncated, expected) truncated = ts.truncate(after=end_missing) assert_frame_equal(truncated, expected) pytest.raises(ValueError, ts.truncate, before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq) def test_truncate_copy(self): index = self.tsframe.index truncated = self.tsframe.truncate(index[5], index[10]) truncated.values[:] = 5. 
assert not (self.tsframe.values[5:11] == 5).any() def test_truncate_nonsortedindex(self): # GH 17935 df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']}, index=[5, 3, 2, 9, 0]) msg = 'truncate requires a sorted index' with pytest.raises(ValueError, match=msg): df.truncate(before=3, after=9) rng = pd.date_range('2011-01-01', '2012-01-01', freq='W') ts = pd.DataFrame({'A': np.random.randn(len(rng)), 'B': np.random.randn(len(rng))}, index=rng) msg = 'truncate requires a sorted index' with pytest.raises(ValueError, match=msg): ts.sort_values('A', ascending=False).truncate(before='2011-11', after='2011-12') df = pd.DataFrame({3: np.random.randn(5), 20: np.random.randn(5), 2: np.random.randn(5), 0: np.random.randn(5)}, columns=[3, 20, 2, 0]) msg = 'truncate requires a sorted index' with pytest.raises(ValueError, match=msg): df.truncate(before=2, after=20, axis=1) def test_asfreq(self): offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd()) rule_monthly = self.tsframe.asfreq('BM') tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A']) filled = rule_monthly.asfreq('B', method='pad') # noqa # TODO: actually check that this worked. # don't forget! filled_dep = rule_monthly.asfreq('B', method='pad') # noqa # test does not blow up on length-0 DataFrame zero_length = self.tsframe.reindex([]) result = zero_length.asfreq('BM') assert result is not zero_length def test_asfreq_datetimeindex(self): df = DataFrame({'A': [1, 2, 3]}, index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)]) df = df.asfreq('B') assert isinstance(df.index, DatetimeIndex) ts = df['A'].asfreq('B') assert isinstance(ts.index, DatetimeIndex) def test_asfreq_fillvalue(self): # test for fill value during upsampling, related to issue 3715 # setup rng = pd.date_range('1/1/2016', periods=10, freq='2S') ts = pd.Series(np.arange(len(rng)), index=rng) df = pd.DataFrame({'one': ts}) # insert pre-existing missing value df.loc['2016-01-01 00:00:08', 'one'] = None actual_df = df.asfreq(freq='1S', fill_value=9.0) expected_df = df.asfreq(freq='1S').fillna(9.0) expected_df.loc['2016-01-01 00:00:08', 'one'] = None assert_frame_equal(expected_df, actual_df) expected_series = ts.asfreq(freq='1S').fillna(9.0) actual_series = ts.asfreq(freq='1S', fill_value=9.0) assert_series_equal(expected_series, actual_series) @pytest.mark.parametrize("data,idx,expected_first,expected_last", [ ({'A': [1, 2, 3]}, [1, 1, 2], 1, 2), ({'A': [1, 2, 3]}, [1, 2, 2], 1, 2), ({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'), ({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2), ({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2), ({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)]) def test_first_last_valid(self, data, idx, expected_first, expected_last): N = len(self.frame.index) mat = np.random.randn(N) mat[:5] = np.nan mat[-5:] = np.nan frame = DataFrame({'foo': mat}, index=self.frame.index) index = frame.first_valid_index() assert index == frame.index[5] index = frame.last_valid_index() assert index == frame.index[-6] # GH12800 empty = DataFrame() assert empty.last_valid_index() is None assert empty.first_valid_index() is None # GH17400: no valid entries frame[:] = np.nan assert frame.last_valid_index() is None assert frame.first_valid_index() is None # GH20499: its preserves freq with holes frame.index = date_range("20110101", periods=N, freq="B") frame.iloc[1] = 1 frame.iloc[-2] = 1 assert frame.first_valid_index() == frame.index[1] assert frame.last_valid_index() == frame.index[-2] assert frame.first_valid_index().freq == frame.index.freq assert 
frame.last_valid_index().freq == frame.index.freq # GH 21441 df = DataFrame(data, index=idx) assert expected_first == df.first_valid_index() assert expected_last == df.last_valid_index() def test_first_subset(self): ts = tm.makeTimeDataFrame(freq='12h') result = ts.first('10d') assert len(result) == 20 ts = tm.makeTimeDataFrame(freq='D') result = ts.first('10d') assert len(result) == 10 result = ts.first('3M') expected = ts[:'3/31/2000'] assert_frame_equal(result, expected) result = ts.first('21D') expected = ts[:21] assert_frame_equal(result, expected) result = ts[:0].first('3M') assert_frame_equal(result, ts[:0]) def test_first_raises(self): # GH20725 df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) with pytest.raises(TypeError): # index is not a DatetimeIndex df.first('1D') def test_last_subset(self): ts = tm.makeTimeDataFrame(freq='12h') result = ts.last('10d') assert len(result) == 20 ts = tm.makeTimeDataFrame(nper=30, freq='D') result = ts.last('10d') assert len(result) == 10 result = ts.last('21D') expected = ts['2000-01-10':] assert_frame_equal(result, expected) result = ts.last('21D') expected = ts[-21:] assert_frame_equal(result, expected) result = ts[:0].last('3M') assert_frame_equal(result, ts[:0]) def test_last_raises(self): # GH20725 df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) with pytest.raises(TypeError): # index is not a DatetimeIndex df.last('1D') def test_at_time(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) rs = ts.at_time(rng[1]) assert (rs.index.hour == rng[1].hour).all() assert (rs.index.minute == rng[1].minute).all() assert (rs.index.second == rng[1].second).all() result = ts.at_time('9:30') expected = ts.at_time(time(9, 30)) assert_frame_equal(result, expected) result = ts.loc[time(9, 30)] expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)] assert_frame_equal(result, expected) # midnight, everything rng = date_range('1/1/2000', '1/31/2000') ts = DataFrame(np.random.randn(len(rng), 3), index=rng) result = ts.at_time(time(0, 0)) assert_frame_equal(result, ts) # time doesn't exist rng = date_range('1/1/2012', freq='23Min', periods=384) ts = DataFrame(np.random.randn(len(rng), 2), rng) rs = ts.at_time('16:00') assert len(rs) == 0 def test_at_time_raises(self): # GH20725 df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) with pytest.raises(TypeError): # index is not a DatetimeIndex df.at_time('00:00') @pytest.mark.parametrize('axis', ['index', 'columns', 0, 1]) def test_at_time_axis(self, axis): # issue 8839 rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), len(rng))) ts.index, ts.columns = rng, rng indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] if axis in ['index', 0]: expected = ts.loc[indices, :] elif axis in ['columns', 1]: expected = ts.loc[:, indices] result = ts.at_time('9:30', axis=axis) assert_frame_equal(result, expected) def test_between_time(self, close_open_fixture): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) stime = time(0, 0) etime = time(1, 0) inc_start, inc_end = close_open_fixture filtered = ts.between_time(stime, etime, inc_start, inc_end) exp_len = 13 * 4 + 1 if not inc_start: exp_len -= 5 if not inc_end: exp_len -= 4 assert len(filtered) == exp_len for rs in filtered.index: t = rs.time() if inc_start: assert t >= stime else: assert t > stime if inc_end: assert t <= etime else: assert t < etime result = ts.between_time('00:00', '01:00') expected = 
ts.between_time(stime, etime) assert_frame_equal(result, expected) # across midnight rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) stime = time(22, 0) etime = time(9, 0) filtered = ts.between_time(stime, etime, inc_start, inc_end) exp_len = (12 * 11 + 1) * 4 + 1 if not inc_start: exp_len -= 4 if not inc_end: exp_len -= 4 assert len(filtered) == exp_len for rs in filtered.index: t = rs.time() if inc_start: assert (t >= stime) or (t <= etime) else: assert (t > stime) or (t <= etime) if inc_end: assert (t <= etime) or (t >= stime) else: assert (t < etime) or (t >= stime) def test_between_time_raises(self): # GH20725 df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) with pytest.raises(TypeError): # index is not a DatetimeIndex df.between_time(start_time='00:00', end_time='12:00') def test_between_time_axis(self, axis): # issue 8839 rng = date_range('1/1/2000', periods=100, freq='10min') ts = DataFrame(np.random.randn(len(rng), len(rng))) stime, etime = ('08:00:00', '09:00:00') exp_len = 7 if axis in ['index', 0]: ts.index = rng assert len(ts.between_time(stime, etime)) == exp_len assert len(ts.between_time(stime, etime, axis=0)) == exp_len if axis in ['columns', 1]: ts.columns = rng selected = ts.between_time(stime, etime, axis=1).columns assert len(selected) == exp_len def test_between_time_axis_raises(self, axis): # issue 8839 rng = date_range('1/1/2000', periods=100, freq='10min') mask = np.arange(0, len(rng)) rand_data = np.random.randn(len(rng), len(rng)) ts = DataFrame(rand_data, index=rng, columns=rng) stime, etime = ('08:00:00', '09:00:00') if axis in ['columns', 1]: ts.index = mask pytest.raises(TypeError, ts.between_time, stime, etime) pytest.raises(TypeError, ts.between_time, stime, etime, axis=0) if axis in ['index', 0]: ts.columns = mask pytest.raises(TypeError, ts.between_time, stime, etime, axis=1) def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. df = pd.DataFrame({'foo': [pd.NaT, pd.NaT, pd.Timestamp('2012-05-01')]}) res = df.min() exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"]) tm.assert_series_equal(res, exp) res = df.max() exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"]) tm.assert_series_equal(res, exp) # GH12941, only NaTs are in DataFrame. 
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]}) res = df.min() exp = pd.Series([pd.NaT], index=["foo"]) tm.assert_series_equal(res, exp) res = df.max() exp = pd.Series([pd.NaT], index=["foo"]) tm.assert_series_equal(res, exp) def test_datetime_assignment_with_NaT_and_diff_time_units(self): # GH 7492 data_ns = np.array([1, 'nat'], dtype='datetime64[ns]') result = pd.Series(data_ns).to_frame() result['new'] = data_ns expected = pd.DataFrame({0: [1, None], 'new': [1, None]}, dtype='datetime64[ns]') tm.assert_frame_equal(result, expected) # OutOfBoundsDatetime error shouldn't occur data_s = np.array([1, 'nat'], dtype='datetime64[s]') result['new'] = data_s expected = pd.DataFrame({0: [1, None], 'new': [1e9, None]}, dtype='datetime64[ns]') tm.assert_frame_equal(result, expected) def test_frame_to_period(self): K = 5 dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') df = DataFrame(np.random.randn(len(dr), K), index=dr) df['mix'] = 'a' pts = df.to_period() exp = df.copy() exp.index = pr assert_frame_equal(pts, exp) pts = df.to_period('M') tm.assert_index_equal(pts.index, exp.index.asfreq('M')) df = df.T pts = df.to_period(axis=1) exp = df.copy() exp.columns = pr assert_frame_equal(pts, exp) pts = df.to_period('M', axis=1) tm.assert_index_equal(pts.columns, exp.columns.asfreq('M')) pytest.raises(ValueError, df.to_period, axis=2) @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert']) def test_tz_convert_and_localize(self, fn): l0 = date_range('20140701', periods=5, freq='D') l1 = date_range('20140701', periods=5, freq='D') int_idx = Index(range(5)) if fn == 'tz_convert': l0 = l0.tz_localize('UTC') l1 = l1.tz_localize('UTC') for idx in [l0, l1]: l0_expected = getattr(idx, fn)('US/Pacific') l1_expected = getattr(idx, fn)('US/Pacific') df1 = DataFrame(np.ones(5), index=l0) df1 = getattr(df1, fn)('US/Pacific') assert_index_equal(df1.index, l0_expected) # MultiIndex # GH7846 df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) df3 = getattr(df2, fn)('US/Pacific', level=0) assert not df3.index.levels[0].equals(l0) assert_index_equal(df3.index.levels[0], l0_expected) assert_index_equal(df3.index.levels[1], l1) assert not df3.index.levels[1].equals(l1_expected) df3 = getattr(df2, fn)('US/Pacific', level=1) assert_index_equal(df3.index.levels[0], l0) assert not df3.index.levels[0].equals(l0_expected) assert_index_equal(df3.index.levels[1], l1_expected) assert not df3.index.levels[1].equals(l1) df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) # TODO: untested df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa assert_index_equal(df3.index.levels[0], l0) assert not df3.index.levels[0].equals(l0_expected) assert_index_equal(df3.index.levels[1], l1_expected) assert not df3.index.levels[1].equals(l1) # Bad Inputs # Not DatetimeIndex / PeriodIndex with pytest.raises(TypeError, match='DatetimeIndex'): df = DataFrame(index=int_idx) df = getattr(df, fn)('US/Pacific') # Not DatetimeIndex / PeriodIndex with pytest.raises(TypeError, match='DatetimeIndex'): df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) df = getattr(df, fn)('US/Pacific', level=0) # Invalid level with pytest.raises(ValueError, match='not valid'): df = DataFrame(index=l0) df = getattr(df, fn)('US/Pacific', level=1)
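The shift tests in this file exercise both DataFrame.shift (move the values, keep the index) and DataFrame.tshift (move a frequency-carrying index, keep the values). A short illustrative snippet of that distinction, written against the pandas version these tests target (tshift was later deprecated in favor of shift(freq=...)), might look like this:

import pandas as pd

idx = pd.date_range("2000-01-03", periods=3, freq="B")
df = pd.DataFrame({"A": [1.0, 2.0, 3.0]}, index=idx)

shifted = df.shift(1)    # values move down one row; the index stays put; NaN appears at the top
tshifted = df.tshift(1)  # the index moves forward one business day; the values stay put

print(shifted["A"].tolist())  # [nan, 1.0, 2.0]
print(tshifted.index[0])      # 2000-01-04, one business day later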
bsd-3-clause
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/pandas/tests/test_downstream.py
9
2584
""" Testing that we work in the downstream packages """ import pytest import numpy as np # noqa from pandas import DataFrame from pandas.compat import PY36 from pandas.util import testing as tm import importlib def import_module(name): # we *only* want to skip if the module is truly not available # and NOT just an actual import error because of pandas changes if PY36: try: return importlib.import_module(name) except ModuleNotFoundError: # noqa pytest.skip("skipping as {} not available".format(name)) else: try: return importlib.import_module(name) except ImportError as e: if "No module named" in str(e) and name in str(e): pytest.skip("skipping as {} not available".format(name)) raise @pytest.fixture def df(): return DataFrame({'A': [1, 2, 3]}) def test_dask(df): toolz = import_module('toolz') # noqa dask = import_module('dask') # noqa import dask.dataframe as dd ddf = dd.from_pandas(df, npartitions=3) assert ddf.A is not None assert ddf.compute() is not None def test_xarray(df): xarray = import_module('xarray') # noqa assert df.to_xarray() is not None @tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa import statsmodels.api as sm import statsmodels.formula.api as smf df = sm.datasets.get_rdataset("Guerry", "HistData").data smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit() def test_scikit_learn(df): sklearn = import_module('sklearn') # noqa from sklearn import svm, datasets digits = datasets.load_digits() clf = svm.SVC(gamma=0.001, C=100.) clf.fit(digits.data[:-1], digits.target[:-1]) clf.predict(digits.data[-1:]) def test_seaborn(): seaborn = import_module('seaborn') tips = seaborn.load_dataset("tips") seaborn.stripplot(x="day", y="total_bill", data=tips) def test_pandas_gbq(df): pandas_gbq = import_module('pandas_gbq') # noqa @tm.network def test_pandas_datareader(): pandas_datareader = import_module('pandas_datareader') # noqa pandas_datareader.get_data_google('AAPL') def test_geopandas(): geopandas = import_module('geopandas') # noqa fp = geopandas.datasets.get_path('naturalearth_lowres') assert geopandas.read_file(fp) is not None def test_pyarrow(df): pyarrow = import_module('pyarrow') # noqa table = pyarrow.Table.from_pandas(df) result = table.to_pandas() tm.assert_frame_equal(result, df)
mit
kjung/scikit-learn
sklearn/linear_model/passive_aggressive.py
60
10566
# Authors: Rob Zinkov, Mathieu Blondel # License: BSD 3 clause from .stochastic_gradient import BaseSGDClassifier from .stochastic_gradient import BaseSGDRegressor from .stochastic_gradient import DEFAULT_EPSILON class PassiveAggressiveClassifier(BaseSGDClassifier): """Passive Aggressive Classifier Read more in the :ref:`User Guide <passive_aggressive>`. Parameters ---------- C : float Maximum step size (regularization). Defaults to 1.0. fit_intercept : bool, default=False Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. loss : string, optional The loss function to be used: hinge: equivalent to PA-I in the reference paper. squared_hinge: equivalent to PA-II in the reference paper. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` .. versionadded:: 0.17 parameter *class_weight* to automatically weight samples. Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- SGDClassifier Perceptron References ---------- Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006) """ def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, loss="hinge", n_jobs=1, random_state=None, warm_start=False, class_weight=None): super(PassiveAggressiveClassifier, self).__init__( penalty=None, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, eta0=1.0, warm_start=warm_start, class_weight=class_weight, n_jobs=n_jobs) self.C = C self.loss = loss def partial_fit(self, X, y, classes=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of the training data y : numpy array of shape [n_samples] Subset of the target values classes : array, shape = [n_classes] Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. Returns ------- self : returns an instance of self. 
""" if self.class_weight == 'balanced': raise ValueError("class_weight 'balanced' is not supported for " "partial_fit. For 'balanced' weights, use " "`sklearn.utils.compute_class_weight` with " "`class_weight='balanced'`. In place of y you " "can use a large enough subset of the full " "training set target to properly estimate the " "class frequency distributions. Pass the " "resulting weights as the class_weight " "parameter.") lr = "pa1" if self.loss == "hinge" else "pa2" return self._partial_fit(X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, n_iter=1, classes=classes, sample_weight=None, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_classes,n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [n_classes] The initial intercept to warm-start the optimization. Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "hinge" else "pa2" return self._fit(X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init) class PassiveAggressiveRegressor(BaseSGDRegressor): """Passive Aggressive Regressor Read more in the :ref:`User Guide <passive_aggressive>`. Parameters ---------- C : float Maximum step size (regularization). Defaults to 1.0. epsilon : float If the difference between the current prediction and the correct label is below this threshold, the model is not updated. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level loss : string, optional The loss function to be used: epsilon_insensitive: equivalent to PA-I in the reference paper. squared_epsilon_insensitive: equivalent to PA-II in the reference paper. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- SGDRegressor References ---------- Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. 
Singer - JMLR (2006) """ def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, loss="epsilon_insensitive", epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False): super(PassiveAggressiveRegressor, self).__init__( penalty=None, l1_ratio=0, epsilon=epsilon, eta0=1.0, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, warm_start=warm_start) self.C = C self.loss = loss def partial_fit(self, X, y): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of training data y : numpy array of shape [n_samples] Subset of target values Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._partial_fit(X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, n_iter=1, sample_weight=None, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._fit(X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)
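Both estimators map their loss argument onto the PA-I ("pa1") or PA-II ("pa2") update described in the referenced paper. A small usage sketch for the classifier, using only the public API shown in this module (the synthetic data is made up for illustration), could be:

import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

rng = np.random.RandomState(0)
X = rng.randn(200, 5)
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)

# loss="hinge" gives the PA-I update; loss="squared_hinge" gives PA-II.
# n_iter matches the scikit-learn version of this module; newer releases use max_iter.
clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", n_iter=5, random_state=0)

# Streaming / online use: feed mini-batches through partial_fit.
classes = np.array([0, 1])
for start in range(0, X.shape[0], 50):
    clf.partial_fit(X[start:start + 50], y[start:start + 50], classes=classes)

print(clf.score(X, y))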
bsd-3-clause
pbrod/scipy
scipy/spatial/_spherical_voronoi.py
12
11835
""" Spherical Voronoi Code .. versionadded:: 0.18.0 """ # # Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson, # Nikolai Nowaczyk, Joe Pitt-Francis, 2015. # # Distributed under the same BSD license as Scipy. # import numpy as np import numpy.matlib import scipy import itertools from . import _voronoi __all__ = ['SphericalVoronoi'] def calc_circumcenters(tetrahedrons): """ Calculates the cirumcenters of the circumspheres of tetrahedrons. An implementation based on http://mathworld.wolfram.com/Circumsphere.html Parameters ---------- tetrahedrons : an array of shape (N, 4, 3) consisting of N tetrahedrons defined by 4 points in 3D Returns ---------- circumcenters : an array of shape (N, 3) consisting of the N circumcenters of the tetrahedrons in 3D """ num = tetrahedrons.shape[0] a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2) sums = np.sum(tetrahedrons ** 2, axis=2) d = np.concatenate((sums[:, :, np.newaxis], a), axis=2) dx = np.delete(d, 1, axis=2) dy = np.delete(d, 2, axis=2) dz = np.delete(d, 3, axis=2) dx = np.linalg.det(dx) dy = -np.linalg.det(dy) dz = np.linalg.det(dz) a = np.linalg.det(a) nominator = np.vstack((dx, dy, dz)) denominator = 2*a return (nominator / denominator).T def project_to_sphere(points, center, radius): """ Projects the elements of points onto the sphere defined by center and radius. Parameters ---------- points : array of floats of shape (npoints, ndim) consisting of the points in a space of dimension ndim center : array of floats of shape (ndim,) the center of the sphere to project on radius : float the radius of the sphere to project on returns: array of floats of shape (npoints, ndim) the points projected onto the sphere """ lengths = scipy.spatial.distance.cdist(points, np.array([center])) return (points - center) / lengths * radius + center class SphericalVoronoi: """ Voronoi diagrams on the surface of a sphere. .. versionadded:: 0.18.0 Parameters ---------- points : ndarray of floats, shape (npoints, 3) Coordinates of points to construct a spherical Voronoi diagram from radius : float, optional Radius of the sphere (Default: 1) center : ndarray of floats, shape (3,) Center of sphere (Default: origin) Attributes ---------- points : double array of shape (npoints, 3) the points in 3D to generate the Voronoi diagram from radius : double radius of the sphere Default: None (forces estimation, which is less precise) center : double array of shape (3,) center of the sphere Default: None (assumes sphere is centered at origin) vertices : double array of shape (nvertices, 3) Voronoi vertices corresponding to points regions : list of list of integers of shape (npoints, _ ) the n-th entry is a list consisting of the indices of the vertices belonging to the n-th point in points Notes ---------- The spherical Voronoi diagram algorithm proceeds as follows. The Convex Hull of the input points (generators) is calculated, and is equivalent to their Delaunay triangulation on the surface of the sphere [Caroli]_. A 3D Delaunay tetrahedralization is obtained by including the origin of the coordinate system as the fourth vertex of each simplex of the Convex Hull. The circumcenters of all tetrahedra in the system are calculated and projected to the surface of the sphere, producing the Voronoi vertices. The Delaunay tetrahedralization neighbour information is then used to order the Voronoi region vertices around each generator. The latter approach is substantially less sensitive to floating point issues than angle-based methods of Voronoi region vertex sorting. 
The surface area of spherical polygons is calculated by decomposing them into triangles and using L'Huilier's Theorem to calculate the spherical excess of each triangle [Weisstein]_. The sum of the spherical excesses is multiplied by the square of the sphere radius to obtain the surface area of the spherical polygon. For nearly-degenerate spherical polygons an area of approximately 0 is returned by default, rather than attempting the unstable calculation. Empirical assessment of spherical Voronoi algorithm performance suggests quadratic time complexity (loglinear is optimal, but algorithms are more challenging to implement). The reconstitution of the surface area of the sphere, measured as the sum of the surface areas of all Voronoi regions, is closest to 100 % for larger (>> 10) numbers of generators. References ---------- .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of points on or close to a sphere. Research Report RR-7004, 2009. .. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html See Also -------- Voronoi : Conventional Voronoi diagrams in N dimensions. Examples -------- >>> from matplotlib import colors >>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection >>> import matplotlib.pyplot as plt >>> from scipy.spatial import SphericalVoronoi >>> from mpl_toolkits.mplot3d import proj3d >>> # set input data >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0], ... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ]) >>> center = np.array([0, 0, 0]) >>> radius = 1 >>> # calculate spherical Voronoi diagram >>> sv = SphericalVoronoi(points, radius, center) >>> # sort vertices (optional, helpful for plotting) >>> sv.sort_vertices_of_regions() >>> # generate plot >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='3d') >>> # plot the unit sphere for reference (optional) >>> u = np.linspace(0, 2 * np.pi, 100) >>> v = np.linspace(0, np.pi, 100) >>> x = np.outer(np.cos(u), np.sin(v)) >>> y = np.outer(np.sin(u), np.sin(v)) >>> z = np.outer(np.ones(np.size(u)), np.cos(v)) >>> ax.plot_surface(x, y, z, color='y', alpha=0.1) >>> # plot generator points >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b') >>> # plot Voronoi vertices >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2], ... c='g') >>> # indicate Voronoi regions (as Euclidean polygons) >>> for region in sv.regions: ... random_color = colors.rgb2hex(np.random.rand(3)) ... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0) ... polygon.set_color(random_color) ... ax.add_collection3d(polygon) >>> plt.show() """ def __init__(self, points, radius=None, center=None): """ Initializes the object and starts the computation of the Voronoi diagram. points : The generator points of the Voronoi diagram assumed to be all on the sphere with radius supplied by the radius parameter and center supplied by the center parameter. radius : The radius of the sphere. Will default to 1 if not supplied. center : The center of the sphere. Will default to the origin if not supplied. """ self.points = points if np.any(center): self.center = center else: self.center = np.zeros(3) if radius: self.radius = radius else: self.radius = 1 self.vertices = None self.regions = None self._tri = None self._calc_vertices_regions() def _calc_vertices_regions(self): """ Calculates the Voronoi vertices and regions of the generators stored in self.points. The vertices will be stored in self.vertices and the regions in self.regions. 
This algorithm was discussed at PyData London 2015 by Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk """ # perform 3D Delaunay triangulation on data set # (here ConvexHull can also be used, and is faster) self._tri = scipy.spatial.ConvexHull(self.points) # add the center to each of the simplices in tri to get the same # tetrahedrons we'd have gotten from Delaunay tetrahedralization # tetrahedrons will have shape: (2N-4, 4, 3) tetrahedrons = self._tri.points[self._tri.simplices] tetrahedrons = np.insert( tetrahedrons, 3, np.array([self.center]), axis=1 ) # produce circumcenters of tetrahedrons from 3D Delaunay # circumcenters will have shape: (2N-4, 3) circumcenters = calc_circumcenters(tetrahedrons) # project tetrahedron circumcenters to the surface of the sphere # self.vertices will have shape: (2N-4, 3) self.vertices = project_to_sphere( circumcenters, self.center, self.radius ) # calculate regions from triangulation # simplex_indices will have shape: (2N-4,) simplex_indices = np.arange(self._tri.simplices.shape[0]) # tri_indices will have shape: (6N-12,) tri_indices = np.column_stack([simplex_indices, simplex_indices, simplex_indices]).ravel() # point_indices will have shape: (6N-12,) point_indices = self._tri.simplices.ravel() # array_associations will have shape: (6N-12, 2) array_associations = np.dstack((point_indices, tri_indices))[0] array_associations = array_associations[np.lexsort(( array_associations[...,1], array_associations[...,0]))] array_associations = array_associations.astype(np.intp) # group by generator indices to produce # unsorted regions in nested list groups = [] for k, g in itertools.groupby(array_associations, lambda t: t[0]): groups.append(list(list(zip(*list(g)))[1])) self.regions = groups def sort_vertices_of_regions(self): """ For each region in regions, it sorts the indices of the Voronoi vertices such that the resulting points are in a clockwise or counterclockwise order around the generator point. This is done as follows: Recall that the n-th region in regions surrounds the n-th generator in points and that the k-th Voronoi vertex in vertices is the projected circumcenter of the tetrahedron obtained by the k-th triangle in _tri.simplices (and the origin). For each region n, we choose the first triangle (=Voronoi vertex) in _tri.simplices and a vertex of that triangle not equal to the center n. These determine a unique neighbor of that triangle, which is then chosen as the second triangle. The second triangle will have a unique vertex not equal to the current vertex or the center. This determines a unique neighbor of the second triangle, which is then chosen as the third triangle and so forth. We proceed through all the triangles (=Voronoi vertices) belonging to the generator in points and obtain a sorted version of the vertices of its surrounding region. """ _voronoi.sort_vertices_of_regions(self._tri.simplices, self.regions)
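calc_circumcenters implements the determinant form of the circumsphere equations, and the Voronoi vertices are those circumcenters projected back onto the sphere. A quick sanity check of the circumcenter step, assuming calc_circumcenters is importable from this module, is to confirm the returned point is equidistant from all four vertices of a hand-picked tetrahedron:

import numpy as np

# One non-degenerate tetrahedron, shaped (1, 4, 3) as calc_circumcenters expects.
tet = np.array([[[1.0, 1.0, 1.0],
                 [1.0, -1.0, -1.0],
                 [-1.0, 1.0, -1.0],
                 [-1.0, -1.0, 1.0]]])

cc = calc_circumcenters(tet)                    # shape (1, 3)
dists = np.linalg.norm(tet[0] - cc[0], axis=1)  # distance to each vertex

# The circumcenter is equidistant from all four vertices; for this symmetric
# example it is the origin, so the common distance is sqrt(3).
assert np.allclose(dists, dists[0])
assert np.allclose(cc[0], 0.0)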
bsd-3-clause
jch1/models
autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py
12
1835
import numpy as np import sklearn.preprocessing as prep import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder mnist = input_data.read_data_sets('MNIST_data', one_hot = True) def standard_scale(X_train, X_test): preprocessor = prep.StandardScaler().fit(X_train) X_train = preprocessor.transform(X_train) X_test = preprocessor.transform(X_test) return X_train, X_test def get_random_block_from_data(data, batch_size): start_index = np.random.randint(0, len(data) - batch_size) return data[start_index:(start_index + batch_size)] X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) n_samples = int(mnist.train.num_examples) training_epochs = 20 batch_size = 128 display_step = 1 autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784, n_hidden = 200, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(learning_rate = 0.001), scale = 0.01) for epoch in range(training_epochs): avg_cost = 0. total_batch = int(n_samples / batch_size) # Loop over all batches for i in range(total_batch): batch_xs = get_random_block_from_data(X_train, batch_size) # Fit training using batch data cost = autoencoder.partial_fit(batch_xs) # Compute average loss avg_cost += cost / n_samples * batch_size # Display logs per epoch step if epoch % display_step == 0: print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)) print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
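get_random_block_from_data samples one random contiguous block per step, so an "epoch" here is only an expected pass over the data: some rows can be drawn several times and others skipped. If exact once-per-epoch coverage is preferred, a shuffled mini-batch iterator is a common alternative; a small sketch (my own helper name, same NumPy-only style as the runner) is:

import numpy as np

def iterate_minibatches(data, batch_size, rng=np.random):
    """Yield shuffled, non-overlapping mini-batches covering the array once per epoch."""
    indices = rng.permutation(len(data))
    for start in range(0, len(data) - batch_size + 1, batch_size):
        yield data[indices[start:start + batch_size]]

# Drop-in use inside the training loop above:
# for batch_xs in iterate_minibatches(X_train, batch_size):
#     cost = autoencoder.partial_fit(batch_xs)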
apache-2.0
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/sklearn/tests/test_grid_search.py
1
28864
""" Testing for grid search module (sklearn.grid_search) """ import pickle import sys import warnings from collections import Iterable, Sized from itertools import chain, product import numpy as np import scipy.sparse as sp from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator from sklearn.cluster import KMeans from sklearn.datasets import make_blobs from sklearn.datasets import make_classification from sklearn.datasets import make_multilabel_classification from sklearn.exceptions import ChangedBehaviorWarning from sklearn.exceptions import FitFailedWarning from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.externals.six.moves import xrange from sklearn.externals.six.moves import zip from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.neighbors import KernelDensity from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings with warnings.catch_warnings(): warnings.simplefilter('ignore') from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV, ParameterGrid, ParameterSampler) from sklearn.cross_validation import KFold, StratifiedKFold from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert_equal(list(grid), [grid[i] for i in range(len(grid))]) def test_parameter_grid(): # Test basic properties of ParameterGrid. 
params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert_true(isinstance(grid1, Iterable)) assert_true(isinstance(grid1, Sized)) assert_equal(len(grid1), 3) assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert_equal(len(grid2), 6) # loop to assert we can iterate over the grid multiple times for i in xrange(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert_equal(points, set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert_equal(len(empty), 1) assert_equal(list(empty), [{}]) assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert_equal(len(has_empty), 4) assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}]) assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert_equal(grid_search.best_estimator_.foo_param, 2) for i, foo_i in enumerate([1, 2, 3]): assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i}) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. 
clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert_equal(grid_search_no_score.best_params_, grid_search.best_params_) # check that we can call score and that it gives the correct result assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y)) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc').fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y) score_accuracy = assert_warns(ChangedBehaviorWarning, search_accuracy.score, X, y) score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score, X, y) score_auc = assert_warns(ChangedBehaviorWarning, search_auc.score, X, y) # ensure the test is sane assert_true(score_auc < 1.0) assert_true(score_accuracy < 1.0) assert_not_equal(score_auc, score_accuracy) assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_trivial_grid_scores(): # Test search over a "grid" with only one point. # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}) grid_search.fit(X, y) assert_true(hasattr(grid_search, "grid_scores_")) random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1) random_search.fit(X, y) assert_true(hasattr(random_search, "grid_scores_")) def test_no_refit(): # Test that grid search can be used for model selection only clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False) grid_search.fit(X, y) assert_true(hasattr(grid_search, "best_params_")) def test_grid_search_error(): # Test that grid search will capture errors on data with different # length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_iid(): # test the iid parameter # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. 
mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other svm = SVC(kernel='linear') # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] # once with iid=True (default) grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # for first split, 1/4 of dataset is in test, for second 3/4. # take weighted average assert_almost_equal(first.mean_validation_score, 1 * 1. / 4. + 1. / 3. * 3. / 4.) # once with iid=False grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv, iid=False) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) # scores are the same as above assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # averaged score is just mean of scores assert_almost_equal(first.mean_validation_score, np.mean(first.cv_validation_scores)) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC() cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_bad_param_grid(): param_dict = {"C": 1.0} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": np.ones(6).reshape(3, 2)} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_true(np.mean(y_pred == y_pred2) >= .9) assert_equal(C, C2) def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert_equal(C, C2) # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert_equal(C, C3) assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = 
make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert_true(cv.best_score_ >= 0) # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert_true(np.mean(y_pred == y_test) >= 0) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10,)) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) def test_grid_search_precomputed_kernel_error_kernel_function(): # Test that grid search returns an error when using a kernel_function X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) kernel_function = lambda x1, x2: np.dot(x1, x2.T) clf = SVC(kernel=kernel_function) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_, y_) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert_true(not hasattr(self, 'has_been_fit_')) self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier(check_X=check_X, check_y=check_y) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_X=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_y=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert_true(hasattr(grid_search, "grid_scores_")) def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(random_state=0) km = KMeans(random_state=0) grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='adjusted_rand_score') grid_search.fit(X, y) # ARI can find the right number :) assert_equal(grid_search.best_params_["n_clusters"], 3) # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert_equal(grid_search.best_params_["n_clusters"], 4) def test_gridsearch_no_predict(): # test grid-search with an estimator without predict. 
# slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert_equal(search.best_params_['bandwidth'], .1) assert_equal(search.best_score_, 42) def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert_equal(len(samples), 10) for sample in samples: assert_true(sample["kernel"] in ["rbf", "linear"]) assert_true(0 <= sample["C"] <= 1) def test_randomized_search_grid_scores(): # Make a dataset with a lot of noise to get various kind of prediction # errors across CV folds and parameter settings X, y = make_classification(n_samples=200, n_features=100, n_informative=3, random_state=0) # XXX: as of today (scipy 0.12) it's not possible to set the random seed # of scipy.stats distributions: the assertions in this test should thus # not depend on the randomization params = dict(C=expon(scale=10), gamma=expon(scale=0.1)) n_cv_iter = 3 n_search_iter = 30 search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter, param_distributions=params, iid=False) search.fit(X, y) assert_equal(len(search.grid_scores_), n_search_iter) # Check consistency of the structure of each cv_score item for cv_score in search.grid_scores_: assert_equal(len(cv_score.cv_validation_scores), n_cv_iter) # Because we set iid to False, the mean_validation score is the # mean of the fold mean scores instead of the aggregate sample-wise # mean score assert_almost_equal(np.mean(cv_score.cv_validation_scores), cv_score.mean_validation_score) assert_equal(list(sorted(cv_score.parameters.keys())), list(sorted(params.keys()))) # Check the consistency with the best_score_ and best_params_ attributes sorted_grid_scores = list(sorted(search.grid_scores_, key=lambda x: x.mean_validation_score)) best_score = sorted_grid_scores[-1].mean_validation_score assert_equal(search.best_score_, best_score) tied_best_params = [s.parameters for s in sorted_grid_scores if s.mean_validation_score == best_score] assert_true(search.best_params_ in tied_best_params, "best_params_={0} is not part of the" " tied best models: {1}".format( search.best_params_, tied_best_params)) def test_grid_search_score_consistency(): # test that correct scores are used clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in ['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score) grid_search.fit(X, y) cv = StratifiedKFold(n_folds=3, y=y) for C, scores in zip(Cs, grid_search.grid_scores_): clf.set_params(C=C) scores = scores[2] # get the separate runs from grid scores i = 0 for train, test in cv: clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, scores[i]) i += 1 def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True) grid_search.fit(X, y) pickle.dumps(grid_search) # smoke test random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, 
refit=True, n_iter=3) random_search.fit(X, y) pickle.dumps(random_search) # smoke test def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold(y.shape[0], random_state=0) estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) for parameters, _, cv_validation_scores in grid_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) for parameters, _, cv_validation_scores in random_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert_false(hasattr(gs, "predict_proba")) def test_grid_search_allows_nans(): # Test GridSearchCV with Imputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. 
assert all(np.all(this_point.cv_validation_scores == 0.0) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) assert all(np.all(np.isnan(this_point.cv_validation_scores)) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise error if n_iter too large params = {'first': [0, 1], 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params, n_iter=7) assert_raises(ValueError, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=6) samples = list(sampler) assert_equal(len(samples), 6) for values in ParameterGrid(params): assert_true(values in samples) # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert_equal(len(samples), 99) hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert_equal(len(set(hashable_samples)), 99) # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert_equal(len(samples), 7)
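# A minimal illustrative sketch of the ParameterGrid / ParameterSampler behaviour
# exercised by the tests above (assumes the same deprecated sklearn.grid_search API
# imported at the top of this file; not itself a test):
def _example_parameter_grid_and_sampler():
    # ParameterGrid enumerates every combination; with list-valued distributions and
    # n_iter equal to the grid size, ParameterSampler degenerates to the full grid.
    param_space = {'C': [0.1, 1, 10], 'kernel': ['linear', 'rbf']}
    all_points = list(ParameterGrid(param_space))
    sampled = list(ParameterSampler(param_space, n_iter=6))
    assert len(all_points) == 6 and len(sampled) == 6
    return all_points, sampled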
mit
mmiguel6288code/super-nova-search
supernovasearch/ImgProc.py
1
7756
''' Matthew Miguel mmiguel6288code@gmail.com https://github.com/mmiguel6288code/super-nova-search ''' from __future__ import absolute_import, division, print_function from builtins import (bytes, str, open, super, range, zip, round, input, int, pow, object) try: basestring except NameError: basestring = str import pdb, os, re, datetime from shutil import copyfile from .utils import task_status import imreg_dft as ird from astropy.io import fits from astropy.stats import sigma_clipped_stats from photutils import DAOStarFinder import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils import CircularAperture import matplotlib.pyplot as plt import ccdproc #File Management base_path = r'local/images' capture_path = r'capture' reference_path = r'reference' delta_path = r'delta' log_path = r'detection' def make_mdy(y,m,d): return datetime.datetime(y,m,d).strftime('%B %d %Y') def today_mdy(): return datetime.datetime.now().strftime('%B %d %Y') def valid_mdy(mdy): m = re.search('([A-Za-z]+) (\\d+) (\\d+)',mdy) if m == None: return False month,day,year = m.groups() if not month in ['January','February','March','April','May','June','July','August','September','October','November','December']: return False if not(1 <= int(day) <= 31): return False return True def parse_fits_name(fname): '<prefix>.<img_number>.<object_name>.fit' m = re.search('(.+)\\.([0-9]{8})\\.(.+)\\.fit',fname,re.I) if m != None: prefix,img_num,obj_name = m.groups() else: prefix,img_num,obj_name = None,None,None return prefix,img_num,obj_name def make_fits_name(prefix,img_num,obj_name): return prefix + '.' + img_num + '.' + obj_name + '.FIT' def reference_fits_name(fname): prefix,_,obj_name = parse_fits_name(fname) return make_fits_name(prefix,'reference',obj_name) def load_fits_data(fpath): return fits.open(fpath)[0].data def similarity(imref,imtgt): return ird.similarity(imref,imtgt,numiter=3)['timg'] def save_fits_data(imdata,fpath): hdu = fits.PrimaryHDU(imdata) hdu.writeto(fpath,overwrite=True) def find_unprocessed_captures(mdy,basedir=base_path): ''' mdy = <Month> <Day> <Year> e.g. 
June 11 2017 ''' to_process = [] with task_status('Finding unprocessed captures'): for fname in os.listdir(os.path.join(basedir,capture_path,mdy)): if not os.path.exists(os.path.join(basedir,delta_path,mdy,fname)): try: ref_name = reference_fits_name(fname) except: print('Cannot determine reference file for file:',fname) else: if not os.path.exists(os.path.join(basedir,reference_path,ref_name)): raise Exception('Missing reference file',ref_name) to_process.append(fname) return to_process #Image Processing def process_ccd(data): return cosmic(ccdproc.median_filter(data,2))[0] def cosmic(data,sigclip=5): newdata,mask = ccdproc.cosmicray_lacosmic(data,sigclip) return newdata,mask def source_detection(imdata): mean,median,std = sigma_clipped_stats(imdata,sigma=3.0,iters=5) daofind = DAOStarFinder(fwhm=3.0,threshold=5.*std) sources = daofind(imdata - median) sources.sort('flux') sources.reverse() return sources def supernova_detection(imdel,imref): refmean,refmedian,refstd = sigma_clipped_stats(imref,sigma=3.0,iters=5) mean,median,std = sigma_clipped_stats(imdel,sigma=3.0,iters=5) daofind = DAOStarFinder(fwhm=3.0,threshold=5.*refstd) sources = daofind(imdel - median) sources.sort('flux') sources.reverse() return sources def save_source_log(mdy,cap_fname,sources,ref=False,basedir=base_path): os.makedirs(os.path.join(basedir,log_path,mdy),exist_ok=True) prefix,num,obj_name = parse_fits_name(cap_fname) if ref: log_fpath = os.path.join(basedir,log_path,mdy,prefix + '.' + num + '.' + obj_name + '.ref.log') else: log_fpath = os.path.join(basedir,log_path,mdy,prefix + '.' + num + '.' + obj_name + '.log') sources.write(log_fpath,format='ascii.ecsv',overwrite=True) #with open(log_fpath,'w') as f: # f.write(str(sources)) def save_source_plot(mdy,cap_fname,sources,im_data,ref=False,basedir=base_path): os.makedirs(os.path.join(basedir,log_path,mdy),exist_ok=True) positions = (sources['xcentroid'],sources['ycentroid']) apertures = CircularAperture(positions,r=4.) norm = ImageNormalize(stretch=SqrtStretch()) plt.figure() apertures.plot(color='blue',lw=1.5,alpha=0.5) plt.imshow(im_data,cmap='Greys',origin='lower',norm=norm) prefix,num,obj_name = parse_fits_name(cap_fname) if ref: plot_fpath = os.path.join(basedir,log_path,mdy,prefix + '.' + num + '.' + obj_name + '.ref.png') else: plot_fpath = os.path.join(basedir,log_path,mdy,prefix + '.' + num + '.' 
+ obj_name + '.png') plt.savefig(plot_fpath) plt.close() def copy_to_reference(y=None,m=None,d=None,mdy=None,basedir=base_path): if y == None: if mdy == None: mdy = today_mdy() else: mdy = make_mdy(y,m,d) if not valid_mdy(mdy): raise Exception('Invalid date folder name:',mdy) cap_path = os.path.join(basedir,capture_path,mdy) with task_status('Copying files to reference:',mdy): for fname in sorted(os.listdir(cap_path)): capture_fpath = os.path.join(basedir,capture_path,mdy,fname) ref_name = reference_fits_name(fname) ref_fpath = os.path.join(basedir,reference_path,ref_name) with task_status('Copying',fname): copyfile(capture_fpath,ref_fpath) def process_images(y=None,m=None,d=None,mdy=None,basedir='.',only_new=True): if y == None: if mdy == None: mdy = today_mdy() else: mdy = make_mdy(y,m,d) if not valid_mdy(mdy): raise Exception('Invalid date folder name:',mdy) to_process = find_unprocessed_captures(mdy,basedir=basedir) for fname in to_process: capture_fpath = os.path.join(basedir,capture_path,mdy,fname) ref_name = reference_fits_name(fname) ref_fpath = os.path.join(basedir,reference_path,ref_name) delta_fpath = os.path.realpath(os.path.join(basedir,delta_path,mdy,fname)) with task_status('Loading captured file:',fname): imcap = process_ccd(load_fits_data(capture_fpath)) with task_status('Loading reference file:',ref_name): imref = process_ccd(load_fits_data(ref_fpath)) with task_status('Performing image registration'): imreg = similarity(imref,imcap) with task_status('Performing delta'): imdel = imreg - imref with task_status('Saving delta'): os.makedirs(os.path.join(basedir,delta_path,mdy),exist_ok=True) save_fits_data(imdel,delta_fpath) with task_status('Performing source detection'): sources = supernova_detection(imdel,imref) refsources = source_detection(imref) with task_status('Saving source detection results'): save_source_log(mdy,fname,sources,basedir=basedir) save_source_log(mdy,fname,refsources,ref=True,basedir=basedir) save_source_plot(mdy,fname,sources,imreg,basedir=basedir) save_source_plot(mdy,fname,refsources,imref,ref=True,basedir=basedir)
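# Minimal usage sketch of the capture -> reference -> delta workflow implemented above
# (assumes the local/images directory layout encoded in base_path; the date below is
# only an example, not a value from the project):
def _example_nightly_run():
    mdy = make_mdy(2017, 6, 11)                      # 'June 11 2017'
    # First night for an object: promote the captures to reference frames.
    copy_to_reference(mdy=mdy, basedir=base_path)
    # Later nights: register against the reference, difference, and run source
    # detection on anything not yet present in the delta folder.
    process_images(mdy=mdy, basedir=base_path)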
apache-2.0
dbischof90/optimal_extreme_value_portfolios
scripts/analyze.py
1
2704
from matplotlib import pyplot as plt
from tabulate import tabulate

from projectRoot import ROOT_DIR

PLOT_DIR = ROOT_DIR + "/results"
DATA_DIR = ROOT_DIR + "/data"


def plotPortfolioWeights(results):
    for simulation in results:
        for measure in simulation["data"].keys():
            fig = plt.figure()
            fig.suptitle('Portfolio weights for ' + measure + '-optimal portfolios',
                         fontsize=14, fontweight='bold')
            ax = fig.add_subplot(111)
            ax.set_title('Profile: ' + str(simulation["profile"].NAME) +
                         ', Regular tail index: ' + str(simulation["profile"].IND_REG) +
                         ', Hidden tail index: ' + str(simulation["profile"].IND_HIDDEN))
            ax.plot([e.x for e in simulation["data"][measure]["analytical"]])
            ax.plot([e.x for e in simulation["data"][measure]["empirical"]])
            ax.set_xticks([x * simulation["profile"].RES / 10 for x in range(11)])
            ax.set_xticklabels([simulation["profile"].STARTVAR + x for x in range(11)])
            fig.show()


def plotPortfolioValues(results):
    for simulation in results:
        for measure in simulation["data"].keys():
            fig = plt.figure()
            fig.suptitle('Portfolio values for ' + measure + '-optimal portfolios',
                         fontsize=14, fontweight='bold')
            ax = fig.add_subplot(111)
            ax.set_title('Profile: ' + str(simulation["profile"].NAME) +
                         ', Regular tail index: ' + str(simulation["profile"].IND_REG) +
                         ', Hidden tail index: ' + str(simulation["profile"].IND_HIDDEN))
            ax.plot([e.fun for e in simulation["data"][measure]["analytical"]])
            ax.plot([e.fun for e in simulation["data"][measure]["empirical"]])
            ax.set_xticks([x * simulation["profile"].RES / 10 for x in range(11)])
            ax.set_xticklabels([simulation["profile"].STARTVAR + x for x in range(11)])
            fig.show()


def giveInfos(listOfProfiles, numWorkers):
    infoString = []
    for profile in listOfProfiles:
        infoString.append(tabulate([["Name:", profile.NAME],
                                    ["Steps to calculate:", profile.RES],
                                    ["Sample size:", profile.SAMPLESIZE],
                                    ["Used risk measures:", ', '.join(profile.RISKMEASURES)]],
                                   tablefmt="simple"))
        infoString.append('\n')

    if numWorkers > 1:
        numProcesses = "Jobs splitted to " + str(numWorkers) + " cores"
    else:
        numProcesses = "Single-core execution"

    print('New profiles in queue:\n' + ''.join(infoString) +
          '\nTotal number: ' + str(len(listOfProfiles)) + '\n' + numProcesses + '\n')
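# Hedged sketch (inferred from the attribute accesses above, not from project
# documentation) of the `results` structure the plotting helpers expect: each entry
# pairs a simulation profile with per-measure lists of optimizer results exposing
# .x (weights) and .fun (objective value), e.g. scipy.optimize OptimizeResult objects.
# The measure name "VaR" below is a placeholder.
#
# results = [{
#     "profile": profile,          # has NAME, IND_REG, IND_HIDDEN, RES, STARTVAR,
#                                  # SAMPLESIZE, RISKMEASURES
#     "data": {
#         "VaR": {"analytical": [res_0, res_1, ...],   # res_i.x, res_i.fun
#                 "empirical":  [res_0, res_1, ...]},
#     },
# }]
# plotPortfolioWeights(results)
# plotPortfolioValues(results)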
mit
cdawei/digbeta
dchen/music/src/PLGEN2_bp.py
2
2222
import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from sklearn.metrics import roc_auc_score

from models import BPMTC

if len(sys.argv) != 8:
    print('Usage: python', sys.argv[0], 'WORK_DIR DATASET C1 C2 C3 P TRAIN_DEV(Y/N)')
    sys.exit(0)
else:
    work_dir = sys.argv[1]
    dataset = sys.argv[2]
    C1 = float(sys.argv[3])
    C2 = float(sys.argv[4])
    C3 = float(sys.argv[5])
    p = float(sys.argv[6])
    trndev = sys.argv[7]

# assert trndev in ['Y', 'N']
# assert trndev == 'Y'
if trndev != 'Y':
    raise ValueError('trndev should be "Y"')

data_dir = os.path.join(work_dir, 'data/%s/setting4' % dataset)
fx = os.path.join(data_dir, 'X.pkl.gz')
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytest = os.path.join(data_dir, 'Y_test.pkl.gz')
fcliques_train = os.path.join(data_dir, 'cliques_train.pkl.gz')

fprefix = 'trndev-plgen2-bp-%g-%g-%g-%g' % (C1, C2, C3, p)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)

X = pkl.load(gzip.open(fx, 'rb'))
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_test = pkl.load(gzip.open(fytest, 'rb'))
cliques_train = pkl.load(gzip.open(fcliques_train, 'rb'))

print('C: %g, %g, %g, p: %g' % (C1, C2, C3, p))
print(X.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))

if os.path.exists(fmodel):
    print('evaluating ...')
    clf = pkl.load(gzip.open(fmodel, 'rb'))  # for evaluation
else:
    print('training ...')
    clf = BPMTC(X, Y_train, C1=C1, C2=C2, C3=C3, p=p, user_playlist_indices=cliques_train)
    clf.fit(verbose=2, fnpy=fnpy)

if clf.trained is True:
    pkl.dump(clf, gzip.open(fmodel, 'wb'))

    rps = []
    aucs = []
    for j in range(Y_test.shape[1]):
        y_true = Y_test[:, j].A.reshape(-1)
        npos = y_true.sum()
        assert npos > 0
        y_pred = np.dot(X, clf.mu).reshape(-1)
        sortix = np.argsort(-y_pred)
        y_ = y_true[sortix]
        rps.append(np.mean(y_[:npos]))
        aucs.append(roc_auc_score(y_true, y_pred))

    clf.metric_score = (np.mean(rps), np.mean(aucs), len(rps), Y_test.shape[1])
    pkl.dump(clf, gzip.open(fmodel, 'wb'))
    print('\n%g, %g, %d / %d' % clf.metric_score)
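# Hedged invocation sketch, based only on the usage string above (the work directory,
# dataset name and hyper-parameter values are placeholders, not values from the project):
#
#   python PLGEN2_bp.py /path/to/work_dir mydataset 1 1 1 2 Y
#
# This reads data/<DATASET>/setting4/{X,Y_train,Y_test,cliques_train}.pkl.gz under
# WORK_DIR, trains (or reloads) a BPMTC model, and caches it together with the
# (mean rps, mean aucs, #labels evaluated, #labels) tuple it prints at the end.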
gpl-3.0
karwa/swift
utils/dev-scripts/scurve_printer.py
37
2875
#!/usr/bin/env python

# This is a simple script that takes in an scurve file produced by
# csvcolumn_to_scurve and produces a png graph of the scurve.

import argparse
import csv

import matplotlib.pyplot as plt
import numpy as np

FIELDS = ['N/total', 'New/Old']


def get_data(input_file):
    global FIELDS
    for row in csv.DictReader(input_file):
        yield (float(row[FIELDS[0]]), float(row[FIELDS[1]]))


def main():
    p = argparse.ArgumentParser()
    p.add_argument('input_csv_file', type=argparse.FileType('r'))
    p.add_argument('output_file', type=str)
    p.add_argument('-y-axis-num-tick-marks', type=int,
                   help='The number of y tick marks to use above/below zero.')
    p.add_argument('-y-axis-min', type=float,
                   help='Override the min y axis that we use')
    p.add_argument('-y-axis-max', type=float,
                   help='Override the min y axis that we use')
    p.add_argument('-title', type=str,
                   help='Title of the graph')
    p.add_argument('-x-axis-title', type=str,
                   help='The title to use on the x-axis of the graph')
    p.add_argument('-y-axis-title', type=str,
                   help='The title to use on the x-axis of the graph')
    args = p.parse_args()

    data = np.array(list(get_data(args.input_csv_file)))
    assert np.all(data >= 0)

    x = data[:, 0]
    y = data[:, 1]

    x_axis_title = args.x_axis_title or FIELDS[0]
    y_axis_title = args.y_axis_title or FIELDS[1]
    title = args.title or "{} vs {}".format(x_axis_title, y_axis_title)

    fig, ax = plt.subplots()
    fig.set_size_inches(18.5, 18.5)
    fig.suptitle(title, fontsize=20)
    ax.set_xlabel(x_axis_title, fontsize=20)
    ax.set_ylabel(y_axis_title, fontsize=20)
    ax.plot(x, y)
    ax.scatter(x, y)

    # To get good bounds, we:
    #
    # 1. Re-center our data at 0 by subtracting 1. This will give us the %
    # difference in between new and old (i.e. (new - old)/old)
    #
    # 2. Then we take the maximum absolute delta from zero and round to a
    # multiple of 5 away from zero. Lets call this value limit.
    #
    # 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit]
    recentered_data = y - 1.0
    max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0)
    y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01

    ax.set_xlim(0.0, 1.0)
    y_min = args.y_axis_min or 1.0 - y_limit
    y_max = args.y_axis_max or 1.0 + y_limit
    assert(y_min <= y_max)
    ax.set_ylim(y_min, y_max)
    ax.grid(True)
    ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05))
    if args.y_axis_num_tick_marks:
        y_delta = y_max - y_min
        y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks)
        ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency))
    plt.savefig(args.output_file)


if __name__ == "__main__":
    main()
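# Worked example of the bounds rule in main() (illustrative numbers only):
# if the largest deviation of New/Old from 1.0 is 0.07, then
#   max_magnitude = int(0.07 * 100) = 7
#   y_limit       = ((7 // 5) + 1) * 5 * 0.01 = 0.10
# so the default y-axis becomes [0.90, 1.10], i.e. the deviation is rounded up
# to the next multiple of 5 percent away from zero.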
apache-2.0
TNick/pylearn2
pylearn2/train_extensions/roc_auc.py
30
4854
""" TrainExtension subclass for calculating ROC AUC scores on monitoring dataset(s), reported via monitor channels. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" import numpy as np try: from sklearn.metrics import roc_auc_score except ImportError: roc_auc_score = None import theano from theano import gof, config from theano import tensor as T from pylearn2.train_extensions import TrainExtension class RocAucScoreOp(gof.Op): """ Theano Op wrapping sklearn.metrics.roc_auc_score. Parameters ---------- name : str, optional (default 'roc_auc') Name of this Op. use_c_code : WRITEME """ def __init__(self, name='roc_auc', use_c_code=theano.config.cxx): super(RocAucScoreOp, self).__init__(use_c_code) self.name = name def make_node(self, y_true, y_score): """ Calculate ROC AUC score. Parameters ---------- y_true : tensor_like Target class labels. y_score : tensor_like Predicted class labels or probabilities for positive class. """ y_true = T.as_tensor_variable(y_true) y_score = T.as_tensor_variable(y_score) output = [T.scalar(name=self.name, dtype=config.floatX)] return gof.Apply(self, [y_true, y_score], output) def perform(self, node, inputs, output_storage): """ Calculate ROC AUC score. Parameters ---------- node : Apply instance Symbolic inputs and outputs. inputs : list Sequence of inputs. output_storage : list List of mutable 1-element lists. """ if roc_auc_score is None: raise RuntimeError("Could not import from sklearn.") y_true, y_score = inputs try: roc_auc = roc_auc_score(y_true, y_score) except ValueError: roc_auc = np.nan output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX) class RocAucChannel(TrainExtension): """ Adds a ROC AUC channel to the monitor for each monitoring dataset. This monitor will return nan unless both classes are represented in y_true. For this reason, it is recommended to set monitoring_batches to 1, especially when using unbalanced datasets. Parameters ---------- channel_name_suffix : str, optional (default 'roc_auc') Channel name suffix. positive_class_index : int, optional (default 1) Index of positive class in predicted values. negative_class_index : int or None, optional (default None) Index of negative class in predicted values for calculation of one vs. one performance. If None, uses all examples not in the positive class (one vs. the rest). """ def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1, negative_class_index=None): self.channel_name_suffix = channel_name_suffix self.positive_class_index = positive_class_index self.negative_class_index = negative_class_index def setup(self, model, dataset, algorithm): """ Add ROC AUC channels for monitoring dataset(s) to model.monitor. Parameters ---------- model : object The model being trained. dataset : object Training dataset. algorithm : object Training algorithm. """ m_space, m_source = model.get_monitoring_data_specs() state, target = m_space.make_theano_batch() y = T.argmax(target, axis=1) y_hat = model.fprop(state)[:, self.positive_class_index] # one vs. the rest if self.negative_class_index is None: y = T.eq(y, self.positive_class_index) # one vs. 
one else: pos = T.eq(y, self.positive_class_index) neg = T.eq(y, self.negative_class_index) keep = T.add(pos, neg).nonzero() y = T.eq(y[keep], self.positive_class_index) y_hat = y_hat[keep] roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat) roc_auc = T.cast(roc_auc, config.floatX) for dataset_name, dataset in algorithm.monitoring_dataset.items(): if dataset_name: channel_name = '{0}_{1}'.format(dataset_name, self.channel_name_suffix) else: channel_name = self.channel_name_suffix model.monitor.add_channel(name=channel_name, ipt=(state, target), val=roc_auc, data_specs=(m_space, m_source), dataset=dataset)
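# Hedged NumPy illustration (not used by the extension itself) of the one-vs-one
# filtering that RocAucChannel.setup performs symbolically with Theano: keep only
# examples whose true class is the positive or the negative class, then binarize
# against the positive class before computing ROC AUC.
def _one_vs_one_labels(y, positive_class_index, negative_class_index):
    keep = (y == positive_class_index) | (y == negative_class_index)
    return keep, (y[keep] == positive_class_index).astype(int)

# Example: _one_vs_one_labels(np.array([0, 1, 2, 1, 2, 0]), 1, 2)
# -> keep = [False, True, True, True, True, False], labels = [1, 0, 1, 0]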
bsd-3-clause
abhisg/scikit-learn
examples/ensemble/plot_adaboost_twoclass.py
347
3268
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The distributions of decision scores are shown separately for samples of class A and B. The predicted class label for each sample is determined by the sign of the decision score. Samples with decision scores greater than zero are classified as B, and are otherwise classified as A. The magnitude of a decision score determines the degree of likeness with the predicted class label. Additionally, a new dataset could be constructed containing a desired purity of class B, for example, by only selecting samples with a decision score above some value. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import make_gaussian_quantiles # Construct dataset X1, y1 = make_gaussian_quantiles(cov=2., n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1) X = np.concatenate((X1, X2)) y = np.concatenate((y1, - y2 + 1)) # Create and fit an AdaBoosted decision tree bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200) bdt.fit(X, y) plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries plt.subplot(121) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') # Plot the two-class decision scores twoclass_output = bdt.decision_function(X) plot_range = (twoclass_output.min(), twoclass_output.max()) plt.subplot(122) for i, n, c in zip(range(2), class_names, plot_colors): plt.hist(twoclass_output[y == i], bins=10, range=plot_range, facecolor=c, label='Class %s' % n, alpha=.5) x1, x2, y1, y2 = plt.axis() plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc='upper right') plt.ylabel('Samples') plt.xlabel('Score') plt.title('Decision Scores') plt.tight_layout() plt.subplots_adjust(wspace=0.35) plt.show()
bsd-3-clause
Titan-C/scikit-learn
sklearn/neighbors/tests/test_approximate.py
12
20126
""" Testing for the approximate neighbor search using Locality Sensitive Hashing Forest module (sklearn.neighbors.LSHForest). """ # Author: Maheshakya Wijewardena, Joel Nothman import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_array_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.metrics.pairwise import pairwise_distances from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors def test_lsh_forest_deprecation(): assert_warns_message(DeprecationWarning, "LSHForest has poor performance and has been " "deprecated in 0.19. It will be removed " "in version 0.21.", LSHForest) def test_neighbors_accuracy_with_n_candidates(): # Checks whether accuracy increases as `n_candidates` increases. n_candidates_values = np.array([.1, 50, 500]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_candidates_values.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, n_candidates in enumerate(n_candidates_values): lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_candidates=n_candidates) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies print('accuracies:', accuracies) assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") def test_neighbors_accuracy_with_n_estimators(): # Checks whether accuracy increases as `n_estimators` increases. 
n_estimators = np.array([1, 10, 100]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_estimators.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, t in enumerate(n_estimators): lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_candidates=500, n_estimators=t) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") @ignore_warnings def test_kneighbors(): # Checks whether desired number of neighbors are returned. # It is guaranteed to return the requested number of neighbors # if `min_hash_match` is set to 0. Returned distances should be # in ascending order. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0) # Test unfitted estimator assert_raises(ValueError, lshf.kneighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=False) # Desired number of neighbors should be returned. assert_equal(neighbors.shape[1], n_neighbors) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=True) assert_equal(neighbors.shape[0], n_queries) assert_equal(distances.shape[0], n_queries) # Test only neighbors neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=False) assert_equal(neighbors.shape[0], n_queries) # Test random point(not in the data set) query = rng.randn(n_features).reshape(1, -1) lshf.kneighbors(query, n_neighbors=1, return_distance=False) # Test n_neighbors at initialization neighbors = lshf.kneighbors(query, return_distance=False) assert_equal(neighbors.shape[1], 5) # Test `neighbors` has an integer dtype assert_true(neighbors.dtype.kind == 'i', msg="neighbors are not in integer dtype.") def test_radius_neighbors(): # Checks whether Returned distances are less than `radius` # At least one point should be returned when the `radius` is set # to mean distance from the considering point to other points in # the database. # Moreover, this test compares the radius neighbors of LSHForest # with the `sklearn.neighbors.NearestNeighbors`. 
n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() # Test unfitted estimator assert_raises(ValueError, lshf.radius_neighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): # Select a random point in the dataset as the query query = X[rng.randint(0, n_samples)].reshape(1, -1) # At least one neighbor should be returned when the radius is the # mean distance from the query to the points of the dataset. mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=False) assert_equal(neighbors.shape, (1,)) assert_equal(neighbors.dtype, object) assert_greater(neighbors[0].shape[0], 0) # All distances to points in the results of the radius query should # be less than mean_dist distances, neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=True) assert_array_less(distances[0], mean_dist) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.radius_neighbors(queries, return_distance=True) # dists and inds should not be 1D arrays or arrays of variable lengths # hence the use of the object dtype. assert_equal(distances.shape, (n_queries,)) assert_equal(distances.dtype, object) assert_equal(neighbors.shape, (n_queries,)) assert_equal(neighbors.dtype, object) # Compare with exact neighbor search query = X[rng.randint(0, n_samples)].reshape(1, -1) mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist) distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist) # Radius-based queries do not sort the result points and the order # depends on the method, the random_state and the dataset order. Therefore # we need to sort the results ourselves before performing any comparison. sorted_dists_exact = np.sort(distances_exact[0]) sorted_dists_approx = np.sort(distances_approx[0]) # Distances to exact neighbors are less than or equal to approximate # counterparts as the approximate radius query might have missed some # closer neighbors. assert_true(np.all(np.less_equal(sorted_dists_exact, sorted_dists_approx))) @ignore_warnings def test_radius_neighbors_boundary_handling(): X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]] n_points = len(X) # Build an exact nearest neighbors model as reference model to ensure # consistency between exact and approximate methods nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) # Build a LSHForest model with hyperparameter values that always guarantee # exact results on this toy dataset. 
lsfh = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0, n_candidates=n_points, random_state=42).fit(X) # define a query aligned with the first axis query = [[1., 0.]] # Compute the exact cosine distances of the query to the four points of # the dataset dists = pairwise_distances(query, X, metric='cosine').ravel() # The first point is almost aligned with the query (very small angle), # the cosine distance should therefore be almost null: assert_almost_equal(dists[0], 0, decimal=5) # The second point form an angle of 45 degrees to the query vector assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4)) # The third point is orthogonal from the query vector hence at a distance # exactly one: assert_almost_equal(dists[2], 1) # The last point is almost colinear but with opposite sign to the query # therefore it has a cosine 'distance' very close to the maximum possible # value of 2. assert_almost_equal(dists[3], 2, decimal=5) # If we query with a radius of one, all the samples except the last sample # should be included in the results. This means that the third sample # is lying on the boundary of the radius query: exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1) assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2]) assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1]) # If we perform the same query with a slightly lower radius, the third # point of the dataset that lay on the boundary of the previous query # is now rejected: eps = np.finfo(np.float64).eps exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps) assert_array_equal(np.sort(exact_idx[0]), [0, 1]) assert_array_equal(np.sort(approx_idx[0]), [0, 1]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2]) def test_distances(): # Checks whether returned neighbors are from closest to farthest. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) distances, neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=True) # Returned neighbors should be from closest to farthest, that is # increasing distance values. assert_true(np.all(np.diff(distances[0]) >= 0)) # Note: the radius_neighbors method does not guarantee the order of # the results. def test_fit(): # Checks whether `fit` method sets all attribute values correctly. 
n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_estimators=n_estimators) ignore_warnings(lshf.fit)(X) # _input_array = X assert_array_equal(X, lshf._fit_X) # A hash function g(p) for each tree assert_equal(n_estimators, len(lshf.hash_functions_)) # Hash length = 32 assert_equal(32, lshf.hash_functions_[0].components_.shape[0]) # Number of trees_ in the forest assert_equal(n_estimators, len(lshf.trees_)) # Each tree has entries for every data point assert_equal(n_samples, len(lshf.trees_[0])) # Original indices after sorting the hashes assert_equal(n_estimators, len(lshf.original_indices_)) # Each set of original indices in a tree has entries for every data point assert_equal(n_samples, len(lshf.original_indices_[0])) def test_partial_fit(): # Checks whether inserting array is consistent with fitted data. # `partial_fit` method should set all attribute values correctly. n_samples = 12 n_samples_partial_fit = 3 n_features = 2 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) X_partial_fit = rng.rand(n_samples_partial_fit, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() # Test unfitted estimator ignore_warnings(lshf.partial_fit)(X) assert_array_equal(X, lshf._fit_X) ignore_warnings(lshf.fit)(X) # Insert wrong dimension assert_raises(ValueError, lshf.partial_fit, np.random.randn(n_samples_partial_fit, n_features - 1)) ignore_warnings(lshf.partial_fit)(X_partial_fit) # size of _input_array = samples + 1 after insertion assert_equal(lshf._fit_X.shape[0], n_samples + n_samples_partial_fit) # size of original_indices_[1] = samples + 1 assert_equal(len(lshf.original_indices_[0]), n_samples + n_samples_partial_fit) # size of trees_[1] = samples + 1 assert_equal(len(lshf.trees_[1]), n_samples + n_samples_partial_fit) def test_hash_functions(): # Checks randomness of hash functions. # Variance and mean of each hash function (projection vector) # should be different from flattened array of hash functions. # If hash functions are not randomly built (seeded with # same value), variances and means of all functions are equal. n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_estimators=n_estimators, random_state=rng.randint(0, np.iinfo(np.int32).max)) ignore_warnings(lshf.fit)(X) hash_functions = [] for i in range(n_estimators): hash_functions.append(lshf.hash_functions_[i].components_) for i in range(n_estimators): assert_not_equal(np.var(hash_functions), np.var(lshf.hash_functions_[i].components_)) for i in range(n_estimators): assert_not_equal(np.mean(hash_functions), np.mean(lshf.hash_functions_[i].components_)) def test_candidates(): # Checks whether candidates are sufficient. # This should handle the cases when number of candidates is 0. # User should be warned when number of candidates is less than # requested number of neighbors. X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]], dtype=np.float32) X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1) # For zero candidates lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=32) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. 
Candidates are filled up" " uniformly from unselected" " indices." % (3, 32)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=3) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3) assert_equal(distances.shape[1], 3) # For candidates less than n_neighbors lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=31) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (5, 31)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=5) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5) assert_equal(distances.shape[1], 5) def test_graphs(): # Smoke tests for graph methods. n_samples_sizes = [5, 10, 20] n_features = 3 rng = np.random.RandomState(42) for n_samples in n_samples_sizes: X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0) ignore_warnings(lshf.fit)(X) kneighbors_graph = lshf.kneighbors_graph(X) radius_neighbors_graph = lshf.radius_neighbors_graph(X) assert_equal(kneighbors_graph.shape[0], n_samples) assert_equal(kneighbors_graph.shape[1], n_samples) assert_equal(radius_neighbors_graph.shape[0], n_samples) assert_equal(radius_neighbors_graph.shape[1], n_samples) def test_sparse_input(): # note: Fixed random state in sp.rand is not supported in older scipy. # The test should succeed regardless. X1 = sp.rand(50, 100) X2 = sp.rand(10, 100) forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)( radius=1, random_state=0).fit(X1) forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)( radius=1, random_state=0).fit(X1.A) d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True) assert_almost_equal(d_sparse, d_dense) assert_almost_equal(i_sparse, i_dense) d_sparse, i_sparse = forest_sparse.radius_neighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.radius_neighbors(X2.A, return_distance=True) assert_equal(d_sparse.shape, d_dense.shape) for a, b in zip(d_sparse, d_dense): assert_almost_equal(a, b) for a, b in zip(i_sparse, i_dense): assert_almost_equal(a, b)
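# Minimal usage sketch of the (deprecated) estimator exercised above; it mirrors the
# pattern used throughout this file of constructing LSHForest under ignore_warnings
# and querying approximate neighbours (sizes and parameters are illustrative only):
def _example_lshforest_query():
    rng = np.random.RandomState(42)
    X = rng.rand(20, 5)
    lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
        n_estimators=10, random_state=42)
    ignore_warnings(lshf.fit)(X)
    distances, indices = lshf.kneighbors(X[:1], n_neighbors=3)
    return distances, indices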
bsd-3-clause
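The tests above exercise LSHForest's fit, partial_fit, and kneighbors behaviour. A minimal sketch of the approximate-neighbors workflow they assume is shown below; note that LSHForest was deprecated in scikit-learn 0.19 and removed in later releases, so this only runs on the older versions this test file targets, and the array sizes are illustrative.

import numpy as np
from sklearn.neighbors import LSHForest

rng = np.random.RandomState(42)
X_train = rng.rand(12, 2)
X_query = rng.rand(3, 2)

lshf = LSHForest(n_estimators=5, random_state=42)
lshf.fit(X_train)                      # builds hash_functions_, trees_ and original_indices_
distances, indices = lshf.kneighbors(X_query, n_neighbors=3)
print(distances.shape, indices.shape)  # (3, 3) (3, 3)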
ksthesis/gatk
src/main/python/org/broadinstitute/hellbender/vqsr_cnn/vqsr_cnn/plots.py
6
6827
# plots.py # # Plotting code for Variant Filtration with Neural Nets # This includes evaluation plots like Precision and Recall curves, # various flavors of Receiver Operating Characteristic (ROC curves), # As well as graphs of the metrics that are watched during neural net training. # # December 2016 # Sam Friedman # sam@broadinstitute.org # Imports import os import math import matplotlib import numpy as np matplotlib.use('Agg') # Need this to write images from the GSA servers. Order matters: import matplotlib.pyplot as plt # First import matplotlib, then use Agg, then import plt from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve, average_precision_score image_ext = '.png' color_array = ['red', 'indigo', 'cyan', 'pink', 'purple'] key_colors = { 'Neural Net':'green', 'CNN_SCORE':'green', 'CNN_2D':'green', 'Heng Li Hard Filters':'lightblue', 'GATK Hard Filters':'orange','GATK Signed Distance':'darksalmon', 'VQSR gnomAD':'cornflowerblue', 'VQSR Single Sample':'blue', 'VQSLOD':'cornflowerblue', 'Deep Variant':'magenta', 'QUAL':'magenta', 'DEEP_VARIANT_QUAL':'magenta', 'Random Forest':'darkorange', 'SNP':'cornflowerblue', 'NOT_SNP':'orange', 'INDEL':'green', 'NOT_INDEL':'red', 'VQSLOD none':'cornflowerblue', 'VQSLOD strModel':'orange', 'VQSLOD default':'green', 'REFERENCE':'green', 'HET_SNP':'cornflowerblue', 'HOM_SNP':'blue', 'HET_DELETION':'magenta', 'HOM_DELETION':'violet', 'HET_INSERTION':'orange', 'HOM_INSERTION':'darkorange' } precision_label = 'Precision | Positive Predictive Value | TP/(TP+FP)' recall_label = 'Recall | Sensitivity | True Positive Rate | TP/(TP+FN)' fallout_label = 'Fallout | 1 - Specificity | False Positive Rate | FP/(FP+TN)' def get_fpr_tpr_roc(model, test_data, test_truth, labels, batch_size=32): """Get false positive and true positive rates from a classification model. Arguments: model: The model whose predictions to evaluate. test_data: Input testing data in the shape the model expects. test_truth: The true labels of the testing data labels: dict specifying the class labels. batch_size: Size of batches for prediction over the test data. Returns: dict, dict, dict: false positive rate, true positive rate, and area under ROC curve. The dicts all use label indices as keys. fpr and tpr dict's values are lists (the x and y coordinates that defines the ROC curves) and for AUC the value is a float. """ y_pred = model.predict(test_data, batch_size=batch_size, verbose=0) return get_fpr_tpr_roc_pred(y_pred, test_truth, labels) def get_fpr_tpr_roc_pred(y_pred, test_truth, labels): """Get false positive and true positive rates from predictions and true labels. Arguments: y_pred: model predictions to evaluate. test_truth: The true labels of the testing data labels: dict specifying the class labels. Returns: dict, dict, dict: false positive rate, true positive rate, and area under ROC curve. The dicts all use label indices as keys. fpr and tpr dict's values are lists (the x and y coordinates that defines the ROC curves) and for AUC the value is a float. """ fpr = dict() tpr = dict() roc_auc = dict() for k in labels.keys(): cur_idx = labels[k] fpr[labels[k]], tpr[labels[k]], _ = roc_curve(test_truth[:,cur_idx], y_pred[:,cur_idx]) roc_auc[labels[k]] = auc(fpr[labels[k]], tpr[labels[k]]) return fpr, tpr, roc_auc def plot_roc_per_class(model, test_data, test_truth, labels, title, batch_size=32, prefix='./figures/'): """Plot a per class ROC curve. Arguments: model: The model whose predictions to evaluate. 
test_data: Input testing data in the shape the model expects. test_truth: The true labels of the testing data labels: dict specifying the class labels. title: the title to display on the plot. batch_size: Size of batches for prediction over the test data. prefix: path specifying where to save the plot. """ fpr, tpr, roc_auc = get_fpr_tpr_roc(model, test_data, test_truth, labels, batch_size) lw = 3 plt.figure(figsize=(28,22)) matplotlib.rcParams.update({'font.size': 34}) for key in labels.keys(): if key in key_colors: color = key_colors[key] else: color = np.random.choice(color_array) plt.plot(fpr[labels[key]], tpr[labels[key]], color=color, lw=lw, label=str(key)+' area under ROC: %0.3f'%roc_auc[labels[key]]) plt.plot([0, 1], [0, 1], 'k:', lw=0.5) plt.xlim([0.0, 1.0]) plt.ylim([-0.02, 1.03]) plt.xlabel(fallout_label) plt.ylabel(recall_label) plt.title('ROC:'+ title + '\n') matplotlib.rcParams.update({'font.size': 56}) plt.legend(loc="lower right") figure_path = prefix+"per_class_roc_"+title+image_ext if not os.path.exists(os.path.dirname(figure_path)): os.makedirs(os.path.dirname(figure_path)) plt.savefig(figure_path) print('Saved figure at:', figure_path) def plot_metric_history(history, title, prefix='./figures/'): """Plot metric history throughout training. Arguments: history: History object returned by Keras fit function. title: the title to display on the plot. prefix: path specifying where to save the plot. """ num_plots = len([k for k in history.history.keys() if not 'val' in k]) row = 0 col = 0 rows = 4 cols = max(2, int(math.ceil(num_plots/float(rows)))) f, axes = plt.subplots(rows, cols, sharex=True, figsize=(36, 24)) for k in sorted(history.history.keys()): if 'val' not in k: axes[row, col].plot(history.history[k]) axes[row, col].set_ylabel(str(k)) axes[row, col].set_xlabel('epoch') if 'val_'+k in history.history: axes[row, col].plot(history.history['val_'+k]) labels = ['train', 'valid'] else: labels = [k] axes[row, col].legend(labels, loc='upper left') row += 1 if row == rows: row = 0 col += 1 if row*col >= rows*cols: break axes[0, 1].set_title(title) figure_path = prefix+"metric_history_"+title+image_ext if not os.path.exists(os.path.dirname(figure_path)): os.makedirs(os.path.dirname(figure_path)) plt.savefig(figure_path) def weight_path_to_title(wp): """Get a title from a model's weight path Arguments: wp: path to model's weights. Returns: str: a reformatted string """ return wp.split('/')[-1].replace('__', '-').split('.')[0]
bsd-3-clause
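The docstrings above describe building per-class ROC curves keyed by label index. A toy sketch of that bookkeeping using sklearn.metrics directly is given below; the labels dict and arrays are illustrative, not GATK data.

import numpy as np
from sklearn.metrics import roc_curve, auc

labels = {'SNP': 0, 'NOT_SNP': 1}               # class name -> column index
test_truth = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.4, 0.6]])

fpr, tpr, roc_auc = {}, {}, {}
for name, idx in labels.items():
    fpr[idx], tpr[idx], _ = roc_curve(test_truth[:, idx], y_pred[:, idx])
    roc_auc[idx] = auc(fpr[idx], tpr[idx])
print(roc_auc)                                  # AUC per label index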
lazywei/scikit-learn
examples/neighbors/plot_kde_1d.py
347
5100
""" =================================== Simple 1D Kernel Density Estimation =================================== This example uses the :class:`sklearn.neighbors.KernelDensity` class to demonstrate the principles of Kernel Density Estimation in one dimension. The first plot shows one of the problems with using histograms to visualize the density of points in 1D. Intuitively, a histogram can be thought of as a scheme in which a unit "block" is stacked above each point on a regular grid. As the top two panels show, however, the choice of gridding for these blocks can lead to wildly divergent ideas about the underlying shape of the density distribution. If we instead center each block on the point it represents, we get the estimate shown in the bottom left panel. This is a kernel density estimation with a "top hat" kernel. This idea can be generalized to other kernel shapes: the bottom-right panel of the first figure shows a Gaussian kernel density estimate over the same distribution. Scikit-learn implements efficient kernel density estimation using either a Ball Tree or KD Tree structure, through the :class:`sklearn.neighbors.KernelDensity` estimator. The available kernels are shown in the second figure of this example. The third figure compares kernel density estimates for a distribution of 100 samples in 1 dimension. Though this example uses 1D distributions, kernel density estimation is easily and efficiently extensible to higher dimensions as well. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm from sklearn.neighbors import KernelDensity #---------------------------------------------------------------------- # Plot the progression of histograms to kernels np.random.seed(1) N = 20 X = np.concatenate((np.random.normal(0, 1, 0.3 * N), np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] bins = np.linspace(-5, 10, 10) fig, ax = plt.subplots(2, 2, sharex=True, sharey=True) fig.subplots_adjust(hspace=0.05, wspace=0.05) # histogram 1 ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True) ax[0, 0].text(-3.5, 0.31, "Histogram") # histogram 2 ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True) ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted") # tophat KDE kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF') ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density") # Gaussian KDE kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF') ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density") for axi in ax.ravel(): axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k') axi.set_xlim(-4, 9) axi.set_ylim(-0.02, 0.34) for axi in ax[:, 0]: axi.set_ylabel('Normalized Density') for axi in ax[1, :]: axi.set_xlabel('x') #---------------------------------------------------------------------- # Plot all available kernels X_plot = np.linspace(-6, 6, 1000)[:, None] X_src = np.zeros((1, 1)) fig, ax = plt.subplots(2, 3, sharex=True, sharey=True) fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05) def format_func(x, loc): if x == 0: return '0' elif x == 1: return 'h' elif x == -1: return '-h' else: return '%ih' % x for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']): axi = ax.ravel()[i] log_dens = 
KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot) axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF') axi.text(-2.6, 0.95, kernel) axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func)) axi.xaxis.set_major_locator(plt.MultipleLocator(1)) axi.yaxis.set_major_locator(plt.NullLocator()) axi.set_ylim(0, 1.05) axi.set_xlim(-2.9, 2.9) ax[0, 1].set_title('Available Kernels') #---------------------------------------------------------------------- # Plot a 1D density example N = 100 np.random.seed(1) X = np.concatenate((np.random.normal(0, 1, 0.3 * N), np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0]) + 0.7 * norm(5, 1).pdf(X_plot[:, 0])) fig, ax = plt.subplots() ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2, label='input distribution') for kernel in ['gaussian', 'tophat', 'epanechnikov']: kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X) log_dens = kde.score_samples(X_plot) ax.plot(X_plot[:, 0], np.exp(log_dens), '-', label="kernel = '{0}'".format(kernel)) ax.text(6, 0.38, "N={0} points".format(N)) ax.legend(loc='upper left') ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k') ax.set_xlim(-4, 9) ax.set_ylim(-0.02, 0.4) plt.show()
bsd-3-clause
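The module docstring above explains kernel density estimation with sklearn.neighbors.KernelDensity. A minimal fit/score_samples example on synthetic data is shown below; note also that recent NumPy requires an integer sample count, so the 0.3 * N and 0.7 * N sizes in the original example need an int() cast on current versions.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(1)
X = np.concatenate([rng.normal(0, 1, 30), rng.normal(5, 1, 70)])[:, np.newaxis]
grid = np.linspace(-5, 10, 1000)[:, np.newaxis]

kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
density = np.exp(kde.score_samples(grid))   # score_samples returns the log-density
print(grid[density.argmax()])               # location of the estimated mode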
sirrice/scorpion
scorpion/sigmod/streamrangemerger.py
1
16456
import json import math import pdb import random import numpy as np import sys import time sys.path.extend(['.', '..']) from itertools import chain, repeat, izip from collections import defaultdict from operator import mul, and_, or_ from scipy.optimize import fsolve from matplotlib import pyplot as plt from ..util import * from ..bottomup.bounding_box import * from ..bottomup.cluster import * from frontier import * from rangemerger import RangeMerger2 _logger = get_logger() class StreamRangeMerger(RangeMerger2): """ Streaming version of the range merger. This lets Scorpion overlap the partitioning algorithm with the merging algorithm. """ def __init__(self, *args, **kwargs): super(StreamRangeMerger, self).__init__(*args, **kwargs) self.valid_cluster_f = kwargs.get('valid_cluster_f', lambda c: True) # idx -> clusters to expand -- different than clusters on frontier!! self.tasks = defaultdict(list) # all values for each dimension self.all_cont_vals = defaultdict(set) # idx -> values # attribute name -> { attr val -> [sum of influence value at c=0.1, count] } self.all_disc_vals = defaultdict(lambda: defaultdict(lambda: [0,0])) # name -> { val -> # times failed } self.failed_disc_vals = defaultdict(lambda: defaultdict(lambda:0)) # stores the frontier after each iteration self.added = set() self.seen = set() self.frontiers = [] self.adj_graph = None self.K = 2 self.nblocks = 50 if len(self.learner.full_table) < 40000: self.K = 2 self.nblocks = 60 if len(self.learner.full_table) < 10000: self.nblocks = 100 self.get_frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.nblocks, learner=self.learner) self.get_frontier.stats = self.stats if self.DEBUG: self.renderer = InfRenderer('/tmp/merger.pdf', c_range=self.c_range) def close(self): if self.DEBUG: self.renderer.close() def get_frontier_obj(self, version): while version >= len(self.frontiers): frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.blocks, learner=self.learner) frontier.stats = self.stats self.frontiers.append(frontier) return self.frontiers[version] @property def frontier_iter(self): return list(self.frontiers) @instrument def setup_stats(self, clusters): all_inf = lambda l: all([abs(v) == float('inf') for v in l]) clusters = filter(lambda c: c.bound_hash not in self.added, clusters) clusters = filter(lambda c: not all_inf(c.inf_state[0]), clusters) clusters = filter(lambda c: len(c.inf_state[2]) == 0 or not all_inf(c.inf_state[2]), clusters) self.added.update([c.bound_hash for c in clusters]) super(StreamRangeMerger, self).setup_stats(clusters) start = time.time() if not self.adj_graph: self.adj_graph = self.make_adjacency([], True) self.adj_graph.insert(clusters) self.adj_graph.sync() self.stats['adj_sync'][0] += time.time() - start self.stats['adj_sync'][1] += 1 for c in clusters: for idx in xrange(len(c.cols)): self.all_cont_vals[idx].add(c.bbox[0][idx]) self.all_cont_vals[idx].add(c.bbox[1][idx]) for disc, vals in c.discretes.iteritems(): if len(vals) < 3: vals = [(v,) for v in vals] else: vals = [tuple(vals)] for v in vals: self.all_disc_vals[disc][v][0] += c.inf_func(0.1) self.all_disc_vals[disc][v][1] += 1 #self.all_disc_vals[disc].update(vals) return clusters @instrument def best_so_far(self, prune=False): clusters = set() for frontier in self.frontier_iter: clusters.update(frontier.frontier) if prune: for c in clusters: c.c_range = list(self.c_range) clusters = self.get_frontier(clusters)[0] clusters = filter(lambda c: r_vol(c.c_range), clusters) if self.DEBUG: self.renderer.new_page() 
self.renderer.set_title('best so far') self.renderer.plot_active_inf_curves(clusters) return clusters @instrument def best_at_c(self, c_val, K=6): clusters = set() for frontier in self.frontier_iter: clusters.update(frontier.seen_clusters) rm_dups(clusters, key=lambda c: str(c.rule)) clusters = sorted(clusters, key=lambda c: c.inf_func(c_val), reverse=True)[:K] return clusters @instrument def add_clusters(self, clusters, idx=0): """ Return list of new clusters that are on the frontier """ if not clusters: return [] if self.DEBUG: print "add_clusters" self.print_clusters(clusters) self.renderer.new_page() self.renderer.set_title("add_clusters %d clusters" % len(clusters)) for f in self.frontier_iter: self.renderer.plot_inf_curves(f.frontier, color='grey') self.renderer.plot_inf_curves(clusters, color='green') clusters = self.setup_stats(clusters) base_frontier = self.get_frontier_obj(idx) clusters, _ = base_frontier.update(clusters) if self.DEBUG: print "base_frontier" self.print_clusters(clusters) self.renderer.plot_active_inf_curves(clusters, color='red') # clear out current tasks self.tasks[idx] = filter(base_frontier.__contains__, self.tasks[idx]) self.tasks[idx].extend(clusters) # remove non-frontier-based expansions from future expansion for tidx in self.tasks.keys(): if tidx <= idx: continue checker = lambda c: not any(map(base_frontier.__contains__, c.ancestors)) self.tasks[tidx] = filter(checker, self.tasks[tidx]) if clusters: _logger.debug("merger:\tadded %d clusters\t%d tasks left", len(clusters), self.ntasks) return clusters @property def ntasks(self): if len(self.tasks) == 0: return 0 return sum(map(len, self.tasks.values())) def has_next_task(self): if not self.tasks: return False return self.ntasks > 0 def next_tasks(self, n=1): ret = [] for tkey in reversed(self.tasks.keys()): tasks = self.tasks[tkey] while len(ret) < n and tasks: idx = random.randint(0, len(tasks)-1) ret.append((idx, tasks.pop(idx))) return ret @instrument def __call__(self, n=2): """ Return any successfully expanded clusters (improvements) """ nmerged = self.nmerged start = time.time() tasks = self.next_tasks(n) improvements = set() for idx, cluster in tasks: cur_frontier = self.get_frontier_obj(idx) next_frontier = self.get_frontier_obj(idx+1) new_clusters = self.run_task(idx, cluster, cur_frontier, next_frontier) debug = self.DEBUG self.DEBUG = False self.add_clusters(new_clusters, idx+1) self.DEBUG = debug improvements.update(new_clusters) _logger.debug("merger\ttook %.1f sec\t%d improved\t%d tried\t%d tasks left", time.time()-start, len(improvements), (self.nmerged-nmerged), self.ntasks) return improvements def run_task(self, idx, cluster, cur_frontier, next_frontier): if not (idx == 0 or self.valid_cluster_f(cluster)): _logger.debug("merger\tbelow thresh skipping\t %s" % cluster) return [] if self.DEBUG: self.renderer.new_page() self.renderer.set_title("expand %s" % str(cluster.rule)) self.renderer.plot_inf_curves([cluster], color='grey') self.rejected_disc_vals = defaultdict(list) self.rejected_cont_vals = defaultdict(set) expanded = self.greedy_expansion(cluster, self.seen, idx, cur_frontier) expanded = [c for c in expanded if c.bound_hash != cluster.bound_hash] if self.DEBUG: self.renderer.plot_inf_curves(expanded, color='green') cur_expanded, rms = cur_frontier.update(expanded) next_expanded, rms2 = next_frontier.update(cur_expanded) f = lambda c: c.bound_hash != cluster.bound_hash improved_clusters = set(filter(f, next_expanded)) to_hash = lambda cs: set([c.bound_hash for c in cs]) exp_bounds = 
to_hash(expanded) cur_bounds = to_hash(cur_expanded) next_bounds = to_hash(next_expanded) for c in chain(cur_expanded, rms): _logger.debug("merger\texpanded\tcur_idx(%s)\tnext_idx(%s)\t%.3f-%.3f\t%s", (c.bound_hash in exp_bounds), (c.bound_hash in next_bounds), c.c_range[0], c.c_range[1], c.rule.simplify()) if self.DEBUG: self.renderer.plot_active_inf_curves(cur_frontier.frontier, color='blue') self.renderer.plot_active_inf_curves(next_frontier.frontier, color='red') return improved_clusters @instrument def dims_to_expand(self, cluster, seen, version=None): for idx in xrange(len(cluster.cols)): vals = np.array(list(self.all_cont_vals[idx])) smaller = vals[(vals < cluster.bbox[0][idx])] bigger = vals[(vals > cluster.bbox[1][idx])] yield idx, 'dec', smaller.tolist() yield idx, 'inc', bigger.tolist() for name, vals in cluster.discretes.iteritems(): ret = [] maxval = (len(vals) > 1) and max(vals) or None vals2infs = self.all_disc_vals[name].items() vals2infs.sort(key=lambda p: p[1][0] / float(p[1][1]+1.), reverse=True) for disc_vals, score in vals2infs: subset = set(disc_vals).difference(vals) subset.difference_update([v for v in subset if self.failed_disc_vals[name][str(v)] > 1]) if maxval: subset = set(filter(lambda v: v >= maxval, subset)) ret.append(subset) ret = filter(bool, ret) if ret: yield name, 'disc', ret return p = np.arange(len(ret), 0, -1).astype(float) p /= p.sum() ret = np.random.choice(ret, min(len(ret), 10), p=p, replace=False) yield name, 'disc', ret @instrument def check_direction(self, cluster, dim, direction, vals): key = cluster.bound_hash if direction == 'disc': for subset in self.rejected_disc_vals[dim]: if subset.issubset(vals): return [] if direction == 'inc': cont_vals = self.rejected_cont_vals[(dim, direction)] if cont_vals: vals = filter(lambda v: v > max(cont_vals), vals) if direction == 'dec': cont_vals = self.rejected_cont_vals[(dim, direction)] if cont_vals: vals = filter(lambda v: v < min(cont_vals), vals) return vals @instrument def update_rejected_directions(self, cluster, dim, direction, val): if direction == 'disc': if not hasattr(val, '__iter__'): val = [val] for v in list(val): self.rejected_disc_vals[dim].append(set([v])) self.failed_disc_vals[dim][str(v)] += 1 if direction == 'inc': self.rejected_cont_vals[(dim, direction)].add(round(val, 1)) if direction == 'dec': self.rejected_cont_vals[(dim, direction)].add(round(val, 1)) @instrument def greedy_expansion(self, cluster, seen, version=None, frontier=None): _logger.debug("merger\tgreedy_expand\t%s", cluster.rule.simplify()) if frontier is None: frontier = CheapFrontier(self.c_range, K=1, nblocks=15, learner=self.learner) frontier.stats = self.stats frontier.update([cluster]) cols = cluster.cols for dim, direction, vals in self.dims_to_expand(cluster, seen, version=version): if len(vals) == 0: continue attrname = isinstance(dim, basestring) and dim or cols[dim] vals = self.check_direction(cluster, dim, direction, vals) realvals = self.pick_expansion_vals(cluster, dim, direction, vals) nfails = 0 for v in realvals: tmp = None if direction == 'inc': tmp = self.dim_merge(cluster, dim, None, v, seen) elif direction == 'dec': tmp = self.dim_merge(cluster, dim, v, None, seen) else: tmp = self.disc_merge(cluster, dim, v) if not tmp: _logger.debug("merger\tnoexpand\t%s\t%s\t%s options", attrname[:15], direction, len(vals)) continue improvements = frontier.improvement(tmp) if improvements.max() > 0: print str(tmp) print "\t", [round(v,2) for v in improvements] frontier.update([tmp]) isbetter = tmp in frontier 
_logger.debug("merger\tcand\t%s\t%s\t%s\t%s", attrname[:15], direction, isbetter, v) seen.add(tmp.bound_hash) if not isbetter: self.update_rejected_directions(cluster, dim, direction, v) if direction != 'disc': nfails += 1 if nfails > 1: break if direction != 'disc': cluster = tmp return frontier.frontier class PartitionedStreamRangeMerger(StreamRangeMerger): """ Partitions the merger based on user defined labels so that frontier curves from one partition do not suppress curves in another partition MR labels based on dimensionality BDT labels as leaf/non-leaf """ def __init__(self, *args, **kwargs): super(PartitionedStreamRangeMerger, self).__init__(*args, **kwargs) self.frontiers = defaultdict(list) self.tasks = defaultdict(list) def get_frontier_obj(self, version, partitionkey): frontiers = self.frontiers[partitionkey] while version >= len(frontiers): frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.nblocks, learner=self.learner) frontier.stats = self.stats frontiers.append(frontier) return frontiers[version] @property def frontier_iter(self): return chain(*self.frontiers.values()) @instrument def add_clusters(self, clusters, idx=0, partitionkey=None, skip_frontier=False): """ Return list of new clusters that are on the frontier """ if partitionkey is None: raise RuntimeError('addclusters partitionkey cannot be none') if not clusters: return [] print "add %d clusters" % len(clusters) if self.DEBUG: self.renderer.new_page() self.renderer.set_title("add_clusters %d clusters" % len(clusters)) for f in self.frontier_iter: self.renderer.plot_inf_curves(f.frontier, color='grey') self.renderer.plot_inf_curves(clusters, color='green') nclusters = len(clusters) clusters = self.setup_stats(clusters) frontier = self.get_frontier_obj(idx, partitionkey) if not skip_frontier: clusters, _ = frontier.update(clusters) # XXX: new cluster should be better than _all_ frontiers #for f in self.frontier_iter: #clusters, _ = f.update(clusters) if not clusters: return clusters if self.DEBUG and not skip_frontier: print "base_frontier" self.print_clusters(clusters) if self.DEBUG: self.renderer.plot_active_inf_curves(clusters, color='red') # clear out current tasks tkey = (partitionkey, idx) self.tasks[tkey] = filter(frontier.__contains__, self.tasks[tkey]) self.tasks[tkey].extend(clusters) # remove non-frontier-based expansions from future expansion for (pkey, tidx) in self.tasks.keys(): if pkey != partitionkey: continue if tidx <= idx: continue checker = lambda c: not any(map(frontier.__contains__, c.ancestors)) self.tasks[tkey] = filter(checker, self.tasks[tkey]) _logger.debug("merger\t%s\tadd %d of %d clusters\t%d idx\t%d tasks left", partitionkey, len(clusters), nclusters, idx, self.ntasks) return clusters def next_tasks(self, n=1): ret = [] for tkey in reversed(self.tasks.keys()): if len(ret) >= n: break tasks = self.tasks[tkey] ntasks = len(tasks) if not ntasks: continue idxs = np.random.choice(ntasks, min(ntasks, n-len(ret)), replace=False).tolist() for idx in sorted(idxs, reverse=True): ret.append((tkey[0], tkey[1], tasks.pop(idx))) return ret def __call__(self, n=2): nmerged = self.nmerged start = time.time() tasks = self.next_tasks(n) improvements = set() for pkey, idx, cluster in tasks: cur_frontier = self.get_frontier_obj(idx, pkey) next_frontier = self.get_frontier_obj(idx+1, pkey) new_clusters = self.run_task(idx, cluster, cur_frontier, next_frontier) self.add_clusters(new_clusters, idx=idx+1, partitionkey=pkey, skip_frontier=True) improvements.update(new_clusters) 
_logger.debug("merger\t%s\ttook %.1f sec\t%d improved\t%d tried\t%d tasks left", pkey, time.time()-start, len(improvements), (self.nmerged-nmerged), self.ntasks) return improvements
mit
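StreamRangeMerger above keeps, per version, a frontier of influence curves and only retains clusters that improve on it somewhere in the c-range. The snippet below is a self-contained toy sketch of that frontier-update idea, not Scorpion's CheapFrontier API: curves are plain callables over c in [0, 1], and a candidate is kept only if it beats every current curve at some grid point.

import numpy as np

def update_frontier(frontier, candidate, c_grid=np.linspace(0, 1, 50)):
    cand_vals = candidate(c_grid)
    if frontier:
        best = np.max([f(c_grid) for f in frontier], axis=0)
        if not np.any(cand_vals > best):
            return frontier, False           # dominated everywhere: reject
    frontier.append(candidate)
    return frontier, True

frontier = []
frontier, _ = update_frontier(frontier, lambda c: 1.0 - c)   # strong at small c
frontier, kept = update_frontier(frontier, lambda c: c)      # strong at large c
print(len(frontier), kept)                                   # 2 True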
jlegendary/scikit-learn
sklearn/tests/test_random_projection.py
142
14033
from __future__ import division import numpy as np import scipy.sparse as sp from sklearn.metrics import euclidean_distances from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import gaussian_random_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.random_projection import SparseRandomProjection from sklearn.random_projection import GaussianRandomProjection from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.utils import DataDimensionalityWarning all_sparse_random_matrix = [sparse_random_matrix] all_dense_random_matrix = [gaussian_random_matrix] all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix) all_SparseRandomProjection = [SparseRandomProjection] all_DenseRandomProjection = [GaussianRandomProjection] all_RandomProjection = set(all_SparseRandomProjection + all_DenseRandomProjection) # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros): rng = np.random.RandomState(0) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def densify(matrix): if not sp.issparse(matrix): return matrix else: return matrix.toarray() n_samples, n_features = (10, 1000) n_nonzeros = int(n_samples * n_features / 100.) 
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros) ############################################################################### # test on JL lemma ############################################################################### def test_invalid_jl_domain(): assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5) def test_input_size_jl_min_dim(): assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100], 2 * [0.9]) assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100], 2 * [0.9]) johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)), 0.5 * np.ones((10, 10))) ############################################################################### # tests random matrix generation ############################################################################### def check_input_size_random_matrix(random_matrix): assert_raises(ValueError, random_matrix, 0, 0) assert_raises(ValueError, random_matrix, -1, 1) assert_raises(ValueError, random_matrix, 1, -1) assert_raises(ValueError, random_matrix, 1, 0) assert_raises(ValueError, random_matrix, -1, 0) def check_size_generated(random_matrix): assert_equal(random_matrix(1, 5).shape, (1, 5)) assert_equal(random_matrix(5, 1).shape, (5, 1)) assert_equal(random_matrix(5, 5).shape, (5, 5)) assert_equal(random_matrix(1, 1).shape, (1, 1)) def check_zero_mean_and_unit_norm(random_matrix): # All random matrix should produce a transformation matrix # with zero mean and unit norm for each columns A = densify(random_matrix(10000, 1, random_state=0)) assert_array_almost_equal(0, np.mean(A), 3) assert_array_almost_equal(1.0, np.linalg.norm(A), 1) def check_input_with_sparse_random_matrix(random_matrix): n_components, n_features = 5, 10 for density in [-1., 0.0, 1.1]: assert_raises(ValueError, random_matrix, n_components, n_features, density=density) def test_basic_property_of_random_matrix(): # Check basic properties of random matrix generation for random_matrix in all_random_matrix: yield check_input_size_random_matrix, random_matrix yield check_size_generated, random_matrix yield check_zero_mean_and_unit_norm, random_matrix for random_matrix in all_sparse_random_matrix: yield check_input_with_sparse_random_matrix, random_matrix random_matrix_dense = \ lambda n_components, n_features, random_state: random_matrix( n_components, n_features, random_state=random_state, density=1.0) yield check_zero_mean_and_unit_norm, random_matrix_dense def test_gaussian_random_matrix(): # Check some statical properties of Gaussian random matrix # Check that the random matrix follow the proper distribution. # Let's say that each element of a_{ij} of A is taken from # a_ij ~ N(0.0, 1 / n_components). 
# n_components = 100 n_features = 1000 A = gaussian_random_matrix(n_components, n_features, random_state=0) assert_array_almost_equal(0.0, np.mean(A), 2) assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1) def test_sparse_random_matrix(): # Check some statical properties of sparse random matrix n_components = 100 n_features = 500 for density in [0.3, 1.]: s = 1 / density A = sparse_random_matrix(n_components, n_features, density=density, random_state=0) A = densify(A) # Check possible values values = np.unique(A) assert_in(np.sqrt(s) / np.sqrt(n_components), values) assert_in(- np.sqrt(s) / np.sqrt(n_components), values) if density == 1.0: assert_equal(np.size(values), 2) else: assert_in(0., values) assert_equal(np.size(values), 3) # Check that the random matrix follow the proper distribution. # Let's say that each element of a_{ij} of A is taken from # # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s # - 0 with probability 1 - 1 / s # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s # assert_almost_equal(np.mean(A == 0.0), 1 - 1 / s, decimal=2) assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2) assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2) assert_almost_equal(np.var(A == 0.0, ddof=1), (1 - 1 / s) * 1 / s, decimal=2) assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2) assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2) ############################################################################### # tests on random projection transformer ############################################################################### def test_sparse_random_projection_transformer_invalid_density(): for RandomProjection in all_SparseRandomProjection: assert_raises(ValueError, RandomProjection(density=1.1).fit, data) assert_raises(ValueError, RandomProjection(density=0).fit, data) assert_raises(ValueError, RandomProjection(density=-0.1).fit, data) def test_random_projection_transformer_invalid_input(): for RandomProjection in all_RandomProjection: assert_raises(ValueError, RandomProjection(n_components='auto').fit, [0, 1, 2]) assert_raises(ValueError, RandomProjection(n_components=-10).fit, data) def test_try_to_transform_before_fit(): for RandomProjection in all_RandomProjection: assert_raises(ValueError, RandomProjection(n_components='auto').transform, data) def test_too_many_samples_to_find_a_safe_embedding(): data, _ = make_sparse_random_data(1000, 100, 1000) for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', eps=0.1) expected_msg = ( 'eps=0.100000 and n_samples=1000 lead to a target dimension' ' of 5920 which is larger than the original space with' ' n_features=100') assert_raise_message(ValueError, expected_msg, rp.fit, data) def test_random_projection_embedding_quality(): data, _ = make_sparse_random_data(8, 5000, 15000) eps = 0.2 original_distances = euclidean_distances(data, squared=True) original_distances = original_distances.ravel() non_identical = original_distances != 0.0 # remove 0 distances to avoid division by 0 original_distances = original_distances[non_identical] for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', eps=eps, random_state=0) projected = rp.fit_transform(data) projected_distances = euclidean_distances(projected, squared=True) projected_distances 
= projected_distances.ravel() # remove 0 distances to avoid division by 0 projected_distances = projected_distances[non_identical] distances_ratio = projected_distances / original_distances # check that the automatically tuned values for the density respect the # contract for eps: pairwise distances are preserved according to the # Johnson-Lindenstrauss lemma assert_less(distances_ratio.max(), 1 + eps) assert_less(1 - eps, distances_ratio.min()) def test_SparseRandomProjection_output_representation(): for SparseRandomProjection in all_SparseRandomProjection: # when using sparse input, the projected data can be forced to be a # dense numpy array rp = SparseRandomProjection(n_components=10, dense_output=True, random_state=0) rp.fit(data) assert isinstance(rp.transform(data), np.ndarray) sparse_data = sp.csr_matrix(data) assert isinstance(rp.transform(sparse_data), np.ndarray) # the output can be left to a sparse matrix instead rp = SparseRandomProjection(n_components=10, dense_output=False, random_state=0) rp = rp.fit(data) # output for dense input will stay dense: assert isinstance(rp.transform(data), np.ndarray) # output for sparse output will be sparse: assert sp.issparse(rp.transform(sparse_data)) def test_correct_RandomProjection_dimensions_embedding(): for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components='auto', random_state=0, eps=0.5).fit(data) # the number of components is adjusted from the shape of the training # set assert_equal(rp.n_components, 'auto') assert_equal(rp.n_components_, 110) if RandomProjection in all_SparseRandomProjection: assert_equal(rp.density, 'auto') assert_almost_equal(rp.density_, 0.03, 2) assert_equal(rp.components_.shape, (110, n_features)) projected_1 = rp.transform(data) assert_equal(projected_1.shape, (n_samples, 110)) # once the RP is 'fitted' the projection is always the same projected_2 = rp.transform(data) assert_array_equal(projected_1, projected_2) # fit transform with same random seed will lead to the same results rp2 = RandomProjection(random_state=0, eps=0.5) projected_3 = rp2.fit_transform(data) assert_array_equal(projected_1, projected_3) # Try to transform with an input X of size different from fitted. assert_raises(ValueError, rp.transform, data[:, 1:5]) # it is also possible to fix the number of components and the density # level if RandomProjection in all_SparseRandomProjection: rp = RandomProjection(n_components=100, density=0.001, random_state=0) projected = rp.fit_transform(data) assert_equal(projected.shape, (n_samples, 100)) assert_equal(rp.components_.shape, (100, n_features)) assert_less(rp.components_.nnz, 115) # close to 1% density assert_less(85, rp.components_.nnz) # close to 1% density def test_warning_n_components_greater_than_n_features(): n_features = 20 data, _ = make_sparse_random_data(5, n_features, int(n_features / 4)) for RandomProjection in all_RandomProjection: assert_warns(DataDimensionalityWarning, RandomProjection(n_components=n_features + 1).fit, data) def test_works_with_sparse_data(): n_features = 20 data, _ = make_sparse_random_data(5, n_features, int(n_features / 4)) for RandomProjection in all_RandomProjection: rp_dense = RandomProjection(n_components=3, random_state=1).fit(data) rp_sparse = RandomProjection(n_components=3, random_state=1).fit(sp.csr_matrix(data)) assert_array_almost_equal(densify(rp_dense.components_), densify(rp_sparse.components_))
bsd-3-clause
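The tests above revolve around the Johnson-Lindenstrauss bound and sparse/Gaussian random projection matrices. A minimal usage sketch of those public APIs is given below; the sizes are illustrative.

import numpy as np
from sklearn.random_projection import johnson_lindenstrauss_min_dim, SparseRandomProjection

# Target dimension that preserves pairwise distances within eps for n_samples points
print(johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.2))

rng = np.random.RandomState(0)
X = rng.rand(100, 10000)
rp = SparseRandomProjection(n_components='auto', eps=0.5, random_state=0)
X_new = rp.fit_transform(X)
print(X_new.shape)   # (100, rp.n_components_), chosen from the JL bound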
Ziqi-Li/bknqgis
bokeh/examples/app/weather/main.py
15
3101
from os.path import join, dirname
import datetime

import pandas as pd
from scipy.signal import savgol_filter

from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, DataRange1d, Select
from bokeh.palettes import Blues4
from bokeh.plotting import figure

STATISTICS = ['record_min_temp', 'actual_min_temp', 'average_min_temp', 'average_max_temp', 'actual_max_temp', 'record_max_temp']

def get_dataset(src, name, distribution):
    df = src[src.airport == name].copy()
    del df['airport']
    df['date'] = pd.to_datetime(df.date)
    # timedelta here instead of pd.DateOffset to avoid pandas bug < 0.18 (Pandas issue #11925)
    df['left'] = df.date - datetime.timedelta(days=0.5)
    df['right'] = df.date + datetime.timedelta(days=0.5)
    df = df.set_index(['date'])
    df.sort_index(inplace=True)
    if distribution == 'Smoothed':
        window, order = 51, 3
        for key in STATISTICS:
            df[key] = savgol_filter(df[key], window, order)

    return ColumnDataSource(data=df)

def make_plot(source, title):
    plot = figure(x_axis_type="datetime", plot_width=800, tools="", toolbar_location=None)
    plot.title.text = title

    plot.quad(top='record_max_temp', bottom='record_min_temp', left='left', right='right',
              color=Blues4[2], source=source, legend="Record")
    plot.quad(top='average_max_temp', bottom='average_min_temp', left='left', right='right',
              color=Blues4[1], source=source, legend="Average")
    plot.quad(top='actual_max_temp', bottom='actual_min_temp', left='left', right='right',
              color=Blues4[0], alpha=0.5, line_color="black", source=source, legend="Actual")

    # fixed attributes
    plot.xaxis.axis_label = None
    plot.yaxis.axis_label = "Temperature (F)"
    plot.axis.axis_label_text_font_style = "bold"
    plot.x_range = DataRange1d(range_padding=0.0)
    plot.grid.grid_line_alpha = 0.3

    return plot

def update_plot(attrname, old, new):
    city = city_select.value
    plot.title.text = "Weather data for " + cities[city]['title']

    src = get_dataset(df, cities[city]['airport'], distribution_select.value)
    source.data.update(src.data)

city = 'Austin'
distribution = 'Discrete'

cities = {
    'Austin': {
        'airport': 'AUS',
        'title': 'Austin, TX',
    },
    'Boston': {
        'airport': 'BOS',
        'title': 'Boston, MA',
    },
    'Seattle': {
        'airport': 'SEA',
        'title': 'Seattle, WA',
    }
}

city_select = Select(value=city, title='City', options=sorted(cities.keys()))
distribution_select = Select(value=distribution, title='Distribution', options=['Discrete', 'Smoothed'])

df = pd.read_csv(join(dirname(__file__), 'data/2015_weather.csv'))

source = get_dataset(df, cities[city]['airport'], distribution)
plot = make_plot(source, "Weather data for " + cities[city]['title'])

city_select.on_change('value', update_plot)
distribution_select.on_change('value', update_plot)

controls = column(city_select, distribution_select)

curdoc().add_root(row(plot, controls))
curdoc().title = "Weather"
gpl-2.0
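get_dataset above smooths each statistic with a Savitzky-Golay filter (window 51, order 3) when the 'Smoothed' distribution is selected. A standalone illustration on a synthetic series, mirroring those parameter values, is shown below.

import numpy as np
from scipy.signal import savgol_filter

t = np.linspace(0, 1, 365)
noisy = np.sin(2 * np.pi * t) + np.random.RandomState(0).normal(0, 0.3, t.size)
smooth = savgol_filter(noisy, window_length=51, polyorder=3)
print(noisy.std(), smooth.std())   # the smoothed series has lower variance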
tiagoantao/genomics-notebooks
src/model.py
1
19213
# -*- coding: utf-8 -*- ''' .. module:: genomics :synopsis: PopGen classes with simulations :noindex: :copyright: Copyright 2014 by Tiago Antao :license: GNU Affero, see LICENSE for details .. moduleauthor:: Tiago Antao <tra@popgen.net> ''' import copy import inspect import math import numpy as np import networkx as nx from matplotlib import pyplot as plt from matplotlib.patches import Ellipse from IPython.core.pylabtools import print_figure from IPython.display import Image import simuOpt simuOpt.setOptions(gui=False, quiet=True) import simuPOP as sp from simuPOP import demography def _hook_view(pop, param): view = param view.complete_cycle(pop) return True class Model: def __init__(self, gens): self._gens = gens self._views = [] self.pop_size = 100 self.num_msats = 10 self.num_snps = 0 # With SNPs, MSats disappear self.snp_freq = 0.1 # Frequency of the derived allele self.num_msat_alleles = 10 self.mut_msat = None self.sample_size = None # All individuals self._stats = set() self._info_fields = set() self._sim_ids = [] self._sims = [] def register(self, view): self._views.append(view) def add_stat(self, stat): self._stats.add(stat) def _repr__png_(self): param_order = list(self._variation_params.keys()) param_order.sort() if len(self._variation_params) == 0: ys = 1 xs = 1 elif len(self._variation_params) == 1: p1 = param_order[0] vals = self._variation_params[p1] xs = min([3, len(vals)]) ys = math.ceil(len(vals) / 3) else: p1 = param_order[0] p2 = param_order[1] xs = len(self._variation_params[p1]) ys = len(self._variation_params[p2]) fig, axs = plt.subplots(ys, xs, squeeze=False, figsize=(16, 9)) for i, sim_params in enumerate(self._sim_ids): x = i % 3 y = i // 3 ax = axs[y, x] self._draw_sim(ax, sim_params) for i in range(i + 1, ys * xs): x = i % 3 y = i // 3 ax = axs[y, x] ax.set_axis_off() data = print_figure(fig, 'png') plt.close(fig) return data @property def png(self): return Image(self._repr__png_(), embed=True) def _create_snp_genome(self, num_snps, freq): init_ops = [] loci = num_snps * [1] for snp in range(num_snps): init_ops.append(sp.InitGenotype(freq=[1 - freq, freq], loci=snp)) return loci, init_ops def _create_genome(self, num_msats, mut=None, start_alleles=10): init_ops = [] loci = num_msats * [1] pre_ops = [] max_allele_msats = 100 for msat in range(num_msats): diri = np.random.mtrand.dirichlet([1.0] * start_alleles) if type(diri[0]) == float: diri_list = diri else: diri_list = list(diri) init_ops.append( sp.InitGenotype(freq=[0.0] * ((max_allele_msats + 1 - 8) // 2) + diri_list + [0.0] * ((max_allele_msats + 1 - 8) // 2), loci=msat)) if mut is not None: pre_ops.append(sp.StepwiseMutator(rates=mut)) return loci, init_ops, pre_ops def _create_single_pop(self, pop_size, nloci): init_ops = [] init_ops.append(sp.InitSex()) pop = sp.Population(pop_size, ploidy=2, loci=[1] * nloci, chromTypes=[sp.AUTOSOME] * nloci, infoFields=list(self._info_fields)) pre_ops = [] post_ops = [] return pop, init_ops, pre_ops, post_ops def _create_island(self, pop_sizes, mig, nloci): init_ops = [] init_ops.append(sp.InitSex()) pop = sp.Population(pop_sizes, ploidy=2, loci=[1] * nloci, chromTypes=[sp.AUTOSOME] * nloci, infoFields=list(self._info_fields)) post_ops = [sp.Migrator( demography.migrIslandRates(mig, len(pop_sizes)))] pre_ops = [] self._info_fields.add('migrate_to') return pop, init_ops, pre_ops, post_ops def _create_stepping_stone(self, pop_sizes, mig, nloci): if len(pop_sizes) == 1: flat_pop_sizes = pop_sizes[0] post_ops = [sp.Migrator( demography.migrSteppingStoneRates(mig, 
len(flat_pop_sizes)))] else: flat_pop_sizes = [] for line in pop_sizes: flat_pop_sizes.extend(line) post_ops = [sp.Migrator( demography.migr2DSteppingStoneRates(mig, len(pop_sizes), len(pop_sizes[0])))] init_ops = [] init_ops.append(sp.InitSex()) pop = sp.Population(flat_pop_sizes, ploidy=2, loci=[1] * nloci, chromTypes=[sp.AUTOSOME] * nloci, infoFields=list(self._info_fields)) pre_ops = [] self._info_fields.add('migrate_to') return pop, init_ops, pre_ops, post_ops def prepare_sim_vars(self): fixed_params = {} variation_params = {} for name, val in inspect.getmembers(self): if inspect.ismethod(val) or name[0] == '_': continue if type(val) == list: variation_params[name] = val else: fixed_params[name] = val self._set_sim_ids(fixed_params, variation_params) self._variation_params = variation_params self._fixed_params = fixed_params def _set_sim_ids(self, fixed_params, variation_params): if len(variation_params) == 0: self._sim_ids.append(copy.copy(fixed_params)) elif len(variation_params) == 1: for name, values in variation_params.items(): # just one, really for value in values: sim_params = copy.copy(fixed_params) sim_params[name] = value self._sim_ids.append(sim_params) elif len(variation_params) == 2: n1, n2 = tuple(variation_params.keys()) v1s = variation_params[n1] v2s = variation_params[n2] for v1 in v1s: for v2 in v2s: sim_params = copy.copy(fixed_params) sim_params[n1] = v1 sim_params[n2] = v2 self._sim_ids.append(sim_params) else: raise Exception('Maximum of 2 parameters varying') def prepare_sim(self, params): raise NotImplementedError('Use a concrete subclass') def _run(self, sim_id, params): pr = self.prepare_sim(params) sim = pr['sim'] for view in self._views: view.sim_id = sim_id sim.evolve(initOps=pr['init_ops'], preOps=pr['pre_ops'], postOps=pr['post_ops'], matingScheme=pr['mating_scheme'], gen=self._gens) for view in self._views: view.complete_sim() def run(self): self.prepare_sim_vars() for view in self._views: view.start() for params in self._sim_ids: self._sims.append(self.prepare_sim(params)) for i, params in enumerate(self._sim_ids): self._run(i, params) for view in self._views: view.end() class SinglePop(Model): def prepare_sim(self, params): for view in self._views: for info in view.info_fields: self._info_fields.add(info) if params['num_snps'] > 0: pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['pop_size'], params['num_snps']) loci, genome_init = self._create_snp_genome( params['num_snps'], freq=params['snp_freq']) gpre_ops = [] else: pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['pop_size'], params['num_msats']) loci, genome_init, gpre_ops = self._create_genome( params['num_msats'], mut=params['mut_msat'], start_alleles=params['num_msat_alleles']) view_ops = [] for view in self._views: view.pop = pop view_ops.extend(view.view_ops) for view in self._views: post_ops.append(sp.PyOperator(func=_hook_view, param=view)) post_ops = view_ops + post_ops sim = sp.Simulator(pop, 1, True) return {'sim': sim, 'init_ops': init_ops + genome_init, 'pre_ops': pre_ops + gpre_ops, 'post_ops': post_ops, 'mating_scheme': sp.RandomMating()} def _draw_sim(self, ax, sim_params): pop_size = sim_params['pop_size'] ax.plot([0, self._gens], [0, 0], 'b') ax.plot([0, self._gens], [pop_size, pop_size], 'b') ax.set_xlim(0, self._gens) if type(self.pop_size) == list: pop_sizes = self.pop_size else: pop_sizes = [self.pop_size] ax.set_ylim(-10, 1.1 * max(pop_sizes)) class LinkagePop(SinglePop): def __init__(self, gens, distance=None): Model.__init__(self, 
gens) self.distance = distance self.num_snps = 2 def prepare_sim(self, params): return SinglePop.prepare_sim(self, params) def _create_snp_genome(self, num_snps, freq): if self.distance is None: return SinglePop._create_snp_genome(self, num_snps, freq) init_ops = [] loci = num_snps * [1] for snp in range(num_snps): init_ops.append(sp.InitGenotype(freq=[1 - freq, freq], loci=snp)) return loci, init_ops class Bottleneck(Model): def prepare_sim(self, params): for view in self._views: for info in view.info_fields: self._info_fields.add(info) pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['start_size'], params['num_msats']) if params['num_snps'] > 0: pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['start_size'], params['num_snps']) loci, genome_init = self._create_snp_genome( params['num_snps'], freq=params['snp_freq']) gpre_ops = [] else: pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['start_size'], params['num_msats']) loci, genome_init, gpre_ops = self._create_genome( params['num_msats'], start_alleles=params['num_msat_alleles']) view_ops = [] for view in self._views: view.pop = pop view_ops.extend(view.view_ops) for view in self._views: post_ops.append(sp.PyOperator(func=_hook_view, param=view)) post_ops = view_ops + post_ops sim = sp.Simulator(pop, 1, True) pre_ops.append(sp.ResizeSubPops( proportions=(params['end_size'] / params['start_size'],), at=params['bgen'])) return {'sim': sim, 'pop': pop, 'init_ops': init_ops + genome_init, 'pre_ops': pre_ops + gpre_ops, 'post_ops': post_ops, 'mating_scheme': sp.RandomMating()} def _draw_sim(self, ax, sim_params): start_size = sim_params['start_size'] end_size = sim_params['end_size'] bgen = sim_params['bgen'] ax.plot([0, self._gens], [0, 0], 'b') ax.plot([0, bgen], [start_size, start_size], 'b') ax.plot([bgen, bgen], [start_size, end_size], 'b') ax.plot([bgen, self._gens], [end_size, end_size], 'b') ax.set_xlim(0, self._gens) if type(self.start_size) == list: pop_sizes = self.start_size else: pop_sizes = [self.start_size] if type(self.end_size) == list: pop_sizes.extend(self.end_size) else: pop_sizes.append(self.end_size) ax.set_ylim(-10, 1.1 * max(pop_sizes)) class SelectionPop(Model): def __init__(self, gens): Model.__init__(self, gens) self.sel = 0.01 self.neutral_loci = 0 def prepare_sim(self, params): for view in self._views: for info in view.info_fields: self._info_fields.add(info) nloci = 1 + params['neutral_loci'] pop, init_ops, pre_ops, post_ops = \ self._create_single_pop(params['pop_size'], nloci) view_ops = [] for view in self._views: view.pop = pop view_ops.extend(view.view_ops) for view in self._views: post_ops.append(sp.PyOperator(func=_hook_view, param=view)) post_ops = view_ops + post_ops loci, genome_init = self._create_snp_genome( nloci, freq=params['snp_freq']) sim = sp.Simulator(pop, 1, True) if params['sel_type'] == 'hz_advantage': ms = sp.MapSelector(loci=0, fitness={ (0, 0): 1 - params['sel'], (0, 1): 1, (1, 1): 1 - params['sel']}) elif params['sel_type'] == 'recessive': ms = sp.MapSelector(loci=0, fitness={ (0, 0): 1 - params['sel'], (0, 1): 1 - params['sel'], (1, 1): 1}) else: # dominant ms = sp.MapSelector(loci=0, fitness={ (0, 0): 1 - params['sel'], (0, 1): 1, (1, 1): 1}) return {'sim': sim, 'pop': pop, 'init_ops': init_ops + genome_init, 'pre_ops': pre_ops, 'post_ops': post_ops, 'mating_scheme': sp.RandomMating( ops=[sp.MendelianGenoTransmitter(), ms])} class Island(Model): def __init__(self, gens): Model.__init__(self, gens) self.num_pops = 5 self.mig = 
0.01 def prepare_sim(self, params): for view in self._views: for info in view.info_fields: self._info_fields.add(info) if params['num_snps'] > 0: pop, init_ops, pre_ops, post_ops = \ self._create_island([params['pop_size']] * params['num_pops'], params['mig'], params['num_snps']) loci, genome_init = self._create_snp_genome( params['num_snps'], freq=params['snp_freq']) gpre_ops = [] else: pop, init_ops, pre_ops, post_ops = \ self._create_island([params['pop_size']] * params['num_pops'], params['mig'], params['num_msats']) loci, genome_init, gpre_ops = self._create_genome( params['num_msats'], start_alleles=params['num_msat_alleles']) view_ops = [] for view in self._views: view.pop = pop view_ops.extend(view.view_ops) for view in self._views: post_ops.append(sp.PyOperator(func=_hook_view, param=view)) post_ops = view_ops + post_ops sim = sp.Simulator(pop, 1, True) return {'sim': sim, 'pop': pop, 'init_ops': init_ops + genome_init, 'pre_ops': pre_ops, 'post_ops': post_ops, 'mating_scheme': sp.RandomMating()} def _draw_sim(self, ax, sim_params): graph = nx.Graph() num_pops = sim_params['num_pops'] gnames = ['P%d: %d' % (g + 1, sim_params['pop_size'], ) for g in range(num_pops)] for g in range(num_pops): graph.add_node(gnames[g]) for g1 in range(num_pops - 1): for g2 in range(g1 + 1, num_pops): graph.add_edge(gnames[g1], gnames[g2]) nx.draw_circular(graph, node_color='c', ax=ax) xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() pos = ymin for var in self._variation_params: if var == 'mig': continue ax.text(xmin, pos, '%s: %d' % (var, int(sim_params[var])), va='top', ha='left') pos = (ymax - ymin) / 2 + ymin ax.text(xmin, ymax, 'mig: %f' % sim_params['mig'], va='top', ha='left') class SteppingStone(Model): def __init__(self, gens, two_d=False): Model.__init__(self, gens) self.num_pops_x = 5 self.mig = 0.01 self._two_d = two_d self.num_pops_y = None def prepare_sim(self, params): for view in self._views: for info in view.info_fields: self._info_fields.add(info) if params['num_snps'] > 0: nloci = params['num_snps'] else: nloci = params['num_msats'] if self._two_d: pop, init_ops, pre_ops, post_ops = \ self._create_stepping_stone( [[params['pop_size']] * params['num_pops_x']] * params['num_pops_y'], params['mig'], nloci) else: pop, init_ops, pre_ops, post_ops = \ self._create_stepping_stone( [[params['pop_size']] * params['num_pops_x']], params['mig'], nloci) if params['num_snps'] > 0: loci, genome_init = self._create_snp_genome( params['num_snps'], freq=params['snp_freq']) gpre_ops = [] else: loci, genome_init, gpre_ops = self._create_genome( params['num_msats'], start_alleles=params['num_msat_alleles']) view_ops = [] for view in self._views: view.pop = pop view_ops.extend(view.view_ops) for view in self._views: post_ops.append(sp.PyOperator(func=_hook_view, param=view)) post_ops = view_ops + post_ops sim = sp.Simulator(pop, 1, True) return {'sim': sim, 'pop': pop, 'init_ops': init_ops + genome_init, 'pre_ops': pre_ops, 'post_ops': post_ops, 'mating_scheme': sp.RandomMating()} def _draw_sim(self, ax, sim_params): if self._two_d: y = sim_params['num_pops_y'] else: y = 1 ax.set_axis_off() ax.set_ylim(0, 1 + y) ax.set_xlim(0, 1 + sim_params['num_pops_x']) for j in range(y): for i in range(sim_params['num_pops_x']): el = Ellipse((i + 1, j + 1), 0.5, 0.5, ec="none") ax.add_patch(el) if i > 0: ax.plot([i + .25, i + .75], [j + 1, j + 1], 'k') if j > 0: ax.plot([i + 1, i + 1], [j + .25, j + .75], 'k')
agpl-3.0
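The Model subclasses above assemble simuPOP populations and evolve them with init/pre/post operators. A minimal simuPOP drift simulation along the same lines as SinglePop (one population, one biallelic locus, random mating) is sketched below; the parameter values are illustrative.

import simuPOP as sp

pop = sp.Population(size=100, ploidy=2, loci=[1])
pop.evolve(
    initOps=[sp.InitSex(), sp.InitGenotype(freq=[0.9, 0.1])],
    matingScheme=sp.RandomMating(),
    postOps=[
        sp.Stat(alleleFreq=0),                                   # track the derived allele
        sp.PyEval(r"'gen %d: freq %.3f\n' % (gen, alleleFreq[0][1])"),
    ],
    gen=10,
)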
myselfHimanshu/Udacity-DataML
Intro to Machine Learning/naive_bayes/nb_author_id.py
2
1107
#!/usr/bin/python

"""
    this is the code to accompany the Lesson 1 (Naive Bayes) mini-project

    use a Naive Bayes Classifier to identify emails by their authors

    authors and labels:
    Sara has label 0
    Chris has label 1
"""

import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess


### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()


#########################################################
### your code goes here ###
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

clf = GaussianNB()
t0 = time()
clf.fit(features_train, labels_train)
print "Training Time ", round(time()-t0, 3), " s"

t1 = time()
pred = clf.predict(features_test)
print "predicting time ", round(time()-t1, 3), " s"

accuracy = accuracy_score(pred, labels_test)
print accuracy
#########################################################
gpl-2.0
nuclear-wizard/moose
scripts/memory_logger.py
12
54747
#!/usr/bin/env python3 #* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html from tempfile import TemporaryFile, SpooledTemporaryFile import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform, signal class Debugger: """ The Debugger class is the entry point to our stack tracing capabilities. It determins which debugger to inherit based on parsed arguments and platform specs. """ def __init__(self, arguments): if arguments.debugger == 'lldb': self.debugger = lldbAPI(arguments) else: self.debugger = DebugInterpreter(arguments) def getProcess(self, pid): return self.debugger.getProcess(pid) def getStackTrace(self, getProcess_tuple): return self.debugger.getStackTrace(getProcess_tuple) class lldbAPI: def __init__(self, arguments): self.debugger = lldb.SBDebugger.Create() self.debugger.SetAsync(True) def __del__(self): lldb.SBDebugger.Destroy(self.debugger) def getProcess(self, pid): # Create and attach to the pid and return our debugger as a tuple target = self.debugger.CreateTargetWithFileAndArch(None, None) return target, pid def getStackTrace(self, process_tuple): target, pid = process_tuple lldb_results = [] # reuse the process object if available if target.process.id is not 0: process = target.Attach(lldb.SBAttachInfo(target.process.id), lldb.SBError()) else: process = target.Attach(lldb.SBAttachInfo(int(pid)), lldb.SBError()) # test if we succeeded at attaching to PID process if process: # grab thread information lldb_results.append(process.GetThreadAtIndex(0).__str__()) # iterate through all frames and collect back trace information for i in xrange(process.GetThreadAtIndex(0).GetNumFrames()): lldb_results.append(process.GetThreadAtIndex(0).GetFrameAtIndex(i).__str__()) # Unfortunately we must detach each time we perform a stack # trace. This severely limits our sample rate. It _appears_ to # to be a bug in LLDB's Python API. Otherwise we would be able to: # # process.Stop() # ..collect back trace.. # process.Continue() # # instead we have to: process.Detach() return '\n'.join(lldb_results) else: return '' class DebugInterpreter: """ Currently, interfacing with LLDB via subprocess is impossible. This is due to lldb not printing to stdout, or stderr when displaying the prompt to the user (informing the user, the debugger is ready to receive input). However, this class may someday be able to, which is why the self.debugger variable is present. """ def __init__(self, arguments): self.last_position = 0 self.debugger = arguments.debugger def _parseStackTrace(self, gibberish): not_gibberish = re.findall(r'\(' + self.debugger + '\) (#.*)\(' + self.debugger + '\)', gibberish, re.DOTALL) if len(not_gibberish) != 0: return not_gibberish[0] else: # Return a blank line, as to not pollute the log. Gibberish here # usually indicates a bunch of warnings or information about # loading symbols return '' def _waitForResponse(self, dbg_stdout): # Allow a maximum of 5 seconds to obtain a debugger prompt position. 
# Otherwise we can hang indefinitely end_queue = time.time() + float(5) while time.time() < end_queue: dbg_stdout.seek(self.last_position) for line in dbg_stdout: if line == '(' + self.debugger + ') ': self.last_position = dbg_stdout.tell() return True time.sleep(0.01) return False def getProcess(self, pid): # Create a temporary file the debugger can write stdout/err to dbg_stdout = SpooledTemporaryFile() # Create and attach to running proccess process = subprocess.Popen([which(self.debugger)], stdin=subprocess.PIPE, stdout=dbg_stdout, stderr=dbg_stdout) for command in [ 'attach ' + pid + '\n' ]: if self._waitForResponse(dbg_stdout): try: process.stdin.write(command) except: return (False, self.debugger, 'quit unexpectedly') else: return (False, 'could not attach to process in allotted time') return (process, dbg_stdout) def getStackTrace(self, process_tuple): process, dbg_stdout = process_tuple # Store our current file position so we can return to it and read # the eventual entire stack trace output batch_position = dbg_stdout.tell() # Loop through commands necessary to create a back trace for command in ['ctrl-c', 'bt\n', 'c\n']: if command == 'ctrl-c': process.send_signal(signal.SIGINT) else: if self._waitForResponse(dbg_stdout): process.stdin.write(command) else: dbg_stdout.seek(batch_position) return self.detachProcess(process_tuple) # Return to previous file position so that we can return the entire # stack trace dbg_stdout.seek(batch_position) return self._parseStackTrace(dbg_stdout.read()) def detachProcess(self, process): process, dbg_stdout = process # Offset the position due to ctrl-c not generating a newline event tmp_position = (dbg_stdout.tell() - 1) for command in ['ctrl-c', 'quit\n', 'y\n']: if command == 'ctrl-c': process.send_signal(signal.SIGINT) else: # When these two variables are not equal, its a safe assumption the # debugger is ready to receive input if tmp_position != dbg_stdout.tell(): tmp_position = dbg_stdout.tell() try: process.stdin.write(command) except: # Because we are trying to detach and quit the debugger just pass pass # Always return True for a detach call. What would we do if it failed anyway? # Why am I even leaving a comment about this? return True class Server: def __init__(self, arguments): self.arguments = arguments self.arguments.cwd = os.getcwd() # Test to see if we are starting as a server if self.arguments.pbs == True: if os.getenv('PBS_NODEFILE') != None: # Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server' self.agent = Agent(self.arguments, 'server') if self.arguments.recover: self.logfile = WriteCSV(self.arguments.outfile[0], False) else: self.logfile = WriteCSV(self.arguments.outfile[0], True) self.client_connections = [] self.startServer() else: print 'I could not find your PBS_NODEFILE. Is PBS loaded?' 
sys.exit(1) # If we are not a server, start the single client else: self.startClient() def startServer(self): # Setup the TCP socket self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.server_socket.bind((socket.gethostname(), 0)) self.server_socket.listen(5) (self.host, self.port) = self.server_socket.getsockname() # We will store all connections (sockets objects) made to the server in a list self.client_connections.append(self.server_socket) # Launch the actual binary we want to track self._launchJob() # Now launch all pbs agents self._launchClients() # This is a try so we can handle a keyboard ctrl-c try: # Continue to listen and accept active connections from agents # until all agents report a STOP command. AGENTS_ACTIVE = True while AGENTS_ACTIVE: read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[]) for sock in read_sockets: if sock == self.server_socket: # Accept an incomming connection self.client_connections.append(self.server_socket.accept()[0]) else: # Deal with the data being sent to the server by its agents self.handleAgent() # Check to see if _all_ agents are telling the server to stop agent_count = len(self.agent.agent_data.keys()) current_count = 0 for agent in self.agent.agent_data.keys(): if self.agent.agent_data[agent]['STOP']: current_count += 1 # if All Agents have reported a STOP command, begin to exit if current_count == agent_count: AGENTS_ACTIVE = False # Gotta get out of the for loop somehow... break # Sleep a bit before reading additional data time.sleep(self.arguments.repeat_rate[-1]) # Close the server socket self.server_socket.close() # Close the logfile as the server is about to exit self.logfile.close() # Cancel server operations if ctrl-c was pressed except KeyboardInterrupt: print 'Canceled by user. Wrote log:', self.arguments.outfile[0] sys.exit(0) # Normal exiting procedures print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0] sys.exit(0) def startClient(self): Client(self.arguments) def _launchClients(self): # Read the environment PBS_NODEFILE self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r') nodes = set(self._PBS_NODEFILE.read().split()) # Print some useful information about our setup print 'Memory Logger running on Host:', self.host, 'Port:', self.port, \ '\nNodes:', ', '.join(nodes), \ '\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)', \ '\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. 
(use --pbs-delay to adjust)\n' # Build our command list based on the PBS_NODEFILE command = [] for node in nodes: command.append([ 'ssh', node, 'bash --login -c "source /etc/profile && ' \ + 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \ + os.path.abspath(__file__) \ + ' --call-back-host ' \ + self.host + ' ' + str(self.port) \ + '"']) # remote into each node and execute another copy of memory_logger.py # with a call back argument to recieve further instructions for pbs_node in command: subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Launch the binary we intend to track def _launchJob(self): subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log) # A connection has been made from client to server # Capture that data, and determin what to do with it def handleAgent(self): # Loop through all client connections, and receive data if any for agent_socket in self.client_connections: # Completely ignore the server_socket object if agent_socket == self.server_socket: continue # Assign an AgentConnector for the task of handling data between client and server reporting_agent = AgentConnector(self.arguments, agent_socket) # OK... get data from a client and begin new_data = reporting_agent.readData() if new_data != None: # There should be only one dictionary key (were reading data from just one client at a time) agent_uuid = new_data.keys()[0] # Update our dictionary of an agents data self.agent.agent_data[agent_uuid] = new_data[agent_uuid] # Modify incoming Agents timestamp to match Server's time (because every node is a little bit off) if self.arguments.recover: self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta else: self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now # update total usage for all known reporting agents total_usage = 0 for one_agent in self.agent.agent_data.keys(): total_usage += self.agent.agent_data[one_agent]['MEMORY'] self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage) # Get any stdout thats happened thus far and apply it to what ever agent just sent us data self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout() # Write to our logfile self.logfile.write(self.agent.agent_data[agent_uuid]) # Check for any agents sending a stop command. If we find one, # set some zeroing values, and close that agent's socket. if self.agent.agent_data[agent_uuid]['STOP']: self.agent.agent_data[agent_uuid]['MEMORY'] = 0 agent_socket.close() if agent_socket != self.server_socket: self.client_connections.remove(agent_socket) # Go ahead and set our server agent to STOP as well. 
# The server will continue recording samples from agents self.agent.agent_data['server']['STOP'] = True # If an Agent has made a request for instructions, handle it here update_client = False if new_data[agent_uuid]['REQUEST'] != None: for request in new_data[agent_uuid]['REQUEST'].iteritems(): if new_data[agent_uuid]['REQUEST'][request[0]] == '': update_client = True # We only support sending any arguments supplied to ther server, back to the agent for request_type in dir(self.arguments): if request[0] == str(request_type): self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0]) # If an Agent needed additional instructions, go ahead and re-send those instructions if update_client: reporting_agent.sendData(self.agent.agent_data[agent_uuid]) class Client: def __init__(self, arguments): self.arguments = arguments # Initialize an Agent with a UUID based on our hostname self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname()))) # Initialize an AgentConnector self.remote_server = AgentConnector(self.arguments) # If client will talk to a server (PBS) if self.arguments.call_back_host: # We know by initializing an agent, agent_data contains the necessary message asking for further instructions self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data) # Apply new instructions received from server (this basically updates our arguments) for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems(): for request_type in dir(self.arguments): if request[0] == str(request_type): setattr(self.arguments, request[0], request[1]) # Requests have been satisfied, set to None self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None # Change to the same directory as the server was when initiated (needed for PBS stuff) os.chdir(self.arguments.cwd) # Client will not be talking to a server, save data to a file instead else: # Deal with --recover if self.arguments.recover: # Do not overwrite the file self.logfile = WriteCSV(self.arguments.outfile[0], False) else: # Overwrite the file self.logfile = WriteCSV(self.arguments.outfile[0], True) # Lets begin! self.startProcess() # This function handles the starting and stoping of the sampler process. # We loop until an agent returns a stop command. def startProcess(self): AGENTS_ACTIVE = True # If we know we are the only client, go ahead and start the process we want to track. if self.arguments.call_back_host == None: subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log) # Delay just a bit to keep from recording a possible zero memory usage as the binary starts up time.sleep(self.arguments.sample_delay[0]) # This is a try so we can handle a keyboard ctrl-c try: # Continue to process data until an Agent reports a STOP command while AGENTS_ACTIVE: # Take a sample current_data = self.my_agent.takeSample() # Handle the data supplied by the Agent. self._handleData(current_data) # If an Agent reported a STOP command, go ahead and begin the shutdown phase if current_data[current_data.keys()[0]]['STOP']: AGENTS_ACTIVE = False # Sleep just a bit between samples, as to not saturate the machine time.sleep(self.arguments.repeat_rate[-1]) # An agent reported a stop command... so let everyone know where the log was saved, and exit! if self.arguments.call_back_host == None: print 'Binary has exited and a log file has been written. 
You can now attempt to view this file by running' \ '\nthe memory_logger with either the --plot or --read arguments:\n\n', sys.argv[0], '--plot', self.arguments.outfile[0], \ '\n\nSee --help for additional viewing options.' # Cancel server operations if ctrl-c was pressed except KeyboardInterrupt: self.logfile.close() print 'Canceled by user. Wrote log:', self.arguments.outfile[0] sys.exit(0) # Everything went smooth. sys.exit(0) # Figure out what to do with the sampled data def _handleData(self, data): # Sending the sampled data to a server if self.arguments.call_back_host: self.remote_server.sendData(data) # Saving the sampled data to a file else: # Compute the TOTAL memory usage to be how much our one agent reported # Because were the only client doing any work data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY'] self.logfile.write(data[self.my_agent.my_uuid]) # If the agent has been told to stop, close the database file if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True: self.logfile.close() class AgentConnector: """ Functions used to communicate to and from Client and Server. Both Client and Server classes use this object. readData() sendData('message', socket_connection=None) if sendData's socket_connection is None, it will create a new connection to the server based on supplied arguments """ def __init__(self, arguments, connection=None): self.arguments = arguments self.connection = connection self.CREATED_CONNECTION = False # If the connection is None, meaning this object was instanced by a client, # we must create a connection to the server first if self.connection == None and self.arguments.call_back_host != None: self.CREATED_CONNECTION = True self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1]))) # read all data sent by an agent def readData(self): # Get how much data there is to receive # The first eight bytes is our data length data_width = int(self.connection.recv(8)) tmp_received = '' # We need to receive precisely the ammount of data the # client is trying to send us. while len(tmp_received) < data_width: if data_width - len(tmp_received) > 1024: tmp_received += self.connection.recv(1024) else: tmp_received += self.connection.recv(data_width - (len(tmp_received))) # unpickle the received message return self._unpickleMessage(tmp_received) # send data to an agent def sendData(self, message): # pickle the data up, and send the message self.connection.sendall(self._pickleMessage(message)) # If we had to create the socket (connection was none), and this client/agent is requesting # instructions, go ahead and read the data that _better be there_ sent to us by the server. 
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None: return self.readData() # The following two functions pickle up the data for easy socket transport def _pickleMessage(self, message): t = TemporaryFile() pickle.dump(message, t) t.seek(0) str_msg = t.read() str_len = len(str_msg) message = "%-8d" % (str_len,) + str_msg return message def _unpickleMessage(self, message): t = TemporaryFile() t.write(message) t.seek(0) try: return pickle.load(t) except KeyError: print 'Socket data was not pickled data: ', message except: raise class WriteCSV: def __init__(self, logfile, overwrite): if overwrite: self.file_object = open(logfile, 'w', 1) else: self.file_object = open(logfile, 'a', 1) csv.field_size_limit(sys.maxsize) self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL) # Close the logfile def close(self): self.file_object.close() # Write a CSV row def write(self, data): formatted_string = self._formatString(data) self.log_file.writerow(formatted_string) # Format the CSV output def _formatString(self, data): # We will be saving this data in CSV format. Before we do, lets format it a bit here format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY'] formatted_text = [] for item in format_order: # We have to handle python's way of formatting floats to strings specially if item == 'TIMESTAMP': formatted_text.append('%.6f' % data[item]) else: formatted_text.append(data[item]) return formatted_text class Agent: """ Each agent object contains its own sampled log data. The Agent class is responsible for collecting and storing data. machine_id is used to identify the agent. machine_id is supplied by the client class. This allows for multiple agents if desired """ def __init__(self, arguments, machine_id): self.arguments = arguments self.my_uuid = machine_id self.track_process = '' self.process = None # This log object is for stdout purposes self.log = TemporaryFile() self.log_position = 0 # Discover if --recover is being used. If so, we need to obtain the # timestamp of the last entry in the outfile log... a little bulky # to do... and not a very good place to do it. if self.arguments.recover: if os.path.exists(self.arguments.outfile[-1]): memory_list = [] history_file = open(self.arguments.outfile[-1], 'r') csv.field_size_limit(sys.maxsize) reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL) # Get last item in list. Unfortunately, no way to do this until # we have read the entire file...? Lucky for us, most memory log # files are in the single digit megabytes for row in reader: memory_list.append(row) history_file.close() last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1] self.delta = (GetTime().now - last_entry) else: print 'Recovery options detected, but I could not find your previous memory log file.' sys.exit(1) else: self.delta = 0 # Create the dictionary to which all sampled data will be stored # NOTE: REQUEST dictionary items are instructions (arguments) we will # ask the server to provide (if we are running with --pbs) # Simply add them here. We _can not_ make the arguments match the # server exactly, this would cause every agent launched to perform # like a server... bad stuff # Example: We added repeat_rate (see dictionary below). 
Now every # agent would update their repeat_rate according to what the user # supplied as an argument (--repeat_rate 0.02) self.agent_data = { self.my_uuid : { 'HOSTNAME' : socket.gethostname(), 'STDOUT' : '', 'STACK' : '', 'MEMORY' : 0, 'TIMESTAMP' : GetTime().now - self.delta, 'REQUEST' : { 'run' : '', 'pstack' : '', 'repeat_rate' : '', 'cwd' : '', 'debugger' : ''}, 'STOP' : False, 'TOTAL' : 0, 'DEBUG_LOG' : '' } } # we need to create a place holder for our debugger because when # memory_logger is run via --pbs, this Agent will not know what # kind of debugger to use until it has made contact with the server self.stack_trace = None # NOTE: This is the only function that should be called in this class def takeSample(self): if self.arguments.pstack: if self.stack_trace is None: self.stack_trace = Debugger(self.arguments) self.agent_data[self.my_uuid]['STACK'] = self._getStack() # Always do the following self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory() self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout() if self.arguments.recover: self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta else: self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now # Return the data to whom ever asked for it return self.agent_data def _getStdout(self): self.log.seek(self.log_position) output = self.log.read() self.log_position = self.log.tell() sys.stdout.write(output) return output def _getMemory(self): tmp_pids = self._getPIDs() memory_usage = 0 if tmp_pids != {}: for single_pid in tmp_pids.iteritems(): memory_usage += int(single_pid[1][0]) if memory_usage == 0: # Memory usage hit zero? Then assume the binary being tracked has exited. So lets begin doing the same. self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping' self.agent_data[self.my_uuid]['STOP'] = True return 0 return int(memory_usage) # No binay even detected? Lets assume it exited, so we should begin doing the same. self.agent_data[self.my_uuid]['STOP'] = True self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping' return 0 def _getStack(self): # Create a process object if none already exists. Reuse the old one if it does. if self.process is None: tmp_pids = self._getPIDs() # Check if we actually found any running processes if tmp_pids != {}: # Obtain a single process id, any process id will do. This will be the process we attach to and perform stack traces one_pid = tmp_pids.keys()[0] self.process = self.stack_trace.getProcess(str(one_pid)) return self.stack_trace.getStackTrace(self.process) else: return '' else: return self.stack_trace.getStackTrace(self.process) def _getPIDs(self): pid_list = {} # Determin the binary to sample and store it. Doing the findCommand is a little expensive. 
if self.track_process == '': self.track_process = self._findCommand(''.join(self.arguments.run)) # If we are tracking a binary if self.arguments.run: command = [which('ps'), '-e', '-o', 'pid,rss,user,args'] tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) all_pids = tmp_proc.communicate()[0].split('\n') # Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc) for single_pid in all_pids: if single_pid.find(self.track_process) != -1 and \ single_pid.find(__file__) == -1 and \ single_pid.find('mpirun') == -1 and \ single_pid.find(os.getenv('USER')) != -1 and \ single_pid.find('mpiexec') == -1: pid_list[int(single_pid.split()[0])] = [] pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]]) return pid_list # Determine the command we are going to track # A few things are happening here; first we strip off any MPI commands # we then loop through the remaining items until we find a matching path # exp: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6 # would first strip off mpiexec, check for the presence of -n in our # current directory, then 12, then ../../../moose_test-opt <- found. It would # stop and return the base name (moose_test-opt). def _findCommand(self, command): if command.find('mpiexec') == 0 or command.find('mpirun') == 0: for binary in command.split(): if os.path.exists(binary): return os.path.split(binary)[1] elif os.path.exists(command.split()[0]): return os.path.split(command.split()[0])[1] class GetTime: """A simple formatted time object. """ def __init__(self, posix_time=None): import datetime if posix_time == None: self.posix_time = datetime.datetime.now() else: self.posix_time = datetime.datetime.fromtimestamp(posix_time) self.now = float(datetime.datetime.now().strftime('%s.%f')) self.microsecond = self.posix_time.microsecond self.second = self.posix_time.second self.minute = self.posix_time.strftime('%M') self.hour = self.posix_time.strftime('%H') self.day = self.posix_time.strftime('%d') self.month = self.posix_time.strftime('%m') self.year = self.posix_time.year self.dayname = self.posix_time.strftime('%a') self.monthname = self.posix_time.strftime('%b') class MemoryPlotter: def __init__(self, arguments): self.arguments = arguments self.buildGraph() def buildPlots(self): plot_dictionary = {} for log in self.arguments.plot: memory_list = [] if os.path.exists(log): log_file = open(log, 'r') csv.field_size_limit(sys.maxsize) reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL) for row in reader: memory_list.append(row) log_file.close() plot_dictionary[log.split('/')[-1:][0]] = memory_list else: print 'log not found:', log sys.exit(1) return plot_dictionary def buildGraph(self): try: import matplotlib.pyplot as plt except ImportError: print 'Error importing matplotlib. Matplotlib not available on this system?' 
sys.exit(1) plot_dictionary = self.buildPlots() fig = plt.figure() plot_list = [] tmp_plot = [] tmp_legend = [] self.stdout_msgs = {} self.pstack_msgs = {} self.multiples = 1 self.memory_label = 'Memory in Bytes' # Try and calculate memory sizes, so we can move annotations around a bit more accurately largest_memory = [] for plot_name, value_list in plot_dictionary.iteritems(): for records in value_list: largest_memory.append(int(records[1])) largest_memory.sort() # Determine the scale of the graph suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"] multiplier = 1 << 40; index = 0 while largest_memory[-1] < multiplier and multiplier > 1: multiplier = multiplier >> 10 index = index + 1 self.multiples = multiplier self.memory_label = "Memory in " + suffixes[index-1] # Loop through each log file for plot_name, value_list in plot_dictionary.iteritems(): plot_list.append(fig.add_subplot(111)) tmp_memory = [] tmp_time = [] tmp_stdout_x = [] tmp_stdout_y = [] tmp_pstack_x = [] tmp_pstack_y = [] stdout_msg = [] pstack_msg = [] # Get the start time, and make this 0 try: tmp_zero = decimal.Decimal(value_list[0][0]) except: print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?' sys.exit(1) # Populate the graph for records in value_list: tmp_memory.append(decimal.Decimal(records[1]) / self.multiples) tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero)) if len(records[2]) > 0 and self.arguments.stdout: tmp_stdout_x.append(tmp_time[-1]) tmp_stdout_y.append(tmp_memory[-1]) stdout_msg.append(records[2]) if len(records[3]) > 0 and self.arguments.pstack: tmp_pstack_x.append(tmp_time[-1]) tmp_pstack_y.append(tmp_memory[-1]) pstack_msg.append(records[3]) # Do the actual plotting: f, = plot_list[-1].plot(tmp_time, tmp_memory) tmp_plot.append(f) tmp_legend.append(plot_name) plot_list[-1].grid(True) plot_list[-1].set_ylabel(self.memory_label) plot_list[-1].set_xlabel('Time in Seconds') # Enable dork mode if self.arguments.darkmode: fig.set_facecolor('0.1') plot_list[-1].set_axis_bgcolor('0.1') plot_list[-1].spines['bottom'].set_color('white') plot_list[-1].spines['top'].set_color('white') plot_list[-1].spines['right'].set_color('white') plot_list[-1].spines['left'].set_color('white') plot_list[-1].tick_params(axis='x', colors='white') plot_list[-1].tick_params(axis='y', colors='white') plot_list[-1].xaxis.label.set_color('white') plot_list[-1].yaxis.label.set_color('white') plot_list[-1].grid(color='0.6') # Plot annotations if self.arguments.stdout: stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1) next_index = str(len(plot_list)) stdout_line.set_gid('stdout' + next_index) self.stdout_msgs[next_index] = stdout_msg self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color()) if self.arguments.pstack: pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1) next_index = str(len(plot_list)) pstack_line.set_gid('pstack' + next_index) self.pstack_msgs[next_index] = pstack_msg # Make points clickable fig.canvas.mpl_connect('pick_event', self) # Create legend legend = plt.legend(tmp_plot, tmp_legend, loc = self.arguments.legend) legend.get_frame().set_alpha(0.7) # More dork mode settings if self.arguments.darkmode: legend.get_frame().set_facecolor('0.2') for text in legend.get_texts(): text.set_color('0.8') plt.show() def __call__(self, event): 
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' } line = event.artist ind = event.ind name = line.get_gid()[:-1] index = line.get_gid()[-1] if self.arguments.stdout and name == 'stdout': if self.arguments.no_color != False: print color_codes[line.get_color()] print "stdout -----------------------------------------------------\n" for id in ind: print self.stdout_msgs[index][id] if self.arguments.no_color != False: print color_codes['RESET'] if self.arguments.pstack and name == 'pstack': if self.arguments.no_color != False: print color_codes[line.get_color()] print "pstack -----------------------------------------------------\n" for id in ind: print self.pstack_msgs[index][id] if self.arguments.no_color != False: print color_codes['RESET'] def buildAnnotation(self,fig,x,y,msg,c): for i in range(len(x)): fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]), xy=(x[i], y[i]), rotation=self.arguments.rotate_text[-1], xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])), color=c, horizontalalignment='center', verticalalignment='bottom', arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.5", color=c ) ) class ReadLog: """Read a memory_logger log file, and display the results to stdout in an easy to read form. """ def __init__(self, arguments): self.arguments = arguments history_file = open(self.arguments.read[-1], 'r') reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL) self.memory_list = [] for row in reader: self.memory_list.append(row) history_file.close() self.sorted_list = [] self.mem_list = [] self.use_nodes = False self.printHistory() def printHistory(self): RESET = '\033[0m' BOLD = '\033[1m' BLACK = '\033[30m' RED = '\033[31m' GREEN = '\033[32m' CYAN = '\033[36m' YELLOW = '\033[33m' last_memory = 0.0 (terminal_width, terminal_height) = self.getTerminalSize() for timestamp in self.memory_list: to = GetTime(float(timestamp[0])) total_memory = int(timestamp[1]) log = timestamp[2].split('\n') pstack = timestamp[3].split('\n') node_name = str(timestamp[4]) node_memory = int(timestamp[5]) self.mem_list.append(total_memory) self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' 
+ '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory]) largest_memory = decimal.Decimal(max(self.mem_list)) if len(set([x[4] for x in self.sorted_list])) > 1: self.use_nodes = True print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )' for item in self.sorted_list: tmp_str = '' if decimal.Decimal(item[1]) == largest_memory: tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width) elif item[1] > last_memory: tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width) elif item[1] == last_memory: tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width) else: tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width) last_memory = item[1] sys.stdout.write(tmp_str) print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )' def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width): RESET = '\033[0m' if decimal.Decimal(total_memory) == largest_memory: percent = '100' elif (decimal.Decimal(total_memory) / largest_memory) == 0: percent = '0' else: percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6] header = len(date) + 18 footer = len(percent) + 6 additional_correction = 0 max_length = decimal.Decimal(terminal_width - header) / largest_memory total_position = total_memory * decimal.Decimal(max_length) node_position = node_memory * decimal.Decimal(max_length) tmp_log = '' if self.arguments.stdout: for single_log in log: if single_log != '': tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n' if self.arguments.pstack: for single_pstack in pstack: if single_pstack != '': tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n' if self.arguments.separate and self.use_nodes != False: message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >' additional_correction = len(RESET) + len(color_code) elif self.use_nodes: message = '< >' else: node_position = 0 message = '' return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log def getTerminalSize(self): """Quicky to get terminal window size""" env = os.environ def ioctl_GWINSZ(fd): try: import fcntl, termios, struct, os cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) except: return None return cr cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) except: pass if not cr: try: cr = (env['LINES'], env['COLUMNS']) except: cr = (25, 80) return int(cr[1]), int(cr[0]) # A simple which function to return path to program def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, program) if 
is_exe(exe_file): return exe_file print 'I could not find the following binary:', program sys.exit(1) def verifyArgs(args): possible_positions = [ 'center', 'center left', 'center right', 'upper center', 'lower center', 'best', 'right', 'left', 'upper right', 'lower right', 'upper left', 'lower left'] if args.legend not in possible_positions: print 'Invalid legend position requested. Possible values are:\n\t', '\n\t'.join([x for x in possible_positions]) sys.exit(1) option_count = 0 if args.read: option_count += 1 if args.run: option_count += 1 if args.plot: option_count += 1 if option_count != 1 and args.pbs != True: if args.call_back_host == None: print 'You must use one of the following: run, read, or plot' sys.exit(1) args.cwd = os.getcwd() # Work with --recover (a MOOSE application specific option) args.recover = False if args.run: if args.run[0].find('--recover') != -1: args.recover = True if args.run[0].find('~') != -1: print "You must use absolute paths. Python does not understand the '~' path discriptor.\nYou can use environment vairables (eg: $HOME) so long as they are absolute paths." sys.exit(1) if args.outfile == None and args.run: # Attempt to build the output file based on input file if re.findall(r'-i (\w+)', args.run[0]) != []: args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log'] else: args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log'] if args.pstack and (args.read is None and args.plot is None): if args.debugger is not None: if args.debugger == 'lldb': if platform.platform().find('Darwin') != -1: try: import lldb except ImportError: lldbImportError() sys.exit(1) else: results = which('lldb') elif args.debugger == 'gdb': results = which('gdb') else: print 'Invalid debugger selected. You must choose between gdb and lldb using the --debugger argument' sys.exit(1) return args def parseArguments(args=None): parser = argparse.ArgumentParser(description='Track and Display memory usage') rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage') rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command using absolute paths. You must encapsulate the command in quotes.') rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ') rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay as to when the memory_logger will launch the tracking agents\n ') rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)') rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ') rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. 
(Defaults based on run command)\n ') readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger') readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ') readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ') readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ') readgroup.add_argument('--legend', metavar='"lower left"', default='lower left', help='Place legend in one of the following locations (default --legend "lower left") "center", "center left", "center right", "upper center", "lower center", "best", "right", "left", "upper right", "lower right", "upper left", "lower left"\n ') commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results') commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ') commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ') commongroup.add_argument('--debugger', dest='debugger', metavar='gdb | lldb', nargs='?', help='Specify the debugger to use. Possible values: gdb or lldb\n ') plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot') plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this ammount (default 30)\n ') plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this ammount (default 0 0)\n ') plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ') plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ') plotgroup.add_argument('--darkmode', dest='darkmode', metavar='', action='store_const', const=True, help='When you want to be cool\n ') internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.') internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ') return verifyArgs(parser.parse_args(args)) def lldbImportError(): print """ Unable to import lldb The Python lldb API is now supplied by Xcode but not automatically set in your PYTHONPATH. Please search the internet for how to do this if you wish to use --pstack on Mac OS X. Note: If you installed Xcode to the default location of /Applications, you should only have to perform the following: export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python:$PYTHONPATH ###!! IMPORTANT !!### It may also be necessary to unload the miniconda module. If you receive a fatal Python error about PyThreadState try using your system's version of Python instead. 
""" if __name__ == '__main__': args = parseArguments() if args.read: ReadLog(args) sys.exit(0) if args.plot: MemoryPlotter(args) sys.exit(0) Server(args)
lgpl-2.1
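The AgentConnector class in the memory_logger script above describes a simple wire format: readData notes that "the first eight bytes is our data length" and _pickleMessage builds that header with "%-8d" before the pickled payload. Below is a minimal Python 3 sketch of that framing; the helper names frame, recv_exact and unframe are illustrative and not part of the script.

# Hedged sketch of the length-prefixed pickle framing used by AgentConnector.
import pickle


def frame(message):
    # Pickle the payload and prepend an 8-character, left-justified decimal
    # length header, mirroring AgentConnector._pickleMessage ("%-8d").
    payload = pickle.dumps(message)
    return ("%-8d" % len(payload)).encode() + payload


def recv_exact(sock, n):
    # Receive exactly n bytes, looping until the socket has delivered them,
    # as AgentConnector.readData does in 1024-byte chunks.
    data = b''
    while len(data) < n:
        chunk = sock.recv(min(1024, n - len(data)))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        data += chunk
    return data


def unframe(sock):
    # Read the 8-byte length header, then exactly that many payload bytes.
    length = int(recv_exact(sock, 8).decode().strip())
    return pickle.loads(recv_exact(sock, length))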
agoose77/hivesystem
manual/movingpanda/panda-11c.py
1
6579
import dragonfly import dragonfly.pandahive import bee from bee import connect import math, functools from panda3d.core import NodePath import dragonfly.scene.unbound, dragonfly.scene.bound import dragonfly.std import dragonfly.io import dragonfly.canvas import Spyder # ## random matrix generator from random import random def random_matrix_generator(): while 1: a = Spyder.AxisSystem() a.rotateZ(360 * random()) a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0) yield dragonfly.scene.matrix(a, "AxisSystem") def id_generator(): n = 0 while 1: n += 1 yield "spawnedpanda" + str(n) from dragonfly.canvas import box2d from bee.mstr import mstr class parameters: pass class myscene(dragonfly.pandahive.spyderframe): a = Spyder.AxisSystem() a *= 0.25 a.origin += (-8, 42, 0) env = Spyder.Model3D("models/environment", "egg", a) a = Spyder.AxisSystem() a *= 0.005 mypanda = Spyder.Actor3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a, entityname="mypanda") a = Spyder.AxisSystem() a *= 0.005 pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a, actorclassname="pandaclass") box = Spyder.Box2D(50, 470, 96, 96) icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True) camcenter = Spyder.Entity3D( "camcenter", ( Spyder.NewMaterial("white", color=(255, 255, 255)), Spyder.Block3D((1, 1, 1), material="white"), ) ) del a, box class pandawalkhive(bee.inithive): animation = dragonfly.scene.bound.animation() walk = dragonfly.std.variable("str")("walk") connect(walk, animation.animation_name) key_w = dragonfly.io.keyboardsensor_trigger("W") connect(key_w, animation.loop) key_s = dragonfly.io.keyboardsensor_trigger("S") connect(key_s, animation.stop) setPos = dragonfly.scene.bound.setPos() setHpr = dragonfly.scene.bound.setHpr() interval = dragonfly.time.interval_time(18) connect(key_w, interval.start) connect(key_s, interval.pause) sequence = dragonfly.time.sequence(4)(8, 1, 8, 1) connect(interval.value, sequence.inp) ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0)) connect(sequence.outp1, ip1) connect(ip1, setPos) connect(key_w, ip1.start) connect(key_s, ip1.stop) ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0)) connect(sequence.outp2, ip2) connect(ip2, setHpr) connect(key_w, ip2.start) connect(key_s, ip2.stop) ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0)) connect(sequence.outp3, ip3) connect(ip3, setPos) connect(key_w, ip3.start) connect(key_s, ip3.stop) ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0)) connect(sequence.outp4, ip4) connect(ip4, setHpr) connect(key_w, ip4.start) connect(key_s, ip4.stop) connect(ip4.reach_end, interval.start) from bee.staticbind import staticbind_baseclass class pandawalkbind(staticbind_baseclass, dragonfly.event.bind, dragonfly.io.bind, dragonfly.sys.bind, dragonfly.scene.bind, dragonfly.time.bind): hive = pandawalkhive class camerabindhive(bee.inithive): interval = dragonfly.time.interval_time(30) sequence = dragonfly.time.sequence(2)(1, 1) connect(interval.value, sequence.inp) startsensor = dragonfly.sys.startsensor() ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0), (360, -20, 0)) ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0), (180, -20, 0)) connect(sequence.outp1, ip1.inp) connect(sequence.outp2, ip2.inp) connect(startsensor, interval.start) connect(startsensor, ip1.start) connect(ip1.reach_end, ip1.stop) connect(ip1.reach_end, ip2.start) 
connect(ip2.reach_end, ip2.stop) connect(ip2.reach_end, ip1.start) connect(ip2.reach_end, interval.start) sethpr = dragonfly.scene.bound.setHpr() connect(ip1, sethpr) connect(ip2, sethpr) class camerabind(staticbind_baseclass, dragonfly.event.bind, dragonfly.io.bind, dragonfly.sys.bind, dragonfly.scene.bind, dragonfly.time.bind): hive = camerabindhive class myhive(dragonfly.pandahive.pandahive): pandaname = "mypanda" pandaname_ = bee.attribute("pandaname") pandaclassname = "pandaclass" pandaclassname_ = bee.attribute("pandaclassname") canvas = dragonfly.pandahive.pandacanvas() mousearea = dragonfly.canvas.mousearea() raiser = bee.raiser() connect("evexc", raiser) z_pandawalk = pandawalkbind().worker() pandaid = dragonfly.std.variable("id")(pandaname_) connect(pandaid, z_pandawalk.bindname) camerabind = camerabind().worker() camcenter = dragonfly.std.variable("id")("camcenter") connect(camcenter, camerabind.bindname) startsensor = dragonfly.sys.startsensor() cam = dragonfly.scene.get_camera() camparent = dragonfly.scene.unbound.parent() connect(cam, camparent.entityname) connect(camcenter, camparent.entityparentname) connect(startsensor, camparent) pandaspawn = dragonfly.scene.spawn_actor() v_panda = dragonfly.std.variable("id")(pandaclassname_) connect(v_panda, pandaspawn) panda_id = dragonfly.std.generator("id", id_generator)() random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)() w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))() connect(panda_id, w_spawn.inp1) connect(random_matrix, w_spawn.inp2) do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))() connect(w_spawn, do_spawn) connect(do_spawn, pandaspawn.spawn_matrix) key_z = dragonfly.io.keyboardsensor_trigger("Z") connect(key_z, do_spawn) pandaicon_click = dragonfly.io.mouseareasensor("pandaicon") connect(pandaicon_click, do_spawn) myscene = myscene( scene="scene", canvas=canvas, mousearea=mousearea, ) wininit = bee.init("window") wininit.camera.setPos(0, 45, 25) wininit.camera.setHpr(180, -20, 0) main = myhive().getinstance() main.build("main") main.place() main.close() main.init() main.run()
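The hive above feeds its spawn transistor from two infinite generators defined at the top of the file (id_generator and random_matrix_generator). Below is a minimal sketch of that pattern with plain tuples standing in for the Spyder/dragonfly types; random_placement_generator is an illustrative name, not part of the example.

# Hedged sketch of the generator-driven spawn pattern above.
from random import random


def id_generator(prefix="spawnedpanda"):
    # Mirrors id_generator above: an endless stream of unique entity names.
    n = 0
    while True:
        n += 1
        yield prefix + str(n)


def random_placement_generator():
    # Mirrors random_matrix_generator above: a random Z rotation plus a random
    # origin in a 15 x 15 square centred on the world origin.
    while True:
        yield 360 * random(), (15 * random() - 7.5, 15 * random() - 7.5, 0)


ids = id_generator()
placements = random_placement_generator()
print(next(ids), next(placements))  # e.g. spawnedpanda1 (231.7, (3.1, -6.2, 0))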
bsd-2-clause
marcotcr/lime
lime/tests/test_lime_text.py
1
7518
import re import unittest import sklearn # noqa from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import f1_score from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import make_pipeline import numpy as np from lime.lime_text import LimeTextExplainer from lime.lime_text import IndexedCharacters, IndexedString class TestLimeText(unittest.TestCase): def test_lime_text_explainer_good_regressor(self): categories = ['alt.atheism', 'soc.religion.christian'] newsgroups_train = fetch_20newsgroups(subset='train', categories=categories) newsgroups_test = fetch_20newsgroups(subset='test', categories=categories) class_names = ['atheism', 'christian'] vectorizer = TfidfVectorizer(lowercase=False) train_vectors = vectorizer.fit_transform(newsgroups_train.data) test_vectors = vectorizer.transform(newsgroups_test.data) nb = MultinomialNB(alpha=.01) nb.fit(train_vectors, newsgroups_train.target) pred = nb.predict(test_vectors) f1_score(newsgroups_test.target, pred, average='weighted') c = make_pipeline(vectorizer, nb) explainer = LimeTextExplainer(class_names=class_names) idx = 83 exp = explainer.explain_instance(newsgroups_test.data[idx], c.predict_proba, num_features=6) self.assertIsNotNone(exp) self.assertEqual(6, len(exp.as_list())) def test_lime_text_tabular_equal_random_state(self): categories = ['alt.atheism', 'soc.religion.christian'] newsgroups_train = fetch_20newsgroups(subset='train', categories=categories) newsgroups_test = fetch_20newsgroups(subset='test', categories=categories) class_names = ['atheism', 'christian'] vectorizer = TfidfVectorizer(lowercase=False) train_vectors = vectorizer.fit_transform(newsgroups_train.data) test_vectors = vectorizer.transform(newsgroups_test.data) nb = MultinomialNB(alpha=.01) nb.fit(train_vectors, newsgroups_train.target) pred = nb.predict(test_vectors) f1_score(newsgroups_test.target, pred, average='weighted') c = make_pipeline(vectorizer, nb) explainer = LimeTextExplainer(class_names=class_names, random_state=10) exp_1 = explainer.explain_instance(newsgroups_test.data[83], c.predict_proba, num_features=6) explainer = LimeTextExplainer(class_names=class_names, random_state=10) exp_2 = explainer.explain_instance(newsgroups_test.data[83], c.predict_proba, num_features=6) self.assertTrue(exp_1.as_map() == exp_2.as_map()) def test_lime_text_tabular_not_equal_random_state(self): categories = ['alt.atheism', 'soc.religion.christian'] newsgroups_train = fetch_20newsgroups(subset='train', categories=categories) newsgroups_test = fetch_20newsgroups(subset='test', categories=categories) class_names = ['atheism', 'christian'] vectorizer = TfidfVectorizer(lowercase=False) train_vectors = vectorizer.fit_transform(newsgroups_train.data) test_vectors = vectorizer.transform(newsgroups_test.data) nb = MultinomialNB(alpha=.01) nb.fit(train_vectors, newsgroups_train.target) pred = nb.predict(test_vectors) f1_score(newsgroups_test.target, pred, average='weighted') c = make_pipeline(vectorizer, nb) explainer = LimeTextExplainer( class_names=class_names, random_state=10) exp_1 = explainer.explain_instance(newsgroups_test.data[83], c.predict_proba, num_features=6) explainer = LimeTextExplainer( class_names=class_names, random_state=20) exp_2 = explainer.explain_instance(newsgroups_test.data[83], c.predict_proba, num_features=6) self.assertFalse(exp_1.as_map() == exp_2.as_map()) def test_indexed_characters_bow(self): s = 'Please, take your time' inverse_vocab = ['P', 'l', 'e', 'a', 's', 
',', ' ', 't', 'k', 'y', 'o', 'u', 'r', 'i', 'm'] positions = [[0], [1], [2, 5, 11, 21], [3, 9], [4], [6], [7, 12, 17], [8, 18], [10], [13], [14], [15], [16], [19], [20]] ic = IndexedCharacters(s) self.assertTrue(np.array_equal(ic.as_np, np.array(list(s)))) self.assertTrue(np.array_equal(ic.string_start, np.arange(len(s)))) self.assertTrue(ic.inverse_vocab == inverse_vocab) self.assertTrue(ic.positions == positions) def test_indexed_characters_not_bow(self): s = 'Please, take your time' ic = IndexedCharacters(s, bow=False) self.assertTrue(np.array_equal(ic.as_np, np.array(list(s)))) self.assertTrue(np.array_equal(ic.string_start, np.arange(len(s)))) self.assertTrue(ic.inverse_vocab == list(s)) self.assertTrue(np.array_equal(ic.positions, np.arange(len(s)))) def test_indexed_string_regex(self): s = 'Please, take your time. Please' tokenized_string = np.array( ['Please', ', ', 'take', ' ', 'your', ' ', 'time', '. ', 'Please']) inverse_vocab = ['Please', 'take', 'your', 'time'] start_positions = [0, 6, 8, 12, 13, 17, 18, 22, 24] positions = [[0, 8], [2], [4], [6]] indexed_string = IndexedString(s) self.assertTrue(np.array_equal(indexed_string.as_np, tokenized_string)) self.assertTrue(np.array_equal(indexed_string.string_start, start_positions)) self.assertTrue(indexed_string.inverse_vocab == inverse_vocab) self.assertTrue(np.array_equal(indexed_string.positions, positions)) def test_indexed_string_callable(self): s = 'aabbccddaa' def tokenizer(string): return [string[i] + string[i + 1] for i in range(0, len(string) - 1, 2)] tokenized_string = np.array(['aa', 'bb', 'cc', 'dd', 'aa']) inverse_vocab = ['aa', 'bb', 'cc', 'dd'] start_positions = [0, 2, 4, 6, 8] positions = [[0, 4], [1], [2], [3]] indexed_string = IndexedString(s, tokenizer) self.assertTrue(np.array_equal(indexed_string.as_np, tokenized_string)) self.assertTrue(np.array_equal(indexed_string.string_start, start_positions)) self.assertTrue(indexed_string.inverse_vocab == inverse_vocab) self.assertTrue(np.array_equal(indexed_string.positions, positions)) def test_indexed_string_inverse_removing_tokenizer(self): s = 'This is a good movie. This, it is a great movie.' def tokenizer(string): return re.split(r'(?:\W+)|$', string) indexed_string = IndexedString(s, tokenizer) self.assertEqual(s, indexed_string.inverse_removing([])) def test_indexed_string_inverse_removing_regex(self): s = 'This is a good movie. This is a great movie' indexed_string = IndexedString(s) self.assertEqual(s, indexed_string.inverse_removing([])) if __name__ == '__main__': unittest.main()
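The tests above pin down the LimeTextExplainer workflow end to end. Below is a hedged usage sketch distilled from test_lime_text_explainer_good_regressor: the same 20 newsgroups / MultinomialNB pipeline and the same document index 83; only the final print is new.

# Hedged usage sketch assembled from the test above.
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from lime.lime_text import LimeTextExplainer

categories = ['alt.atheism', 'soc.religion.christian']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)

vectorizer = TfidfVectorizer(lowercase=False)
nb = MultinomialNB(alpha=.01)
nb.fit(vectorizer.fit_transform(train.data), train.target)
c = make_pipeline(vectorizer, nb)

explainer = LimeTextExplainer(class_names=['atheism', 'christian'], random_state=10)
exp = explainer.explain_instance(test.data[83], c.predict_proba, num_features=6)
print(exp.as_list())  # six (token, weight) pairs, as asserted in the tests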
bsd-2-clause
mendax-grip/cfdemUtilities
ergun/ergunPressureAnalysis.py
2
1751
###############################################################################
#
#   File              : analysePressure.py
#
#   Run Instructions  : python analysePressure.py directory/with/the/files
#
#   Author            : Bruno Blais
#
#   Description       : This script takes all the files in a folder and output
#                       a two column file that is N vs P
#
#
###############################################################################

#Python imports
#----------------
import os
import sys
import numpy
import math
import matplotlib.pyplot as plt
#----------------

#********************************
#   OPTIONS AND USER PARAMETERS
#********************************

plot = True
rhof = 1000

#======================
#   MAIN
#======================

# Directory to work within
if (len(sys.argv) < 2):
    print "You need to enter a folder argument"

folder = sys.argv[1]

# Acquire list of time step
speedFolder = os.listdir(folder)

# Go into folder
#os.chdir(sys.argv[1]) #go to directory

# Sort so that time will already be sorted
speedFolder.sort()

N = []
pAvg = []

# Loop through all times
for i in speedFolder:
    print "Opening ", i
    pMat = numpy.loadtxt(folder + "/" + i, unpack=True, comments="#")
    t = pMat[0, :]
    dP = (pMat[1, :] - pMat[-1, :]) * rhof
    tempString = i.split("_")
    N.append(float(tempString[-1]))
    sortIndex = numpy.argsort(t)
    pS = dP[sortIndex]
    pAvg.append(numpy.average(pS[-10000:-1]))

print "Post-processing over"

# Save results
print "Saving results"
A = [numpy.asarray(N).T, numpy.asarray(pAvg).T]
numpy.savetxt("pressure_" + folder, numpy.asarray(A).T, fmt='%.8e', delimiter=' ', newline='\n')

#Plot results
if (plot):
    plt.plot(t, dP)
    plt.ylabel('Bottom pressure')
    plt.xlabel('Time (s)')
    plt.show()
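The inner loop above hinges on re-ordering each pressure trace by its time column before averaging the tail. A small self-contained numpy sketch of that step follows (toy numbers; the script itself averages pS[-10000:-1]).

# Hedged sketch of the sort-by-time, average-the-tail step above.
import numpy

t = numpy.array([0.3, 0.1, 0.2, 0.5, 0.4])
dP = numpy.array([30., 10., 20., 50., 40.])

sortIndex = numpy.argsort(t)      # indices that put the time stamps in order
pS = dP[sortIndex]                # pressure samples re-ordered to match
steady = numpy.average(pS[-3:])   # average over the tail of the trace
print(steady)                     # 40.0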
lgpl-3.0
sanketloke/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================

Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's
slope can vary quite a bit for each prediction due to the
noise induced in the observations.

Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients. Despite the few data
points in each dimension, the slope of the prediction is much
more stable and the variance in the line itself is greatly
reduced, in comparison to that of the standard linear regression
"""

print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

np.random.seed(0)

classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fignum = 1
for name, clf in classifiers.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)
    ax = plt.axes([.12, .12, .8, .8])

    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)

    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)
    fignum += 1

plt.show()
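The docstring above argues that the ridge penalty stabilises the fitted slope. Below is a hedged numeric companion that measures the slope spread directly instead of plotting it, using the same two training points and the same noise model; the re-seeding per estimator is an addition so both see identical noise draws.

# Hedged numeric companion: slope standard deviation for OLS vs Ridge.
import numpy as np
from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]

for name, clf in dict(ols=linear_model.LinearRegression(),
                      ridge=linear_model.Ridge(alpha=.1)).items():
    rng = np.random.RandomState(0)   # same noise draws for both estimators
    slopes = []
    for _ in range(6):
        this_X = .1 * rng.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)
        slopes.append(clf.coef_[0])
    print(name, "slope std:", np.std(slopes))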
bsd-3-clause
isomerase/mozziesniff
roboskeeter/plotting/animate_trajectory.py
2
2490
from matplotlib import animation

from roboskeeter.plotting.plot_environment import plot_windtunnel as pwt

# Params
sim_or_exp = 'simulation'  # 'experiment', 'simulation'
experiment = eval(sim_or_exp)
highlight_inside_plume = False
show_plume = False
trajectory_i = None

if trajectory_i is None:
    trajectory_i = experiment.trajectories.get_trajectory_numbers().min()

# get df
df = experiment.trajectories.get_trajectory_slice(trajectory_i)
p = df[['position_x', 'position_y', 'position_z']].values
x_t = p.reshape((1, len(p), 3))  # make into correct shape for Jake vdp's code

fig, ax = pwt.plot_windtunnel(experiment.windtunnel)
ax.axis('off')
if show_plume:
    pwt.draw_plume(experiment, ax=ax)

# # choose a different color for each trajectory
# colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))

# set up lines and points
lines = sum([ax.plot([], [], [], '-', c='gray')
             ], [])
pts = sum([ax.plot([], [], [], '*', c='black')
           ], [])

# # prepare the axes limits
# ax.set_xlim((0, 1))
# ax.set_ylim((-.127, .127))
# ax.set_zlim((0, .254))

# set point-of-view: specified by (altitude degrees, azimuth degrees)
# ax.view_init(90, 0)


# initialization function: plot the background of each frame
def init():
    for line, pt in zip(lines, pts):
        line.set_data([], [])
        line.set_3d_properties([])

        pt.set_data([], [])
        pt.set_3d_properties([])
    return lines + pts


# animation function. This will be called sequentially with the frame number
def animate(i):
    # we'll step two time-steps per frame. This leads to nice results.
    i = (2 * i) % x_t.shape[1]
    print i
    for line, pt, xi in zip(lines, pts, x_t):
        x, y, z = xi[:i].T
        print xi.shape
        line.set_data(x, y)
        line.set_3d_properties(z)

        pt.set_data(x[-1:], y[-1:])
        pt.set_3d_properties(z[-1:])

    # ax.view_init(30, 0.3 * i)
    ax.view_init(90, 0 * i)
    fig.canvas.draw()
    return lines + pts


# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               interval=1, blit=False, repeat_delay=8000,
                               frames=len(p))  # original: frames=500, interval=30, blit=True)

# added writer b/c original func didn't work
Writer = animation.writers['mencoder']
writer = Writer(fps=100, metadata=dict(artist='Richard'), bitrate=1000)

anim.save('{}-{}.mp4'.format(sim_or_exp, trajectory_i), writer=writer)

# plt.show()
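The script above follows the standard FuncAnimation pattern: an init function that blanks the artists and an animate(i) callback that redraws them frame by frame. Below is a minimal 2-D sketch of the same pattern that runs without roboskeeter or the mencoder writer; the sine-curve data is purely illustrative.

# Hedged minimal 2-D version of the FuncAnimation pattern above.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

p = np.column_stack([np.linspace(0, 1, 200), np.sin(np.linspace(0, 6, 200))])

fig, ax = plt.subplots()
ax.set_xlim(0, 1)
ax.set_ylim(-1.2, 1.2)
line, = ax.plot([], [], '-', c='gray')
pt, = ax.plot([], [], '*', c='black')


def init():
    # blank the artists before the first frame
    line.set_data([], [])
    pt.set_data([], [])
    return line, pt


def animate(i):
    i = (2 * i) % len(p)          # step two samples per frame, as above
    line.set_data(p[:i, 0], p[:i, 1])
    pt.set_data(p[i:i + 1, 0], p[i:i + 1, 1])
    return line, pt


anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=len(p) // 2, interval=30, blit=True)
plt.show()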
mit
bnaul/scikit-learn
sklearn/datasets/_samples_generator.py
2
60565
""" Generate samples of synthetic data sets. """ # Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, # G. Louppe, J. Nothman # License: BSD 3 clause import numbers import array from collections.abc import Iterable import numpy as np from scipy import linalg import scipy.sparse as sp from ..preprocessing import MultiLabelBinarizer from ..utils import check_array, check_random_state from ..utils import shuffle as util_shuffle from ..utils.random import sample_without_replacement from ..utils.validation import _deprecate_positional_args def _generate_hypercube(samples, dimensions, rng): """Returns distinct binary samples of length dimensions """ if dimensions > 30: return np.hstack([rng.randint(2, size=(samples, dimensions - 30)), _generate_hypercube(samples, 30, rng)]) out = sample_without_replacement(2 ** dimensions, samples, random_state=rng).astype(dtype='>u4', copy=False) out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:] return out @_deprecate_positional_args def make_classification(n_samples=100, n_features=20, *, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None): """Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of an ``n_informative``-dimensional hypercube with sides of length ``2*class_sep`` and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Without shuffling, ``X`` horizontally stacks features in the following order: the primary ``n_informative`` features, followed by ``n_redundant`` linear combinations of the informative features, followed by ``n_repeated`` duplicates, drawn randomly with replacement from the informative and redundant features. The remaining features are filled with random noise. Thus, without shuffling, all useful features are contained in the columns ``X[:, :n_informative + n_redundant + n_repeated]``. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. These comprise ``n_informative`` informative features, ``n_redundant`` redundant features, ``n_repeated`` duplicated features and ``n_features-n_informative-n_redundant-n_repeated`` useless features drawn at random. n_informative : int, default=2 The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension ``n_informative``. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, default=2 The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, default=0 The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, default=2 The number of classes (or labels) of the classification problem. n_clusters_per_class : int, default=2 The number of clusters per class. 
weights : array-like of shape (n_classes,) or (n_classes - 1,),\ default=None The proportions of samples assigned to each class. If None, then classes are balanced. Note that if ``len(weights) == n_classes - 1``, then the last class weight is automatically inferred. More than ``n_samples`` samples may be returned if the sum of ``weights`` exceeds 1. flip_y : float, default=0.01 The fraction of samples whose class is assigned randomly. Larger values introduce noise in the labels and make the classification task harder. Note that the default setting flip_y > 0 might lead to less than n_classes in y in some cases. class_sep : float, default=1.0 The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the classification task easier. hypercube : bool, default=True If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, array of shape (n_features,) or None, default=0.0 Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, array of shape (n_features,) or None, default=1.0 Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : bool, default=True Shuffle the samples and the features. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for class membership of each sample. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. 
See also -------- make_blobs: simplified variant make_multilabel_classification: unrelated generator for multilabel tasks """ generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError("Number of informative, redundant and repeated " "features must sum to less than the number of total" " features") # Use log2 to avoid overflow errors if n_informative < np.log2(n_classes * n_clusters_per_class): msg = "n_classes({}) * n_clusters_per_class({}) must be" msg += " smaller or equal 2**n_informative({})={}" raise ValueError(msg.format(n_classes, n_clusters_per_class, n_informative, 2**n_informative)) if weights is not None: if len(weights) not in [n_classes, n_classes - 1]: raise ValueError("Weights specified but incompatible with number " "of classes.") if len(weights) == n_classes - 1: if isinstance(weights, list): weights = weights + [1.0 - sum(weights)] else: weights = np.resize(weights, n_classes) weights[-1] = 1.0 - sum(weights[:-1]) else: weights = [1.0 / n_classes] * n_classes n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class # Distribute samples among clusters by weight n_samples_per_cluster = [ int(n_samples * weights[k % n_classes] / n_clusters_per_class) for k in range(n_clusters)] for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Initialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(float, copy=False) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.rand(n_clusters, 1) centroids *= generator.rand(1, n_informative) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.randn(n_samples, n_informative) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.rand(n_informative, n_informative) - 1 X_k[...] 
= np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.rand(n_informative, n_redundant) - 1 X[:, n_informative:n_informative + n_redundant] = \ np.dot(X[:, :n_informative], B) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp) X[:, n:n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.randn(n_samples, n_useless) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.rand(n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.rand(n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.rand(n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y @_deprecate_positional_args def make_multilabel_classification(n_samples=100, n_features=20, *, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator='dense', return_distributions=False, random_state=None): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. n_classes : int, default=5 The number of classes of the classification problem. n_labels : int, default=2 The average number of labels per instance. More precisely, the number of labels per sample is drawn from a Poisson distribution with ``n_labels`` as its expected value, but samples are bounded (using rejection sampling) by ``n_classes``, and must be nonzero if ``allow_unlabeled`` is False. length : int, default=50 The sum of the features (number of words if documents) is drawn from a Poisson distribution with this expected value. allow_unlabeled : bool, default=True If ``True``, some instances might not belong to any class. sparse : bool, default=False If ``True``, return a sparse feature matrix .. versionadded:: 0.17 parameter to allow *sparse* output. return_indicator : 'dense' (default) | 'sparse' | False If ``dense`` return ``Y`` in the dense binary indicator format. If ``'sparse'`` return ``Y`` in the sparse binary indicator format. ``False`` returns a list of lists of labels. return_distributions : bool, default=False If ``True``, return the prior class probability and conditional probabilities of features given classes, from which the data was drawn. random_state : int, RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. 
Y : array or sparse CSR matrix of shape [n_samples, n_classes] The label sets. p_c : array, shape [n_classes] The probability of each class being drawn. Only returned if ``return_distributions=True``. p_w_c : array, shape [n_features, n_classes] The probability of each feature being drawn given each class. Only returned if ``return_distributions=True``. """ if n_classes < 1: raise ValueError( "'n_classes' should be an integer greater than 0. Got {} instead." .format(n_classes) ) if length < 1: raise ValueError( "'length' should be an integer greater than 0. Got {} instead." .format(length) ) generator = check_random_state(random_state) p_c = generator.rand(n_classes) p_c /= p_c.sum() cumulative_p_c = np.cumsum(p_c) p_w_c = generator.rand(n_features, n_classes) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling y_size = n_classes + 1 while (not allow_unlabeled and y_size == 0) or y_size > n_classes: y_size = generator.poisson(n_labels) # pick n classes y = set() while len(y) != y_size: # pick a class with probability P(c) c = np.searchsorted(cumulative_p_c, generator.rand(y_size - len(y))) y.update(c) y = list(y) # pick a non-zero document length by rejection sampling n_words = 0 while n_words == 0: n_words = generator.poisson(length) # generate a document of length n_words if len(y) == 0: # if sample does not belong to any class, generate noise word words = generator.randint(n_features, size=n_words) return words, y # sample words with replacement from selected classes cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() cumulative_p_w_sample /= cumulative_p_w_sample[-1] words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words)) return words, y X_indices = array.array('i') X_indptr = array.array('i', [0]) Y = [] for i in range(n_samples): words, y = sample_example() X_indices.extend(words) X_indptr.append(len(X_indices)) Y.append(y) X_data = np.ones(len(X_indices), dtype=np.float64) X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) X.sum_duplicates() if not sparse: X = X.toarray() # return_indicator can be True due to backward compatibility if return_indicator in (True, 'sparse', 'dense'): lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse')) Y = lb.fit([range(n_classes)]).transform(Y) elif return_indicator is not False: raise ValueError("return_indicator must be either 'sparse', 'dense' " 'or False.') if return_distributions: return X, Y, p_c, p_w_c return X, Y @_deprecate_positional_args def make_hastie_10_2(n_samples=12000, *, random_state=None): """Generates data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=12000 The number of samples. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, 10] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. 
See also -------- make_gaussian_quantiles: a generalization of this dataset approach """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False) y[y == 0.0] = -1.0 return X, y @_deprecate_positional_args def make_regression(n_samples=100, n_features=100, *, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=100 The number of features. n_informative : int, default=10 The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, default=1 The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, default=0.0 The bias term in the underlying linear model. effective_rank : int, default=None if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float, default=0.5 The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. When a float, it should be between 0 and 1. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. shuffle : bool, default=True Shuffle the samples and the features. coef : bool, default=False If True, the coefficients of the underlying linear model are returned. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. 
""" n_informative = min(n_features, n_informative) generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.randn(n_samples, n_features) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.rand(n_informative, n_targets) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y @_deprecate_positional_args def make_circles(n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=.8): """Make a large circle containing a smaller circle in 2d. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int or tuple of 2 int, default=100 If int, it is the total number of points generated. For odd numbers, the inner circle will have one point more than the outer circle. If two-element tuple, number of points in outer circle and inner circle. shuffle : bool, default=True Whether to shuffle the samples. noise : float, default=None Standard deviation of Gaussian noise added to the data. random_state : int or RandomState instance, default=None Determines random number generation for dataset shuffling and noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. factor : float, default=.8 Scale factor between inner and outer circle in the range `(0, 1)`. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" if factor >= 1 or factor < 0: raise ValueError("'factor' has to be between 0 and 1.") if isinstance(n_samples, numbers.Integral): n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out else: try: n_samples_out, n_samples_in = n_samples except ValueError: raise ValueError('`n_samples` can be either an int or ' 'a two-element tuple.') generator = check_random_state(random_state) # so as not to have the first point = last point, we set endpoint=False linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False) linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False) outer_circ_x = np.cos(linspace_out) outer_circ_y = np.sin(linspace_out) inner_circ_x = np.cos(linspace_in) * factor inner_circ_y = np.sin(linspace_in) * factor X = np.vstack([np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]).T y = np.hstack([np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y @_deprecate_positional_args def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int or tuple of 2 int, default=100 If int, the total number of points generated. If two-element tuple, number of points in each of two moons. shuffle : bool, default=True Whether to shuffle the samples. noise : float, default=None Standard deviation of Gaussian noise added to the data. random_state : int or RandomState instance, default=None Determines random number generation for dataset shuffling and noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, 2) The generated samples. y : ndarray of shape (n_samples,) The integer labels (0 or 1) for class membership of each sample. """ if isinstance(n_samples, numbers.Integral): n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out else: try: n_samples_out, n_samples_in = n_samples except ValueError: raise ValueError('`n_samples` can be either an int or ' 'a two-element tuple.') generator = check_random_state(random_state) outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5 X = np.vstack([np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]).T y = np.hstack([np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y @_deprecate_positional_args def make_blobs(n_samples=100, n_features=2, *, centers=None, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None, return_centers=False): """Generate isotropic Gaussian blobs for clustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int or array-like, default=100 If int, it is the total number of points equally divided among clusters. If array-like, each element of the sequence indicates the number of samples per cluster. .. 
versionchanged:: v0.20 one can now pass an array-like to the ``n_samples`` parameter n_features : int, default=2 The number of features for each sample. centers : int or array of shape (n_centers, n_features), default=None The number of centers to generate, or the fixed center locations. If n_samples is an int and centers is None, 3 centers are generated. If n_samples is array-like, centers must be either None or an array of length equal to the length of n_samples. cluster_std : float or array-like of floats, default=1.0 The standard deviation of the clusters. center_box : tuple of float (min, max), default=(-10.0, 10.0) The bounding box for each cluster center when centers are generated at random. shuffle : bool, default=True Shuffle the samples. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. return_centers : bool, default=False If True, then return the centers of each cluster .. versionadded:: 0.23 Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. centers : array, shape [n_centers, n_features] The centers of each cluster. Only returned if ``return_centers=True``. Examples -------- >>> from sklearn.datasets import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0]) See also -------- make_classification: a more intricate variant """ generator = check_random_state(random_state) if isinstance(n_samples, numbers.Integral): # Set n_centers by looking at centers arg if centers is None: centers = 3 if isinstance(centers, numbers.Integral): n_centers = centers centers = generator.uniform(center_box[0], center_box[1], size=(n_centers, n_features)) else: centers = check_array(centers) n_features = centers.shape[1] n_centers = centers.shape[0] else: # Set n_centers by looking at [n_samples] arg n_centers = len(n_samples) if centers is None: centers = generator.uniform(center_box[0], center_box[1], size=(n_centers, n_features)) try: assert len(centers) == n_centers except TypeError: raise ValueError("Parameter `centers` must be array-like. " "Got {!r} instead".format(centers)) except AssertionError: raise ValueError("Length of `n_samples` not consistent" " with number of centers. Got n_samples = {} " "and centers = {}".format(n_samples, centers)) else: centers = check_array(centers) n_features = centers.shape[1] # stds: if cluster_std is given as list, it must be consistent # with the n_centers if (hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers): raise ValueError("Length of `clusters_std` not consistent with " "number of centers. 
Got centers = {} " "and cluster_std = {}".format(centers, cluster_std)) if isinstance(cluster_std, numbers.Real): cluster_std = np.full(len(centers), cluster_std) X = [] y = [] if isinstance(n_samples, Iterable): n_samples_per_center = n_samples else: n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in range(n_samples % n_centers): n_samples_per_center[i] += 1 for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): X.append(generator.normal(loc=centers[i], scale=std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: total_n_samples = np.sum(n_samples) indices = np.arange(total_n_samples) generator.shuffle(indices) X = X[indices] y = y[indices] if return_centers: return X, y, centers else: return X, y @_deprecate_positional_args def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None): """Generate the "Friedman #1" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are independent features uniformly distributed on the interval [0, 1]. The output `y` is created according to the formula:: y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). Out of the `n_features` features, only 5 are actually used to compute `y`. The remaining features are independent of `y`. The number of features has to be >= 5. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=10 The number of features. Should be at least 5. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. random_state : int or RandomState instance, default=None Determines random number generation for dataset noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ if n_features < 5: raise ValueError("n_features must be at least five.") generator = check_random_state(random_state) X = generator.rand(n_samples, n_features) y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples) return X, y @_deprecate_positional_args def make_friedman2(n_samples=100, *, noise=0.0, random_state=None): """Generate the "Friedman #2" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. random_state : int or RandomState instance, default=None Determines random number generation for dataset noise. Pass an int for reproducible output across multiple function calls. 
See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \ + noise * generator.randn(n_samples) return X, y @_deprecate_positional_args def make_friedman3(n_samples=100, *, noise=0.0, random_state=None): """Generate the "Friedman #3" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ / X[:, 0]) + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. random_state : int or RandomState instance, default=None Determines random number generation for dataset noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \ + noise * generator.randn(n_samples) return X, y @_deprecate_positional_args def make_low_rank_matrix(n_samples=100, n_features=100, *, effective_rank=10, tail_strength=0.5, random_state=None): """Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=100 The number of features. effective_rank : int, default=10 The approximate number of singular vectors required to explain most of the data by linear combinations. 
tail_strength : float, default=0.5 The relative importance of the fat noisy tail of the singular values profile. The value should be between 0 and 1. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, n_features] The matrix. """ generator = check_random_state(random_state) n = min(n_samples, n_features) # Random (ortho normal) vectors u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic') v, _ = linalg.qr(generator.randn(n_features, n), mode='economic') # Index of the singular values singular_ind = np.arange(n, dtype=np.float64) # Build the singular profile by assembling signal and noise components low_rank = ((1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)) tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) s = np.identity(n) * (low_rank + tail) return np.dot(np.dot(u, s), v.T) @_deprecate_positional_args def make_sparse_coded_signal(n_samples, *, n_components, n_features, n_nonzero_coefs, random_state=None): """Generate a signal as a sparse combination of dictionary elements. Returns a matrix Y = DX, such as D is (n_features, n_components), X is (n_components, n_samples) and each column of X has exactly n_nonzero_coefs non-zero elements. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int number of samples to generate n_components : int number of components in the dictionary n_features : int number of features of the dataset to generate n_nonzero_coefs : int number of active (non-zero) coefficients in each sample random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- data : array of shape [n_features, n_samples] The encoded signal (Y). dictionary : array of shape [n_features, n_components] The dictionary with normalized components (D). code : array of shape [n_components, n_samples] The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). """ generator = check_random_state(random_state) # generate dictionary D = generator.randn(n_features, n_components) D /= np.sqrt(np.sum((D ** 2), axis=0)) # generate code X = np.zeros((n_components, n_samples)) for i in range(n_samples): idx = np.arange(n_components) generator.shuffle(idx) idx = idx[:n_nonzero_coefs] X[idx, i] = generator.randn(n_nonzero_coefs) # encode signal Y = np.dot(D, X) return map(np.squeeze, (Y, D, X)) @_deprecate_positional_args def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None): """Generate a random regression problem with sparse uncorrelated design This dataset is described in Celeux et al [1]. as:: X ~ N(0, 1) y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] Only the first 4 features are informative. The remaining features are useless. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=10 The number of features. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, "Regularization in regression: comparing Bayesian and frequentist methods in a poorly informative situation", 2009. """ generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), scale=np.ones(n_samples)) return X, y @_deprecate_positional_args def make_spd_matrix(n_dim, *, random_state=None): """Generate a random symmetric, positive-definite matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_dim : int The matrix dimension. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_dim, n_dim] The random symmetric, positive-definite matrix. See also -------- make_sparse_spd_matrix """ generator = check_random_state(random_state) A = generator.rand(n_dim, n_dim) U, _, Vt = linalg.svd(np.dot(A.T, A)) X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), Vt) return X @_deprecate_positional_args def make_sparse_spd_matrix(dim=1, *, alpha=0.95, norm_diag=False, smallest_coef=.1, largest_coef=.9, random_state=None): """Generate a sparse symmetric definite positive matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- dim : integer, default=1 The size of the random matrix to generate. alpha : float, default=0.95 The probability that a coefficient is zero (see notes). Larger values enforce more sparsity. The value should be in the range 0 and 1. norm_diag : bool, default=False Whether to normalize the output matrix to make the leading diagonal elements all 1 smallest_coef : float, default=0.1 The value of the smallest coefficient between 0 and 1. largest_coef : float, default=0.9 The value of the largest coefficient between 0 and 1. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- prec : sparse matrix of shape (dim, dim) The generated matrix. Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. See also -------- make_spd_matrix """ random_state = check_random_state(random_state) chol = -np.eye(dim) aux = random_state.rand(dim, dim) aux[aux < alpha] = 0 aux[aux > alpha] = (smallest_coef + (largest_coef - smallest_coef) * random_state.rand(np.sum(aux > alpha))) aux = np.tril(aux, k=-1) # Permute the lines: we don't want to have asymmetries in the final # SPD matrix permutation = random_state.permutation(dim) aux = aux[permutation].T[permutation] chol += aux prec = np.dot(chol.T, chol) if norm_diag: # Form the diagonal vector into a row matrix d = np.diag(prec).reshape(1, prec.shape[0]) d = 1. / np.sqrt(d) prec *= d prec *= d.T return prec @_deprecate_positional_args def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None): """Generate a swiss roll dataset. Read more in the :ref:`User Guide <sample_generators>`. 
Parameters ---------- n_samples : int, default=100 The number of sample points on the S curve. noise : float, default=0.0 The standard deviation of the gaussian noise. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. Notes ----- The algorithm is from Marsland [1]. References ---------- .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", Chapter 10, 2009. http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py """ generator = check_random_state(random_state) t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples)) x = t * np.cos(t) y = 21 * generator.rand(1, n_samples) z = t * np.sin(t) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t @_deprecate_positional_args def make_s_curve(n_samples=100, *, noise=0.0, random_state=None): """Generate an S curve dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of sample points on the S curve. noise : float, default=0.0 The standard deviation of the gaussian noise. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. """ generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t @_deprecate_positional_args def make_gaussian_quantiles(*, mean=None, cov=1., n_samples=100, n_features=2, n_classes=3, shuffle=True, random_state=None): r"""Generate isotropic Gaussian and label samples by quantile This classification dataset is constructed by taking a multi-dimensional standard normal distribution and defining classes separated by nested concentric multi-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- mean : array of shape (n_features,), default=None The mean of the multi-dimensional normal distribution. If None then use the origin (0, 0, ...). cov : float, default=1.0 The covariance matrix will be this value times the unit matrix. This dataset only produces symmetric normal distributions. n_samples : int, default=100 The total number of points equally divided among classes. n_features : int, default=2 The number of features for each sample. n_classes : int, default=3 The number of classes shuffle : bool, default=True Shuffle the samples. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for quantile membership of each sample. Notes ----- The dataset is from Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ if n_samples < n_classes: raise ValueError("n_samples must be at least n_classes") generator = check_random_state(random_state) if mean is None: mean = np.zeros(n_features) else: mean = np.array(mean) # Build multivariate normal distribution X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) # Sort by distance from origin idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) X = X[idx, :] # Label by quantile step = n_samples // n_classes y = np.hstack([np.repeat(np.arange(n_classes), step), np.repeat(n_classes - 1, n_samples - step * n_classes)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) return X, y def _shuffle(data, random_state=None): generator = check_random_state(random_state) n_rows, n_cols = data.shape row_idx = generator.permutation(n_rows) col_idx = generator.permutation(n_cols) result = data[row_idx][:, col_idx] return result, row_idx, col_idx @_deprecate_positional_args def make_biclusters(shape, n_clusters, *, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with constant block diagonal structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer The number of biclusters. noise : float, default=0.0 The standard deviation of the gaussian noise. minval : int, default=10 Minimum value of a bicluster. maxval : int, default=100 Maximum value of a bicluster. shuffle : bool, default=True Shuffle the samples. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and words using bipartite spectral graph partitioning. In Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 269-274). ACM. 
See also -------- make_checkerboard """ generator = check_random_state(random_state) n_rows, n_cols = shape consts = generator.uniform(minval, maxval, n_clusters) # row and column clusters of approximately equal sizes row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_clusters): selector = np.outer(row_labels == i, col_labels == i) result[selector] += consts[i] if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack([row_labels == c for c in range(n_clusters)]) cols = np.vstack([col_labels == c for c in range(n_clusters)]) return result, rows, cols @_deprecate_positional_args def make_checkerboard(shape, n_clusters, *, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with block checkerboard structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : tuple (n_rows, n_cols) The shape of the result. n_clusters : int or array-like (n_row_clusters, n_column_clusters) The number of row and column clusters. noise : float, default=0.0 The standard deviation of the gaussian noise. minval : int, default=10 Minimum value of a bicluster. maxval : int, default=100 Maximum value of a bicluster. shuffle : bool, default=True Shuffle the samples. random_state : int or RandomState instance, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). Spectral biclustering of microarray data: coclustering genes and conditions. Genome research, 13(4), 703-716. 
See also -------- make_biclusters """ generator = check_random_state(random_state) if hasattr(n_clusters, "__len__"): n_row_clusters, n_col_clusters = n_clusters else: n_row_clusters = n_col_clusters = n_clusters # row and column clusters of approximately equal sizes n_rows, n_cols = shape row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_row_clusters): for j in range(n_col_clusters): selector = np.outer(row_labels == i, col_labels == j) result[selector] += generator.uniform(minval, maxval) if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack([row_labels == label for label in range(n_row_clusters) for _ in range(n_col_clusters)]) cols = np.vstack([col_labels == label for _ in range(n_row_clusters) for label in range(n_col_clusters)]) return result, rows, cols
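# Editor's note: a short, hedged usage sketch for three of the generators defined above
# (make_classification, make_blobs and make_regression). The parameter values below are
# illustrative choices only; see the docstrings above for the full semantics of each argument.
import numpy as np
from sklearn.datasets import make_classification, make_blobs, make_regression
from sklearn.linear_model import LinearRegression

# n_informative + n_redundant + n_repeated must fit inside n_features (validated above).
X, y = make_classification(n_samples=200, n_features=10, n_informative=4,
                           n_redundant=2, n_repeated=0, n_classes=3,
                           n_clusters_per_class=1, class_sep=2.0, random_state=0)
print(X.shape, np.bincount(y))            # (200, 10) with roughly balanced classes

# Isotropic Gaussian blobs; passing a list as n_samples fixes the per-cluster counts.
Xb, yb, centers = make_blobs(n_samples=[50, 30, 20], centers=None, n_features=2,
                             return_centers=True, random_state=0)
print(Xb.shape, np.bincount(yb), centers.shape)   # (100, 2) [50 30 20] (3, 2)

# Linear regression data with a known sparse ground truth (coef=True returns it).
Xr, yr, coef = make_regression(n_samples=500, n_features=20, n_informative=5,
                               noise=1.0, coef=True, random_state=0)
est = LinearRegression().fit(Xr, yr)
# Only 5 of the 20 ground-truth coefficients are non-zero; with this much data and little
# noise, OLS should recover them closely.
print(np.count_nonzero(coef), float(np.abs(est.coef_ - coef).max()))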
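# Editor's note: the make_low_rank_matrix docstring above describes a bell-shaped
# singular-value profile with a fat tail. A quick way to see that profile is to take the SVD
# of a generated matrix directly; the sizes and random_state below are illustrative.
import numpy as np
from sklearn.datasets import make_low_rank_matrix

X = make_low_rank_matrix(n_samples=200, n_features=100,
                         effective_rank=10, tail_strength=0.5, random_state=0)
s = np.linalg.svd(X, compute_uv=False)
print(s[:5])      # the bell-shaped "signal" part: the largest values sit at the top of the profile
print(s[50:55])   # well past effective_rank only the slowly decaying fat tail remains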
bsd-3-clause
effigies/mne-python
examples/inverse/plot_make_inverse_operator.py
2
3383
""" =============================================================== Assemble inverse operator and compute MNE-dSPM inverse solution =============================================================== Assemble M/EEG, MEG, and EEG inverse operators and compute dSPM inverse solution on MNE evoked dataset and stores the solution in stc files for visualisation. """ # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # # License: BSD (3-clause) print(__doc__) import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import (make_inverse_operator, apply_inverse, write_inverse_operator) data_path = sample.data_path() fname_fwd_meeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' fname_fwd_eeg = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif' fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif' fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' snr = 3.0 lambda2 = 1.0 / snr ** 2 # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True) noise_cov = mne.read_cov(fname_cov) # regularize noise covariance noise_cov = mne.cov.regularize(noise_cov, evoked.info, mag=0.05, grad=0.05, eeg=0.1, proj=True) # Restrict forward solution as necessary for MEG forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False) # Alternatively, you can just load a forward solution that is restricted forward_eeg = mne.read_forward_solution(fname_fwd_eeg, surf_ori=True) # make an M/EEG, MEG-only, and EEG-only inverse operators info = evoked.info inverse_operator_meeg = make_inverse_operator(info, forward_meeg, noise_cov, loose=0.2, depth=0.8) inverse_operator_meg = make_inverse_operator(info, forward_meg, noise_cov, loose=0.2, depth=0.8) inverse_operator_eeg = make_inverse_operator(info, forward_eeg, noise_cov, loose=0.2, depth=0.8) write_inverse_operator('sample_audvis-meeg-oct-6-inv.fif', inverse_operator_meeg) write_inverse_operator('sample_audvis-meg-oct-6-inv.fif', inverse_operator_meg) write_inverse_operator('sample_audvis-eeg-oct-6-inv.fif', inverse_operator_eeg) # Compute inverse solution stcs = dict() stcs['meeg'] = apply_inverse(evoked, inverse_operator_meeg, lambda2, "dSPM", pick_ori=None) stcs['meg'] = apply_inverse(evoked, inverse_operator_meg, lambda2, "dSPM", pick_ori=None) stcs['eeg'] = apply_inverse(evoked, inverse_operator_eeg, lambda2, "dSPM", pick_ori=None) # Save result in stc files names = ['meeg', 'meg', 'eeg'] for name in names: stcs[name].save('mne_dSPM_inverse-%s' % name) ############################################################################### # View activation time-series plt.close('all') plt.figure(figsize=(8, 6)) for ii in range(len(stcs)): name = names[ii] stc = stcs[name] plt.subplot(len(stcs), 1, ii + 1) plt.plot(1e3 * stc.times, stc.data[::150, :].T) plt.ylabel('%s\ndSPM value' % str.upper(name)) plt.xlabel('time (ms)') plt.show()
bsd-3-clause
marcoviero/simstack
run_simstack_cmd_line.py
1
10390
#!/usr/bin/env python # Standard modules import pdb import os import os.path import sys import shutil import time import logging import importlib import numpy as np import pandas as pd import cPickle as pickle from astropy.wcs import WCS # Modules within this package import parameters from skymaps import Skymaps from bincatalogs import Field_catalogs from utils import circle_mask from utils import dist_idl from utils import gauss_kern from utils import pad_and_smooth_psf from utils import shift_twod from utils import smooth_psf from lmfit import Parameters, minimize, fit_report from simstack import stack_libraries_in_layers from simstack import stack_libraries_in_layers_w_background from simstack import is_true from bootstrap import Bootstrap def main(): # Set up logging logging.basicConfig( level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%d-%m %I:%M:%S %p') # Get parameters from the provided parameter file param_file_path = sys.argv[1] params = parameters.get_params(param_file_path) zkey = params['zkey'] mkey = params['mkey'] rkey = params['ra_key'] dkey = params['dec_key'] t0 = time.time() if params['bins']['bin_in_lookback_time'] == True: z_pref = 'lookt' else: z_pref = 'z' # Stack in Slices or ALL AT ONCE Choice made here if params['bins']['stack_all_z_at_once'] == True: n_slices = 1 else: n_slices = len(params['bins']['z_nodes']) - 1 #Save Parameter file in folder save_paramfile(params) for i in range(n_slices): if params['bins']['stack_all_z_at_once'] == True: j = None stacked_flux_density_key = 'all_'+z_pref else: j = i if params['bins']['bin_in_lookback_time'] == True: stacked_flux_density_key = '{:.2f}'.format(params['bins']['t_nodes'][j])+'-'+'{:.2f}'.format(params['bins']['t_nodes'][j+1]) else: stacked_flux_density_key = str(params['bins']['t_nodes'][j])+'-'+str(params['bins']['t_nodes'][j+1]) print stacked_flux_density_key # From parameter file read maps, psfs, cats, and divide them into bins sky_library = get_maps(params) cats = get_catalogs(params) if params['bootstrap'] == True: pcat = Bootstrap(cats.table) # Bootstrap Loop Starts here for iboot in np.arange(params['number_of_boots'])+params['boot0']: #stacked_flux_densities = {} if params['bootstrap'] == True: print 'Running ' +str(int(iboot))+' of '+ str(int(params['boot0'])) +'-'+ str(int(params['boot0']+params['number_of_boots']-1)) + ' bootstraps' pcat.perturb_catalog(perturb_z = params['perturb_z']) bootcat = Field_catalogs(pcat.pseudo_cat,zkey=zkey,mkey=mkey,rkey=rkey,dkey=dkey) binned_ra_dec = get_bin_radec(params, bootcat, single_slice = j) if params['save_bin_ids'] == False: bin_ids = None else: bin_ids = get_bin_ids(params, bootcat, single_slice = j) out_file_path = params['io']['output_folder']+'/bootstrapped_fluxes/'+params['io']['shortname'] out_file_suffix = '_'+stacked_flux_density_key+'_boot_'+str(int(iboot)) else: binned_ra_dec = get_bin_radec(params, cats, single_slice = j) if params['save_bin_ids'] == False: bin_ids = None else: bin_ids = get_bin_ids(params, cats, single_slice = j) out_file_path = params['io']['output_folder'] + '/simstack_fluxes/' + params['io']['shortname'] out_file_suffix = '_'+stacked_flux_density_key # Do simultaneous stacking if params['float_background'] == True: stacked_flux_densities = stack_libraries_in_layers_w_background(sky_library,binned_ra_dec) else: stacked_flux_densities = stack_libraries_in_layers(sky_library,binned_ra_dec) save_stacked_fluxes(stacked_flux_densities,params, out_file_path,out_file_suffix, IDs=bin_ids) #pdb.set_trace() # Summarize timing 
t1 = time.time() tpass = t1-t0 logging.info("Done!") logging.info("") logging.info("Total time : {:.4f} minutes\n".format(tpass/60.)) def get_maps(params): ''' Read maps and psfs and store into dictionaries ''' sky_library = {} for t in params['library_keys']: sky = Skymaps(params['map_files'][t],params['noise_files'][t],params['psfs'][t+'_fwhm'],color_correction=params['color_correction'][t], beam_area=params['psfs'][t+'_beam_area']) sky.add_wavelength(params['wavelength'][t]) sky.add_fwhm(params['psfs'][t+'_fwhm']) sky_library[t] = sky return sky_library def get_catalogs(params): # Formatting no longer needed as tbl = pd.read_table(params['catalogs']['catalog_path']+params['catalogs']['catalog_file'],sep=',') tbl['ID'] = range(len(tbl)) if 'sfg' in tbl.keys(): pass elif 'CLASS' in tbl.keys(): tbl['sfg']=tbl['CLASS'] zkey = params['zkey'] mkey = params['mkey'] rkey = params['ra_key'] dkey = params['dec_key'] catout = Field_catalogs(tbl,zkey=zkey,mkey=mkey,rkey=rkey,dkey=dkey) return catout def get_bin_ids(params, cats, single_slice = None): if single_slice == None: z_nodes = params['bins']['z_nodes'] else: z_nodes = params['bins']['z_nodes'][single_slice:single_slice+2] m_nodes = params['bins']['m_nodes'] if params['galaxy_splitting_scheme'] == 'sf-qt': cats.separate_sf_qt() cats.get_sf_qt_mass_redshift_bins(z_nodes,m_nodes) bin_ids = cats.id_z_ms elif params['galaxy_splitting_scheme'] == '5pops': Fcut = params['cuts']['fcut'] MIPS24_cut = params['cuts']['mips24_cut'] cats.separate_5pops(Fcut=Fcut,MIPS24_cut=MIPS24_cut) cats.get_5pops_mass_redshift_bins(z_nodes,m_nodes) bin_ids = cats.id_z_ms_5pop elif params['galaxy_splitting_scheme'] == '4pops': Fcut = params['cuts']['fcut'] age_cut = params['cuts']['age_cut'] cats.separate_4pops(Fcut=Fcut,age_cut=age_cut) cats.get_4pops_mass_redshift_bins(z_nodes,m_nodes) bin_ids = cats.id_z_ms_4pop elif params['galaxy_splitting_scheme'] == 'uvj': c_nodes = params['populations']['c_nodes'] c_names = params['populations']['pop_names'] cats.table['UVJ']=np.sqrt((cats.table['rf_U_V'] - np.min(cats.table['rf_U_V']))**2 + (cats.table['rf_V_J']-np.min(cats.table['rf_V_J'])) ** 2) cats.separate_uvj_pops(c_nodes) cats.get_mass_redshift_uvj_bins(z_nodes,m_nodes,c_names) bin_ids = cats.id_z_ms_pop elif params['galaxy_splitting_scheme'] == 'general': cuts_dict = params['populations'] cats.separate_pops_by_name(cuts_dict) cats.get_subpop_ids(z_nodes, m_nodes, cuts_dict) bin_ids = cats.subpop_ids return bin_ids def get_bin_radec(params, cats, single_slice = None): if single_slice == None: z_nodes = params['bins']['z_nodes'] else: z_nodes = params['bins']['z_nodes'][single_slice:single_slice+2] m_nodes = params['bins']['m_nodes'] if params['galaxy_splitting_scheme'] == 'sf-qt': cats.separate_sf_qt() cats.get_sf_qt_mass_redshift_bins(z_nodes,m_nodes) binned_ra_dec = cats.subset_positions(cats.id_z_ms) elif params['galaxy_splitting_scheme'] == '5pops': Fcut = params['cuts']['fcut'] MIPS24_cut = params['cuts']['mips24_cut'] cats.separate_5pops(Fcut=Fcut,MIPS24_cut=MIPS24_cut) cats.get_5pops_mass_redshift_bins(z_nodes,m_nodes) binned_ra_dec = cats.subset_positions(cats.id_z_ms_5pop) elif params['galaxy_splitting_scheme'] == '4pops': Fcut = params['cuts']['fcut'] age_cut = params['cuts']['age_cut'] cats.separate_4pops(Fcut=Fcut,age_cut=age_cut) cats.get_4pops_mass_redshift_bins(z_nodes,m_nodes) binned_ra_dec = cats.subset_positions(cats.id_z_ms_4pop) elif params['galaxy_splitting_scheme'] == 'uvj': c_nodes = params['populations']['c_nodes'] c_names = 
params['populations']['pop_names'] cats.table['UVJ']=np.sqrt((cats.table['rf_U_V'] - np.min(cats.table['rf_U_V']))**2 + (cats.table['rf_V_J']-np.min(cats.table['rf_V_J'])) ** 2) cats.separate_uvj_pops(c_nodes) cats.get_mass_redshift_uvj_bins(z_nodes,m_nodes,c_names) binned_ra_dec = cats.subset_positions(cats.id_z_ms_pop) elif params['galaxy_splitting_scheme'] == 'general': cuts_dict = params['populations'] cats.separate_pops_by_name(cuts_dict) cats.get_subpop_ids(z_nodes, m_nodes, cuts_dict) binned_ra_dec = cats.subset_positions(cats.subpop_ids) print z_nodes return binned_ra_dec def save_stacked_fluxes(stacked_fluxes, params, out_file_path, out_file_suffix, IDs=None): fpath = "%s/%s_%s%s.p" % (out_file_path, params['io']['flux_densities_filename'],params['io']['shortname'],out_file_suffix) print 'pickling to '+fpath if not os.path.exists(out_file_path): os.makedirs(out_file_path) if IDs == None: pickle.dump( stacked_fluxes, open( fpath, "wb" )) #, protocol=2 ) else: pickle.dump( [IDs, stacked_fluxes], open( fpath, "wb" )) #, protocol=2 ) def save_paramfile(params): fp_in = params['io']['param_file_path'] if params['bootstrap'] == True: outdir = params['io']['output_folder']+'/bootstrapped_fluxes/'+params['io']['shortname'] else: outdir = params['io']['output_folder']+'/simstack_fluxes/'+params['io']['shortname'] print 'writing parameter file to '+outdir if not os.path.exists(outdir): os.makedirs(outdir) fname = os.path.basename(fp_in) fp_out = os.path.join(outdir, fname) logging.info("Copying parameter file...") logging.info(" FROM : {}".format(fp_in)) logging.info(" TO : {}".format(fp_out)) logging.info("") shutil.copyfile(fp_in, fp_out) if __name__=="__main__": main() else: logging.info("Note: `mapit` module not being run as main executable.")
mit
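A minimal read-back sketch (not part of the repository above): the driver's save_stacked_fluxes() pickles either stacked_fluxes alone or the pair [IDs, stacked_fluxes], so a hypothetical loader only needs to know which form was written. Python 3's pickle is used here, while the script itself uses the Python 2 cPickle module; the path handling is an assumption for illustration.

import pickle

def load_stacked_fluxes(fpath, has_ids=True):
    # save_stacked_fluxes dumps either `stacked_fluxes` or `[IDs, stacked_fluxes]`
    with open(fpath, "rb") as f:
        payload = pickle.load(f)
    if has_ids:
        ids, stacked_fluxes = payload
        return ids, stacked_fluxes
    return None, payload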
nhejazi/scikit-learn
examples/applications/plot_out_of_core_classification.py
10
13654
""" ====================================================== Out-of-core classification of text documents ====================================================== This is an example showing how scikit-learn can be used for classification using an out-of-core approach: learning from data that doesn't fit into main memory. We make use of an online classifier, i.e., one that supports the partial_fit method, that will be fed with batches of examples. To guarantee that the features space remains the same over time we leverage a HashingVectorizer that will project each example into the same feature space. This is especially useful in the case of text classification where new features (words) may appear in each batch. The dataset used in this example is Reuters-21578 as provided by the UCI ML repository. It will be automatically downloaded and uncompressed on first run. The plot represents the learning curve of the classifier: the evolution of classification accuracy over the course of the mini-batches. Accuracy is measured on the first 1000 samples, held out as a validation set. To limit the memory consumption, we queue examples up to a fixed amount before feeding them to the learner. """ # Authors: Eustache Diemert <eustache@diemert.fr> # @FedericoV <https://github.com/FedericoV/> # License: BSD 3 clause from __future__ import print_function from glob import glob import itertools import os.path import re import tarfile import time import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams from sklearn.externals.six.moves import html_parser from sklearn.externals.six.moves import urllib from sklearn.datasets import get_data_home from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import SGDClassifier from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import Perceptron from sklearn.naive_bayes import MultinomialNB def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() ############################################################################### # Reuters Dataset related routines # -------------------------------- # class ReutersParser(html_parser.HTMLParser): """Utility class to parse a SGML file and yield documents one at a time.""" def __init__(self, encoding='latin-1'): html_parser.HTMLParser.__init__(self) self._reset() self.encoding = encoding def handle_starttag(self, tag, attrs): method = 'start_' + tag getattr(self, method, lambda x: None)(attrs) def handle_endtag(self, tag): method = 'end_' + tag getattr(self, method, lambda: None)() def _reset(self): self.in_title = 0 self.in_body = 0 self.in_topics = 0 self.in_topic_d = 0 self.title = "" self.body = "" self.topics = [] self.topic_d = "" def parse(self, fd): self.docs = [] for chunk in fd: self.feed(chunk.decode(self.encoding)) for doc in self.docs: yield doc self.docs = [] self.close() def handle_data(self, data): if self.in_body: self.body += data elif self.in_title: self.title += data elif self.in_topic_d: self.topic_d += data def start_reuters(self, attributes): pass def end_reuters(self): self.body = re.sub(r'\s+', r' ', self.body) self.docs.append({'title': self.title, 'body': self.body, 'topics': self.topics}) self._reset() def start_title(self, attributes): self.in_title = 1 def end_title(self): self.in_title = 0 def start_body(self, attributes): self.in_body = 1 def end_body(self): self.in_body = 0 def start_topics(self, attributes): self.in_topics = 1 def end_topics(self): 
self.in_topics = 0 def start_d(self, attributes): self.in_topic_d = 1 def end_d(self): self.in_topic_d = 0 self.topics.append(self.topic_d) self.topic_d = "" def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. """ DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/' 'reuters21578-mld/reuters21578.tar.gz') ARCHIVE_FILENAME = 'reuters21578.tar.gz' if data_path is None: data_path = os.path.join(get_data_home(), "reuters") if not os.path.exists(data_path): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) os.mkdir(data_path) def progress(blocknum, bs, size): total_sz_mb = '%.2f MB' % (size / 1e6) current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6) if _not_in_sphinx(): print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb), end='') archive_path = os.path.join(data_path, ARCHIVE_FILENAME) urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): print('\r', end='') print("untarring Reuters dataset...") tarfile.open(archive_path, 'r:gz').extractall(data_path) print("done.") parser = ReutersParser() for filename in glob(os.path.join(data_path, "*.sgm")): for doc in parser.parse(open(filename, 'rb')): yield doc ############################################################################### # Main # ---- # # Create the vectorizer and limit the number of features to a reasonable # maximum vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18, alternate_sign=False) # Iterator over parsed Reuters SGML files. data_stream = stream_reuters_documents() # We learn a binary classification between the "acq" class and all the others. # "acq" was chosen as it is more or less evenly distributed in the Reuters # files. For other datasets, one should take care of creating a test set with # a realistic portion of positive instances. all_classes = np.array([0, 1]) positive_class = 'acq' # Here are some classifiers that support the `partial_fit` method partial_fit_classifiers = { 'SGD': SGDClassifier(), 'Perceptron': Perceptron(), 'NB Multinomial': MultinomialNB(alpha=0.01), 'Passive-Aggressive': PassiveAggressiveClassifier(), } def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: size is before excluding invalid docs with no topics assigned. 
""" data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics']) for doc in itertools.islice(doc_iter, size) if doc['topics']] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int) def iter_minibatches(doc_iter, minibatch_size): """Generator of minibatches.""" X_text, y = get_minibatch(doc_iter, minibatch_size) while len(X_text): yield X_text, y X_text, y = get_minibatch(doc_iter, minibatch_size) # test data statistics test_stats = {'n_test': 0, 'n_test_pos': 0} # First we hold out a number of examples to estimate accuracy n_test_documents = 1000 tick = time.time() X_test_text, y_test = get_minibatch(data_stream, 1000) parsing_time = time.time() - tick tick = time.time() X_test = vectorizer.transform(X_test_text) vectorizing_time = time.time() - tick test_stats['n_test'] += len(y_test) test_stats['n_test_pos'] += sum(y_test) print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test))) def progress(cls_name, stats): """Report progress information, return a string.""" duration = time.time() - stats['t0'] s = "%20s classifier : \t" % cls_name s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats s += "accuracy: %(accuracy).3f " % stats s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration) return s cls_stats = {} for cls_name in partial_fit_classifiers: stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(), 'runtime_history': [(0, 0)], 'total_fit_time': 0.0} cls_stats[cls_name] = stats get_minibatch(data_stream, n_test_documents) # Discard test set # We will feed the classifier with mini-batches of 1000 documents; this means # we have at most 1000 docs in memory at any time. The smaller the document # batch, the bigger the relative overhead of the partial fit methods. minibatch_size = 1000 # Create the data_stream that parses Reuters SGML files and iterates on # documents as a stream. 
minibatch_iterators = iter_minibatches(data_stream, minibatch_size) total_vect_time = 0.0 # Main loop : iterate on mini-batches of examples for i, (X_train_text, y_train) in enumerate(minibatch_iterators): tick = time.time() X_train = vectorizer.transform(X_train_text) total_vect_time += time.time() - tick for cls_name, cls in partial_fit_classifiers.items(): tick = time.time() # update estimator with examples in the current mini-batch cls.partial_fit(X_train, y_train, classes=all_classes) # accumulate test accuracy stats cls_stats[cls_name]['total_fit_time'] += time.time() - tick cls_stats[cls_name]['n_train'] += X_train.shape[0] cls_stats[cls_name]['n_train_pos'] += sum(y_train) tick = time.time() cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test) cls_stats[cls_name]['prediction_time'] = time.time() - tick acc_history = (cls_stats[cls_name]['accuracy'], cls_stats[cls_name]['n_train']) cls_stats[cls_name]['accuracy_history'].append(acc_history) run_history = (cls_stats[cls_name]['accuracy'], total_vect_time + cls_stats[cls_name]['total_fit_time']) cls_stats[cls_name]['runtime_history'].append(run_history) if i % 3 == 0: print(progress(cls_name, cls_stats[cls_name])) if i % 3 == 0: print('\n') ############################################################################### # Plot results # ------------ def plot_accuracy(x, y, x_legend): """Plot accuracy as a function of x.""" x = np.array(x) y = np.array(y) plt.title('Classification accuracy as a function of %s' % x_legend) plt.xlabel('%s' % x_legend) plt.ylabel('Accuracy') plt.grid(True) plt.plot(x, y) rcParams['legend.fontsize'] = 10 cls_names = list(sorted(cls_stats.keys())) # Plot accuracy evolution plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with #examples accuracy, n_examples = zip(*stats['accuracy_history']) plot_accuracy(n_examples, accuracy, "training examples (#)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with runtime accuracy, runtime = zip(*stats['runtime_history']) plot_accuracy(runtime, accuracy, 'runtime (s)') ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') # Plot fitting times plt.figure() fig = plt.gcf() cls_runtime = [] for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['total_fit_time']) cls_runtime.append(total_vect_time) cls_names.append('Vectorization') bar_colors = ['b', 'g', 'r', 'c', 'm', 'y'] ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=10) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Training Times') def autolabel(rectangles): """attach some text vi autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, '%.4f' % height, ha='center', va='bottom') autolabel(rectangles) plt.show() # Plot prediction times plt.figure() cls_runtime = [] cls_names = list(sorted(cls_stats.keys())) for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['prediction_time']) cls_runtime.append(parsing_time) cls_names.append('Read/Parse\n+Feat.Extr.') cls_runtime.append(vectorizing_time) cls_names.append('Hashing\n+Vect.') ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, 
color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=8) plt.setp(plt.xticks()[1], rotation=30) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Prediction Times (%d instances)' % n_test_documents) autolabel(rectangles) plt.show()
bsd-3-clause
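A condensed sketch of the out-of-core pattern the example above relies on: a stateless HashingVectorizer keeps the feature space identical across batches, and partial_fit updates the classifier one mini-batch at a time. The toy in-memory "stream" below is an assumption for illustration; the real example streams Reuters documents from disk.

import numpy as np
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier

vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               alternate_sign=False)
clf = SGDClassifier()
classes = np.array([0, 1])

# Pretend these arrive in batches that would not all fit in memory at once.
batches = [
    (["cheap oil acquisition", "weather report"], [1, 0]),
    (["merger acquisition talks", "sports results"], [1, 0]),
]
for texts, y in batches:
    X = vectorizer.transform(texts)          # no fit step: hashing is stateless
    clf.partial_fit(X, y, classes=classes)   # incremental update

print(clf.predict(vectorizer.transform(["acquisition announced"])))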
xubenben/scikit-learn
examples/linear_model/plot_logistic_l1_l2_sparsity.py
384
2601
""" ============================================== L1 Penalty and Sparsity in Logistic Regression ============================================== Comparison of the sparsity (percentage of zero coefficients) of solutions when L1 and L2 penalty are used for different values of C. We can see that large values of C give more freedom to the model. Conversely, smaller values of C constrain the model more. In the L1 penalty case, this leads to sparser solutions. We classify 8x8 images of digits into two classes: 0-4 against 5-9. The visualization shows coefficients of the models for varying C. """ print(__doc__) # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn import datasets from sklearn.preprocessing import StandardScaler digits = datasets.load_digits() X, y = digits.data, digits.target X = StandardScaler().fit_transform(X) # classify small against large digits y = (y > 4).astype(np.int) # Set regularization parameter for i, C in enumerate((100, 1, 0.01)): # turn down tolerance for short training time clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01) clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01) clf_l1_LR.fit(X, y) clf_l2_LR.fit(X, y) coef_l1_LR = clf_l1_LR.coef_.ravel() coef_l2_LR = clf_l2_LR.coef_.ravel() # coef_l1_LR contains zeros due to the # L1 sparsity inducing norm sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100 sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100 print("C=%.2f" % C) print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR) print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y)) print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR) print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y)) l1_plot = plt.subplot(3, 2, 2 * i + 1) l2_plot = plt.subplot(3, 2, 2 * (i + 1)) if i == 0: l1_plot.set_title("L1 penalty") l2_plot.set_title("L2 penalty") l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) plt.text(-8, 3, "C = %.2f" % C) l1_plot.set_xticks(()) l1_plot.set_yticks(()) l2_plot.set_xticks(()) l2_plot.set_yticks(()) plt.show()
bsd-3-clause
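A minimal sketch of the sparsity measurement the example above performs, on synthetic data rather than the digits set (make_classification and the liblinear solver choice are illustrative assumptions): the L1 penalty drives many coefficients exactly to zero, while L2 only shrinks them.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=50, n_informative=5,
                           random_state=0)
for penalty in ('l1', 'l2'):
    clf = LogisticRegression(C=0.1, penalty=penalty, solver='liblinear', tol=0.01)
    clf.fit(X, y)
    sparsity = np.mean(clf.coef_.ravel() == 0) * 100   # % of exactly-zero weights
    print("%s penalty: %.1f%% zero coefficients" % (penalty, sparsity))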
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/sklearn/metrics/pairwise.py
7
47000
# -*- coding: utf-8 -*- # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Robert Layton <robertlayton@gmail.com> # Andreas Mueller <amueller@ais.uni-bonn.de> # Philippe Gervais <philippe.gervais@inria.fr> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause import itertools from functools import partial import warnings import numpy as np from scipy.spatial import distance from scipy.sparse import csr_matrix from scipy.sparse import issparse from ..utils import check_array from ..utils import gen_even_slices from ..utils import gen_batches from ..utils.extmath import row_norms, safe_sparse_dot from ..preprocessing import normalize from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.joblib import cpu_count from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan # Utility Functions def _return_float_dtype(X, Y): """ 1. If dtype of X and Y is float32, then dtype float32 is returned. 2. Else dtype float is returned. """ if not issparse(X) and not isinstance(X, np.ndarray): X = np.asarray(X) if Y is None: Y_dtype = X.dtype elif not issparse(Y) and not isinstance(Y, np.ndarray): Y = np.asarray(Y) Y_dtype = Y.dtype else: Y_dtype = Y.dtype if X.dtype == Y_dtype == np.float32: dtype = np.float32 else: dtype = np.float return X, Y, dtype def check_pairwise_arrays(X, Y, precomputed=False, dtype=None): """ Set X and Y appropriately and checks inputs If Y is None, it is set as a pointer to X (i.e. not a copy). If Y is given, this does not happen. All distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats (or dtype if provided). Finally, the function checks that the size of the second dimension of the two arrays is equal, or the equivalent check for a precomputed distance matrix. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) precomputed : bool True if X is to be treated as precomputed distances to the samples in Y. dtype : string, type, list of types or None (default=None) Data type required for X and Y. If None, the dtype will be an appropriate float type selected by _return_float_dtype. .. versionadded:: 0.18 Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y, dtype_float = _return_float_dtype(X, Y) warn_on_dtype = dtype is not None estimator = 'check_pairwise_arrays' if dtype is None: dtype = dtype_float if Y is X or Y is None: X = Y = check_array(X, accept_sparse='csr', dtype=dtype, warn_on_dtype=warn_on_dtype, estimator=estimator) else: X = check_array(X, accept_sparse='csr', dtype=dtype, warn_on_dtype=warn_on_dtype, estimator=estimator) Y = check_array(Y, accept_sparse='csr', dtype=dtype, warn_on_dtype=warn_on_dtype, estimator=estimator) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError("Precomputed metric requires shape " "(n_queries, n_indexed). Got (%d, %d) " "for %d indexed." 
% (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError("Incompatible dimension for X and Y matrices: " "X.shape[1] == %d while Y.shape[1] == %d" % ( X.shape[1], Y.shape[1])) return X, Y def check_paired_arrays(X, Y): """ Set X and Y appropriately and checks inputs for paired distances All paired distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the dimensions of the two arrays are equal. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y = check_pairwise_arrays(X, Y) if X.shape != Y.shape: raise ValueError("X and Y should be of same shape. They were " "respectively %r and %r long." % (X.shape, Y.shape)) return X, Y # Pairwise distances def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False, X_norm_squared=None): """ Considering the rows of X (and Y=X) as vectors, compute the distance matrix between each pair of vectors. For efficiency reasons, the euclidean distance between a pair of row vector x and y is computed as:: dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) This formulation has two advantages over other ways of computing distances. First, it is computationally efficient when dealing with sparse data. Second, if one argument varies but the other remains unchanged, then `dot(x, x)` and/or `dot(y, y)` can be pre-computed. However, this is not the most precise way of doing this computation, and the distance matrix returned by this function may not be exactly symmetric as required by, e.g., ``scipy.spatial.distance`` functions. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_1, n_features) Y : {array-like, sparse matrix}, shape (n_samples_2, n_features) Y_norm_squared : array-like, shape (n_samples_2, ), optional Pre-computed dot-products of vectors in Y (e.g., ``(Y**2).sum(axis=1)``) squared : boolean, optional Return squared Euclidean distances. X_norm_squared : array-like, shape = [n_samples_1], optional Pre-computed dot-products of vectors in X (e.g., ``(X**2).sum(axis=1)``) Returns ------- distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2) Examples -------- >>> from sklearn.metrics.pairwise import euclidean_distances >>> X = [[0, 1], [1, 1]] >>> # distance between rows of X >>> euclidean_distances(X, X) array([[ 0., 1.], [ 1., 0.]]) >>> # get distance to origin >>> euclidean_distances(X, [[0, 0]]) array([[ 1. ], [ 1.41421356]]) See also -------- paired_distances : distances betweens pairs of elements of X and Y. 
""" X, Y = check_pairwise_arrays(X, Y) if X_norm_squared is not None: XX = check_array(X_norm_squared) if XX.shape == (1, X.shape[0]): XX = XX.T elif XX.shape != (X.shape[0], 1): raise ValueError( "Incompatible dimensions for X and X_norm_squared") else: XX = row_norms(X, squared=True)[:, np.newaxis] if X is Y: # shortcut in the common case euclidean_distances(X, X) YY = XX.T elif Y_norm_squared is not None: YY = np.atleast_2d(Y_norm_squared) if YY.shape != (1, Y.shape[0]): raise ValueError( "Incompatible dimensions for Y and Y_norm_squared") else: YY = row_norms(Y, squared=True)[np.newaxis, :] distances = safe_sparse_dot(X, Y.T, dense_output=True) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, out=distances) if X is Y: # Ensure that distances between vectors and themselves are set to 0.0. # This may not be the case due to floating point rounding errors. distances.flat[::distances.shape[0] + 1] = 0.0 return distances if squared else np.sqrt(distances, out=distances) def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). The minimal distances are also returned. This is mostly equivalent to calling: (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), pairwise_distances(X, Y=Y, metric=metric).min(axis=axis)) but uses much less memory, and is faster for large arrays. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples1, n_features) Array containing points. Y : {array-like, sparse matrix}, shape (n_samples2, n_features) Arrays containing points. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. metric : string or callable, default 'euclidean' metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric_kwargs : dict, optional Keyword arguments to pass to specified metric function. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. distances : numpy.ndarray distances[i] is the distance between the i-th row in X and the argmin[i]-th row in Y. 
See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin """ dist_func = None if metric in PAIRWISE_DISTANCE_FUNCTIONS: dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif not callable(metric) and not isinstance(metric, str): raise ValueError("'metric' must be a string or a callable") X, Y = check_pairwise_arrays(X, Y) if metric_kwargs is None: metric_kwargs = {} if axis == 0: X, Y = Y, X # Allocate output arrays indices = np.empty(X.shape[0], dtype=np.intp) values = np.empty(X.shape[0]) values.fill(np.infty) for chunk_x in gen_batches(X.shape[0], batch_size): X_chunk = X[chunk_x, :] for chunk_y in gen_batches(Y.shape[0], batch_size): Y_chunk = Y[chunk_y, :] if dist_func is not None: if metric == 'euclidean': # special case, for speed d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True) d_chunk *= -2 d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis] d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :] np.maximum(d_chunk, 0, d_chunk) else: d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs) else: d_chunk = pairwise_distances(X_chunk, Y_chunk, metric=metric, **metric_kwargs) # Update indices and minimum values using chunk min_indices = d_chunk.argmin(axis=1) min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start), min_indices] flags = values[chunk_x] > min_values indices[chunk_x][flags] = min_indices[flags] + chunk_y.start values[chunk_x][flags] = min_values[flags] if metric == "euclidean" and not metric_kwargs.get("squared", False): np.sqrt(values, values) return indices, values def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). This is mostly equivalent to calling: pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis) but uses much less memory, and is faster for large arrays. This function works with dense 2D arrays only. Parameters ---------- X : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) Y : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. metric : string or callable metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. 
The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric_kwargs : dict keyword arguments to pass to specified metric function. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin_min """ if metric_kwargs is None: metric_kwargs = {} return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size, metric_kwargs)[0] def manhattan_distances(X, Y=None, sum_over_features=True, size_threshold=None): """ Compute the L1 distances between the vectors in X and Y. With sum_over_features equal to False it returns the componentwise distances. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like An array with shape (n_samples_X, n_features). Y : array_like, optional An array with shape (n_samples_Y, n_features). sum_over_features : bool, default=True If True the function returns the pairwise distance matrix else it returns the componentwise L1 pairwise-distances. Not supported for sparse matrix inputs. size_threshold : int, default=5e8 Unused parameter. Returns ------- D : array If sum_over_features is False shape is (n_samples_X * n_samples_Y, n_features) and D contains the componentwise L1 pairwise-distances (ie. absolute difference), else shape is (n_samples_X, n_samples_Y) and D contains the pairwise L1 distances. Examples -------- >>> from sklearn.metrics.pairwise import manhattan_distances >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS array([[ 0.]]) >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[1, 2], [3, 4]],\ [[1, 2], [0, 3]])#doctest:+ELLIPSIS array([[ 0., 2.], [ 4., 4.]]) >>> import numpy as np >>> X = np.ones((1, 2)) >>> y = 2 * np.ones((2, 2)) >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS array([[ 1., 1.], [ 1., 1.]]...) """ if size_threshold is not None: warnings.warn('Use of the "size_threshold" is deprecated ' 'in 0.19 and it will be removed version ' '0.21 of scikit-learn', DeprecationWarning) X, Y = check_pairwise_arrays(X, Y) if issparse(X) or issparse(Y): if not sum_over_features: raise TypeError("sum_over_features=%r not supported" " for sparse matrices" % sum_over_features) X = csr_matrix(X, copy=False) Y = csr_matrix(Y, copy=False) D = np.zeros((X.shape[0], Y.shape[0])) _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, X.shape[1], D) return D if sum_over_features: return distance.cdist(X, Y, 'cityblock') D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] D = np.abs(D, D) return D.reshape((-1, X.shape[1])) def cosine_distances(X, Y=None): """Compute cosine distance between samples in X and Y. Cosine distance is defined as 1.0 minus the cosine similarity. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like, sparse matrix with shape (n_samples_X, n_features). Y : array_like, sparse matrix (optional) with shape (n_samples_Y, n_features). Returns ------- distance matrix : array An array with shape (n_samples_X, n_samples_Y). 
See also -------- sklearn.metrics.pairwise.cosine_similarity scipy.spatial.distance.cosine (dense matrices only) """ # 1.0 - cosine_similarity(X, Y) without copy S = cosine_similarity(X, Y) S *= -1 S += 1 np.clip(S, 0, 2, out=S) if X is Y or Y is None: # Ensure that distances between vectors and themselves are set to 0.0. # This may not be the case due to floating point rounding errors. S[np.diag_indices_from(S)] = 0.0 return S # Paired distances def paired_euclidean_distances(X, Y): """ Computes the paired euclidean distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) return row_norms(X - Y) def paired_manhattan_distances(X, Y): """Compute the L1 distances between the vectors in X and Y. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) diff = X - Y if issparse(diff): diff.data = np.abs(diff.data) return np.squeeze(np.array(diff.sum(axis=1))) else: return np.abs(diff).sum(axis=-1) def paired_cosine_distances(X, Y): """ Computes the paired cosine distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray, shape (n_samples, ) Notes ------ The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm """ X, Y = check_paired_arrays(X, Y) return .5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { 'cosine': paired_cosine_distances, 'euclidean': paired_euclidean_distances, 'l2': paired_euclidean_distances, 'l1': paired_manhattan_distances, 'manhattan': paired_manhattan_distances, 'cityblock': paired_manhattan_distances} def paired_distances(X, Y, metric="euclidean", **kwds): """ Computes the paired distances between X and Y. Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc... Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : ndarray (n_samples, n_features) Array 1 for distance computation. Y : ndarray (n_samples, n_features) Array 2 for distance computation. metric : string or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options specified in PAIRED_DISTANCES, including "euclidean", "manhattan", or "cosine". Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. Returns ------- distances : ndarray (n_samples, ) Examples -------- >>> from sklearn.metrics.pairwise import paired_distances >>> X = [[0, 1], [1, 1]] >>> Y = [[0, 1], [2, 1]] >>> paired_distances(X, Y) array([ 0., 1.]) See also -------- pairwise_distances : pairwise distances. 
""" if metric in PAIRED_DISTANCES: func = PAIRED_DISTANCES[metric] return func(X, Y) elif callable(metric): # Check the matrix first (it is usually done by the metric) X, Y = check_paired_arrays(X, Y) distances = np.zeros(len(X)) for i in range(len(X)): distances[i] = metric(X[i], Y[i]) return distances else: raise ValueError('Unknown distance %s' % metric) # Kernels def linear_kernel(X, Y=None): """ Compute the linear kernel between X and Y. Read more in the :ref:`User Guide <linear_kernel>`. Parameters ---------- X : array of shape (n_samples_1, n_features) Y : array of shape (n_samples_2, n_features) Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) return safe_sparse_dot(X, Y.T, dense_output=True) def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): """ Compute the polynomial kernel between X and Y:: K(X, Y) = (gamma <X, Y> + coef0)^degree Read more in the :ref:`User Guide <polynomial_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) degree : int, default 3 gamma : float, default None if None, defaults to 1.0 / n_features coef0 : int, default 1 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 K **= degree return K def sigmoid_kernel(X, Y=None, gamma=None, coef0=1): """ Compute the sigmoid kernel between X and Y:: K(X, Y) = tanh(gamma <X, Y> + coef0) Read more in the :ref:`User Guide <sigmoid_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) gamma : float, default None If None, defaults to 1.0 / n_features coef0 : int, default 1 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 np.tanh(K, K) # compute tanh in-place return K def rbf_kernel(X, Y=None, gamma=None): """ Compute the rbf (gaussian) kernel between X and Y:: K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : array of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default None If None, defaults to 1.0 / n_features Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K def laplacian_kernel(X, Y=None, gamma=None): """Compute the laplacian kernel between X and Y. The laplacian kernel is defined as:: K(x, y) = exp(-gamma ||x-y||_1) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <laplacian_kernel>`. .. 
versionadded:: 0.17 Parameters ---------- X : array of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default None If None, defaults to 1.0 / n_features Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = -gamma * manhattan_distances(X, Y) np.exp(K, K) # exponentiate K in-place return K def cosine_similarity(X, Y=None, dense_output=True): """Compute cosine similarity between samples in X and Y. Cosine similarity, or the cosine kernel, computes similarity as the normalized dot product of X and Y: K(X, Y) = <X, Y> / (||X||*||Y||) On L2-normalized data, this function is equivalent to linear_kernel. Read more in the :ref:`User Guide <cosine_similarity>`. Parameters ---------- X : ndarray or sparse array, shape: (n_samples_X, n_features) Input data. Y : ndarray or sparse array, shape: (n_samples_Y, n_features) Input data. If ``None``, the output will be the pairwise similarities between all samples in ``X``. dense_output : boolean (optional), default True Whether to return dense output even when the input is sparse. If ``False``, the output is sparse if both input arrays are sparse. .. versionadded:: 0.17 parameter ``dense_output`` for dense output. Returns ------- kernel matrix : array An array with shape (n_samples_X, n_samples_Y). """ # to avoid recursive import X, Y = check_pairwise_arrays(X, Y) X_normalized = normalize(X, copy=True) if X is Y: Y_normalized = X_normalized else: Y_normalized = normalize(Y, copy=True) K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output) return K def additive_chi2_kernel(X, Y=None): """Computes the additive chi-squared kernel between observations in X and Y The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result def chi2_kernel(X, Y=None, gamma=1.): """Computes the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. 
This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default=1. Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- additive_chi2_kernel : The additive version of this kernel sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K) # Helper functions - distance PAIRWISE_DISTANCE_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'cityblock': manhattan_distances, 'cosine': cosine_distances, 'euclidean': euclidean_distances, 'l2': euclidean_distances, 'l1': manhattan_distances, 'manhattan': manhattan_distances, 'precomputed': None, # HACK: precomputed is always allowed, never called } def distance_metrics(): """Valid metrics for pairwise_distances. This function simply returns the valid pairwise distance metrics. It exists to allow for a description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: ============ ==================================== metric Function ============ ==================================== 'cityblock' metrics.pairwise.manhattan_distances 'cosine' metrics.pairwise.cosine_distances 'euclidean' metrics.pairwise.euclidean_distances 'l1' metrics.pairwise.manhattan_distances 'l2' metrics.pairwise.euclidean_distances 'manhattan' metrics.pairwise.manhattan_distances ============ ==================================== Read more in the :ref:`User Guide <metrics>`. 
""" return PAIRWISE_DISTANCE_FUNCTIONS def _parallel_pairwise(X, Y, func, n_jobs, **kwds): """Break the pairwise matrix in n_jobs even slices and compute them in parallel""" if n_jobs < 0: n_jobs = max(cpu_count() + 1 + n_jobs, 1) if Y is None: Y = X if n_jobs == 1: # Special case to avoid picklability checks in delayed return func(X, Y, **kwds) # TODO: in some cases, backend='threading' may be appropriate fd = delayed(func) ret = Parallel(n_jobs=n_jobs, verbose=0)( fd(X, Y[s], **kwds) for s in gen_even_slices(Y.shape[0], n_jobs)) return np.hstack(ret) def _pairwise_callable(X, Y, metric, **kwds): """Handle the callable case for pairwise_{distances,kernels} """ X, Y = check_pairwise_arrays(X, Y) if X is Y: # Only calculate metric for upper triangle out = np.zeros((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.combinations(range(X.shape[0]), 2) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) # Make symmetric # NB: out += out.T will produce incorrect results out = out + out.T # Calculate diagonal # NB: nonzero diagonals are allowed for both metrics and kernels for i in range(X.shape[0]): x = X[i] out[i, i] = metric(x, x, **kwds) else: # Calculate all cells out = np.empty((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.product(range(X.shape[0]), range(Y.shape[0])) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) return out _VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock', 'braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"] def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds): """ Compute the distance matrix from a vector array X and optional Y. This method takes either a vector array or a distance matrix, and returns a distance matrix. If the input is a vector array, the distances are computed. If the input is a distances matrix, it is returned instead. This method provides a safe way to take a distance matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise distance between the arrays from both X and Y. Valid values for metric are: - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']. These metrics support sparse matrix inputs. - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. These metrics do not support sparse matrix inputs. Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are valid scipy.spatial.distance metrics), the scikit-learn implementation will be used, which is faster and has support for sparse matrices (except for 'cityblock'). For a verbose description of the metrics from scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics function. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. 
Y : array [n_samples_b, n_features], optional An optional second feature array. Only allowed if metric != "precomputed". metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A distance matrix D such that D_{i, j} is the distance between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then D_{i, j} is the distance between the ith array from X and the jth array from Y. """ if (metric not in _VALID_METRICS and not callable(metric) and metric != "precomputed"): raise ValueError("Unknown metric %s. " "Valid metrics are %s, or 'precomputed', or a " "callable" % (metric, _VALID_METRICS)) if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: if issparse(X) or issparse(Y): raise TypeError("scipy distance metrics do not" " support sparse matrices.") dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None X, Y = check_pairwise_arrays(X, Y, dtype=dtype) if n_jobs == 1 and X is Y: return distance.squareform(distance.pdist(X, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) # These distances recquire boolean arrays, when using scipy.spatial.distance PAIRWISE_BOOLEAN_FUNCTIONS = [ 'dice', 'jaccard', 'kulsinski', 'matching', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath', 'yule', ] # Helper functions - distance PAIRWISE_KERNEL_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'additive_chi2': additive_chi2_kernel, 'chi2': chi2_kernel, 'linear': linear_kernel, 'polynomial': polynomial_kernel, 'poly': polynomial_kernel, 'rbf': rbf_kernel, 'laplacian': laplacian_kernel, 'sigmoid': sigmoid_kernel, 'cosine': cosine_similarity, } def kernel_metrics(): """ Valid metrics for pairwise_kernels This function simply returns the valid pairwise distance metrics. It exists, however, to allow for a verbose description of the mapping for each of the valid strings. 
The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'additive_chi2' sklearn.pairwise.additive_chi2_kernel 'chi2' sklearn.pairwise.chi2_kernel 'linear' sklearn.pairwise.linear_kernel 'poly' sklearn.pairwise.polynomial_kernel 'polynomial' sklearn.pairwise.polynomial_kernel 'rbf' sklearn.pairwise.rbf_kernel 'laplacian' sklearn.pairwise.laplacian_kernel 'sigmoid' sklearn.pairwise.sigmoid_kernel 'cosine' sklearn.pairwise.cosine_similarity =============== ======================================== Read more in the :ref:`User Guide <metrics>`. """ return PAIRWISE_KERNEL_FUNCTIONS KERNEL_PARAMS = { "additive_chi2": (), "chi2": frozenset(["gamma"]), "cosine": (), "linear": (), "poly": frozenset(["gamma", "degree", "coef0"]), "polynomial": frozenset(["gamma", "degree", "coef0"]), "rbf": frozenset(["gamma"]), "laplacian": frozenset(["gamma"]), "sigmoid": frozenset(["gamma", "coef0"]), } def pairwise_kernels(X, Y=None, metric="linear", filter_params=False, n_jobs=1, **kwds): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are:: ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine'] Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. filter_params : boolean Whether to filter invalid parameters or not. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. **kwds : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. 
""" # import GPKernel locally to prevent circular imports from ..gaussian_process.kernels import Kernel as GPKernel if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif isinstance(metric, GPKernel): func = metric.__call__ elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = dict((k, kwds[k]) for k in kwds if k in KERNEL_PARAMS[metric]) func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
mit
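A minimal usage sketch for the two public entry points documented in the excerpt above, pairwise_distances and pairwise_kernels. It assumes a current scikit-learn and NumPy install; the array values are made up for illustration and are not part of the original file.

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances, pairwise_kernels

X = np.array([[0.0, 1.0], [1.0, 1.0], [2.0, 0.0]])
Y = np.array([[0.0, 0.0], [1.0, 0.0]])

# Distance matrix between rows of X and rows of Y (shape (3, 2)).
D = pairwise_distances(X, Y, metric="euclidean")

# Kernel matrix; gamma is forwarded to rbf_kernel through **kwds.
K = pairwise_kernels(X, Y, metric="rbf", gamma=0.5)

print(D.shape, K.shape)   # (3, 2) (3, 2)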
Durabun/QWell
index_plot.py
1
1705
import numpy as np
import matplotlib.pyplot as plt

evenR = np.array([1.212,3.368])
oddR = np.array([2.381])
S = evenR.size+oddR.size
prop = np.zeros(S)
tunn = np.zeros(S)
i=0
j=1
a=0.469

def rad(x):
    return np.sqrt((1.1*np.pi)**2-x**2)

print (S)
print (prop)
print (tunn)

while i< evenR.size:
    prop[2*i] = evenR[i]/a
    tunn[2*i] = rad(evenR[i])/a
    i=i+1
    print (i)

print("odd")

while j-1 < oddR.size:
    prop[j] = oddR[j-1]/a
    tunn[j] = rad(oddR[j-1])/a
    j=j+2
    print (j)

print (prop)
print (tunn)

Bcoeff = np.array([0.6318,0.6171,0.4823])

#def Bfn(k,K):
#    return k+1.2*K

l = 0
#while l < S:
#    Bcoeff[l] = Bfn(prop[l],tunn[l])
#    l=l+1

print (Bcoeff)

z = 0

def ef1(B,K,k,a):
    return lambda x: 2*B*np.exp((a+x)*K)*np.cos(a*k)
def ef2(B,k):
    return lambda x: 2*B*np.cos(k*x)
def ef3(B,K,k,a):
    return lambda x: 2*B*np.exp((a-x)*K)*np.cos(a*k)
def of1(B,K,k,a):
    return lambda x: -2*B*np.exp((a+x)*K)*np.sin(a*k)
def of2(B,k):
    return lambda x: 2*B*np.sin(k*x)
def of3(B,K,k,a):
    return lambda x: 2*B*np.exp((a-x)*K)*np.sin(a*k)

r1 = np.arange(-5,-a,0.001)
r2 = np.arange(-a,a,0.001)
r3 = np.arange(a,5,0.001)
color = ["r","b","g"]

while z <S:
#    plt.figure
    if z%2 == 1:
        plt1 = of1(Bcoeff[z],tunn[z],prop[z],a)
        plt2 = of2(Bcoeff[z],prop[z])
        plt3 = of3(Bcoeff[z],tunn[z],prop[z],a)
        plt.plot(r1,plt1(r1),color[z],r2,plt2(r2),color[z],r3,plt3(r3),color[z])
#        plt.plot(r2,plt2(r2))
#        plt.plot(r3,plt3(r3))
    else:
        plt1 = ef1(Bcoeff[z],tunn[z],prop[z],a)
        plt2 = ef2(Bcoeff[z],prop[z])
        plt3 = ef3(Bcoeff[z],tunn[z],prop[z],a)
        plt.plot(r1,plt1(r1),color[z],r2,plt2(r2),color[z],r3,plt3(r3),color[z])
#        plt.plot(r2,plt2(r2))
#        plt.plot(r3,plt3(r3))
    z = z+1

plt.show()
apache-2.0
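The hard-coded evenR and oddR arrays in the script above look like roots of the standard finite-square-well matching conditions with well-strength parameter z0 = 1.1*pi (the constant inside rad()). That interpretation is an assumption about the author's intent; the sketch below simply recovers similar numbers with scipy.optimize instead of hard-coding them, and the brackets passed to brentq were chosen by hand to avoid the tangent singularities.

import numpy as np
from scipy.optimize import brentq

z0 = 1.1 * np.pi   # assumed well-strength parameter, taken from rad()

def even_eq(x):
    # even parity: x*tan(x) = sqrt(z0**2 - x**2)
    return x * np.tan(x) - np.sqrt(z0**2 - x**2)

def odd_eq(x):
    # odd parity: -x/tan(x) = sqrt(z0**2 - x**2)
    return -x / np.tan(x) - np.sqrt(z0**2 - x**2)

even_roots = [brentq(even_eq, lo, hi) for lo, hi in [(0.1, 1.5), (3.2, 3.45)]]
odd_roots = [brentq(odd_eq, lo, hi) for lo, hi in [(1.6, 3.1)]]
print(even_roots, odd_roots)   # roughly [1.21, 3.37] and [2.38]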
brodoll/sms-tools
lectures/06-Harmonic-model/plots-code/oboe-spectrum.py
24
1032
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF

(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 1024
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)

plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')

plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/3,-90,max(mX)])
plt.title ('mX')

plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/3,min(pX),18])
plt.title ('pX')

plt.tight_layout()
plt.savefig('oboe-spectrum.png')
plt.show()
agpl-3.0
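The plot script above relies on the sms-tools helper modules (dftModel, utilFunctions), which are not on PyPI. The sketch below approximates the same magnitude/phase analysis step with plain NumPy so it can run standalone; the sample rate and the input frame are stand-ins, and dftAnal's zero-phase windowing is omitted, so the phase curve will differ from the original figure.

import numpy as np

fs = 44100                                 # assumed sample rate
rng = np.random.default_rng(0)
x1 = rng.standard_normal(651)              # stand-in for the windowed oboe frame
w = np.blackman(651)
N = 1024

xw = x1 * w / np.sum(w)                    # window and normalize
X = np.fft.rfft(xw, N)                     # zero-padded real FFT, length N//2 + 1
mX = 20 * np.log10(np.abs(X) + np.finfo(float).eps)   # magnitude spectrum in dB
pX = np.unwrap(np.angle(X))                            # unwrapped phase spectrum
freqs = np.arange(mX.size) * fs / N        # bin frequencies in Hz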
eickenberg/scikit-learn
sklearn/datasets/tests/test_lfw.py
50
6849
"""This test for the LFW require medium-size data dowloading and processing If the data has not been already downloaded by running the examples, the tests won't run (skipped). If the test are run, the first execution will be long (typically a bit more than a couple of minutes) but as the dataset loader is leveraging joblib, successive runs will be fast (less than 200ms). """ import random import os import shutil import tempfile import numpy as np from sklearn.externals import six try: try: from scipy.misc import imsave except ImportError: from scipy.misc.pilutil import imsave except ImportError: imsave = None from sklearn.datasets import load_lfw_pairs from sklearn.datasets import load_lfw_people from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import raises SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home') FAKE_NAMES = [ 'Abdelatif_Smith', 'Abhati_Kepler', 'Camara_Alvaro', 'Chen_Dupont', 'John_Lee', 'Lin_Bauman', 'Onur_Lopez', ] def setup_module(): """Test fixture run once and common to all tests of this module""" if imsave is None: raise SkipTest("PIL not installed.") if not os.path.exists(LFW_HOME): os.makedirs(LFW_HOME) random_state = random.Random(42) np_rng = np.random.RandomState(42) # generate some random jpeg files for each person counts = {} for name in FAKE_NAMES: folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name) if not os.path.exists(folder_name): os.makedirs(folder_name) n_faces = np_rng.randint(1, 5) counts[name] = n_faces for i in range(n_faces): file_path = os.path.join(folder_name, name + '_%04d.jpg' % i) uniface = np_rng.randint(0, 255, size=(250, 250, 3)) try: imsave(file_path, uniface) except ImportError: raise SkipTest("PIL not installed") # add some random file pollution to test robustness with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f: f.write(six.b('Text file to be ignored by the dataset loader.')) # generate some pairing metadata files using the same format as LFW with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f: f.write(six.b("10\n")) more_than_two = [name for name, count in six.iteritems(counts) if count >= 2] for i in range(5): name = random_state.choice(more_than_two) first, second = random_state.sample(range(counts[name]), 2) f.write(six.b('%s\t%d\t%d\n' % (name, first, second))) for i in range(5): first_name, second_name = random_state.sample(FAKE_NAMES, 2) first_index = random_state.choice(np.arange(counts[first_name])) second_index = random_state.choice(np.arange(counts[second_name])) f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index, second_name, second_index))) with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) def teardown_module(): """Test fixture (clean up) run once after all tests of this module""" if os.path.isdir(SCIKIT_LEARN_DATA): shutil.rmtree(SCIKIT_LEARN_DATA) if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA): shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA) @raises(IOError) def test_load_empty_lfw_people(): load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA) def test_load_fake_lfw_people(): lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA, 
min_faces_per_person=3) # The data is croped around the center as a rectangular bounding box # arounthe the face. Colors are converted to gray levels: assert_equal(lfw_people.images.shape, (10, 62, 47)) assert_equal(lfw_people.data.shape, (10, 2914)) # the target is array of person integer ids assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2]) # names of the persons can be found using the target_names array expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez'] assert_array_equal(lfw_people.target_names, expected_classes) # It is possible to ask for the original data without any croping or color # conversion and not limit on the number of picture per person lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None, slice_=None, color=True) assert_equal(lfw_people.images.shape, (17, 250, 250, 3)) # the ids and class names are the same as previously assert_array_equal(lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2]) assert_array_equal(lfw_people.target_names, ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro', 'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez']) @raises(ValueError) def test_load_fake_lfw_people_too_restrictive(): load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100) @raises(IOError) def test_load_empty_lfw_pairs(): load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA) def test_load_fake_lfw_pairs(): lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA) # The data is croped around the center as a rectangular bounding box # arounthe the face. Colors are converted to gray levels: assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47)) # the target is whether the person is the same or not assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) # names of the persons can be found using the target_names array expected_classes = ['Different persons', 'Same person'] assert_array_equal(lfw_pairs_train.target_names, expected_classes) # It is possible to ask for the original data without any croping or color # conversion lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None, slice_=None, color=True) assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3)) # the ids and class names are the same as previously assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) assert_array_equal(lfw_pairs_train.target_names, expected_classes)
bsd-3-clause
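A short usage sketch of the loader these tests exercise. The test file uses the older load_lfw_people name; in current scikit-learn the same functionality is exposed as fetch_lfw_people, which is what is shown here. Running it for real downloads the LFW archive (roughly 200 MB) on first use, so treat it as illustration only.

from sklearn.datasets import fetch_lfw_people

lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
print(lfw.images.shape)    # (n_samples, height, width), gray-level crops
print(lfw.data.shape)      # flattened pixel features
print(lfw.target_names)    # one entry per person kept by the min_faces filter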
okadate/romspy
romspy/tplot/tplot_valification.py
1
5910
# coding: utf-8 # (c) 2015-11-25 Teruhisa Okada import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter import netCDF4 import numpy as np import pandas as pd import datetime import romspy layers = {1:[1], 2:[1], 3:[1, 18], 4:[1, 14], 5:[0.7, 7.5], 6:[1, 10], 8:[3.5, 12], 9:[6.5, 15], 10:[1, 6.1], 11:[1, 2.9], 12:[1, 10], 13:[1, 11]} layers[13] = [1,10] def tplot_valification(obsfile, modfile, varid, s, dates, **kw): layers = {1:[1], 2:[1], 3:[1, 18], 4:[1, 14], 5:[0.7, 7.5], 6:[1, 10], 8:[3.5, 12], 9:[6.5, 15], 10:[1, 6.1], 11:[1, 2.9], 12:[1, 10], 13:[1, 11]} layers[13] = [1,10] layers = kw.pop('layers', layers) ax = kw.pop('ax', None) date_format = kw.pop('date_format', '%Y-%m') resample = kw.pop('resample', 'D') assimilation = kw.pop('assimilation', False) legend = kw.pop('legend', True) print obsfile, modfile, varid, s, dates obs = netCDF4.Dataset(obsfile, 'r') mod = netCDF4.Dataset(modfile, 'r') var = obs.variables time = var['obs_time'][:] type = var['obs_type'][:] station = var['obs_station'][:] layer = var['obs_layer'][:] index_type = (type==varid) index_station = (station==s) times = netCDF4.date2num(dates, romspy.JST_days) index_date = (times[0]<=time) & (time<times[-1]) if ax is None: ax = plt.gca() for k in layers[s]: index = np.where(index_type & index_station & index_date & (layer==k)) time = var['obs_time'][index] time = netCDF4.num2date(time, romspy.JST_days) obs_val = var['obs_value'][index] if assimilation: mod_val = mod.variables['NLmodel_initial'][index] mod_val_assim = mod.variables['NLmodel_value'][index] else: mod_val = mod.variables['NLmodel_value'][index] if varid == 15: cff = romspy.mol2g_O2 else: cff = 1.0 data = {'obs':obs_val*cff, 'model':mod_val*cff} if assimilation: data['assim'] = mod_val_assim*cff df = pd.DataFrame(data, index=time) df = df.dropna() df = df[df.model < 1000] loffset = {'M':'-15D', 'D':'-12H', 'H':'-30min'} df = df.resample(resample, how='mean', loffset=loffset[resample]) colors = {layers[s][0]:'#4D71AF', layers[s][-1]:'#C34F53'} ax.plot(df.index.values, df.obs.values, '.-', lw=0.5, color=colors[k], label='obs ({})'.format(k)) if assimilation: ax.plot(df.index.values, df.model.values, '--', lw=1.5, color=colors[k], label='background ({})'.format(k)) ax.plot(df.index.values, df.assim.values, '-', lw=1.5, color=colors[k], label='assimilation ({})'.format(k)) else: ax.plot(df.index.values, df.model.values, '-', lw=1.5, color=colors[k], label='mod ({})'.format(k)) #ax.grid() if legend: ax.legend(loc='best') ax.set_title('Sta.{}'.format(s)) ylabels = {6:'temperature [degC]', 7:'salinity', 10:'chlorophyll [mg/m3]', 15:'disolved oxygen [mg/l]'} ax.set_ylabel(ylabels[varid]) ax.set_xlim(dates[0], dates[-1]) ax.xaxis.set_major_formatter(DateFormatter(date_format)) def tplot_valification_3_2(obsfile, modfiles, varid, dates, **kw): stations = [3,4,5,6,12,13] fig, axes = plt.subplots(3, 2, figsize=[10,10]) axlist = [axes[y][x] for x in range(2) for y in range(3)] for station, ax in zip(stations, axlist): tplot_valification(obsfile, modfile, varid, station, dates, ax=ax, legend=False, **kw) axlist[3].legend(bbox_to_anchor=(1.4, 1)) plt.subplots_adjust(right=0.8) def tplot_valification_6_1(obsfile, modfiles, varid, dates, **kw): stations = [3,4,5,6,12,13] fig, axes = plt.subplots(6, 1, figsize=[10,13]) for station, ax in zip(stations, axes): for modfile in modfiles: tplot_valification(obsfile, modfile, varid, station, dates, ax=ax, legend=False, **kw) if varid == 6: ax.set_ylim(5, 30) elif varid == 7: ax.set_ylim(23, 33) elif varid == 
10: ax.set_ylim(0, 30) elif varid == 15: ax.set_ylim(0, 12) axes[0].legend(bbox_to_anchor=(1.25, 1.1)) plt.subplots_adjust(right=0.8, hspace=0.4) def tplot_valification_1(obsfile, modfiles, varid, dates, station=12, **kw): fig, ax = plt.subplots(1, 1) for modfile in modfiles: tplot_valification(obsfile, modfile, varid, station, dates, ax=ax, legend=False, **kw) if varid == 6: ax.set_ylim(5, 30) elif varid == 7: ax.set_ylim(23, 33) elif varid == 10: ax.set_ylim(0, 30) elif varid == 15: ax.set_ylim(0, 12) #ax.legend(bbox_to_anchor=(1.25, 1.1)) #plt.subplots_adjust(right=0.8) if __name__ == '__main__': import seaborn as sns #obsfile = '/home/okada/Data/ob500_obs_2012_mp-1_ts.nc' #obsfile = '/home/okada/Data/ob500_obs_2012_mp-2.nc' obsfile = '/home/okada/Data/ob500_obs_2012_mp-3_clean.nc' #modfile = '/home/okada/ism-i/apps/OB500P/case28/DA0-3.1/output/ob500_mod_0.nc' modfiles = ['/home/okada/ism-i/apps/OB500P/testDA/param4_0010_2/output/ob500_mod_{}.nc'.format(i*24) for i in range(0,7)] #modfiles = ['/home/okada/ism-i/apps/OB500P/case28/DA0-5.1/output/ob500_mod_{}.nc'.format(i*24) for i in range(0,7)] varid = 15 #dates = [datetime.datetime(2012,8,3,0), datetime.datetime(2012,8,4,0)] #tplot_valification_3_2(obsfile, modfiles[0], varid, dates, date_format='%m/%d', resample='H', assimilation=True) dates = [datetime.datetime(2012,1,1,0), datetime.datetime(2012,1,8,0)] tplot_valification_6_1(obsfile, modfiles, varid, dates, date_format='%m/%d', resample='H', assimilation=True) #tplot_valification_1(obsfile, modfiles, varid, dates, date_format='%m/%d', resample='H', assimilation=True) plt.show()
mit
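The plotting helper above resamples the observation/model DataFrame with the pre-1.0 pandas spelling resample(rule, how='mean', loffset=...). A hedged sketch of the same pattern against the current pandas API is below; the station and layer handling from the original file is omitted, and the random data is only a placeholder.

import numpy as np
import pandas as pd

idx = pd.date_range("2012-01-01", periods=48, freq="h")
df = pd.DataFrame({"obs": np.random.rand(48), "model": np.random.rand(48)},
                  index=idx)

daily = df.resample("D").mean()                       # replaces how='mean'
daily.index = daily.index + pd.Timedelta("-12h")      # replaces loffset='-12H'
print(daily)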
hlin117/scikit-learn
sklearn/gaussian_process/tests/test_gaussian_process.py
46
7057
""" Testing for Gaussian Process module (sklearn.gaussian_process) """ # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # License: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from sklearn.gaussian_process import regression_models as regression from sklearn.gaussian_process import correlation_models as correlation from sklearn.datasets import make_regression from sklearn.utils.testing import assert_greater, assert_true, raises f = lambda x: x * np.sin(x) X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = f(X).ravel() def test_1d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a one-dimensional Gaussian Process model. # Check random start optimization. # Test the interpolating property. gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=random_start, verbose=False).fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) y2_pred, MSE2 = gp.predict(X2, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.) and np.allclose(MSE2, 0., atol=10)) def test_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the interpolating property. b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = g(X).ravel() thetaL = [1e-4] * 2 thetaU = [1e-1] * 2 gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=thetaL, thetaU=thetaU, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) eps = np.finfo(gp.theta_.dtype).eps assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the GP interpolation for 2D output b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. f = lambda x: np.vstack((g(x), g(x))).T X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = f(X) gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=[1e-4] * 2, thetaU=[1e-1] * 2, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) @raises(ValueError) def test_wrong_number_of_outputs(): gp = GaussianProcess() gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3]) def test_more_builtin_correlation_models(random_start=1): # Repeat test_1d and test_2d for several built-in correlation # models specified as strings. 
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic', 'linear'] for corr in all_corr: test_1d(regr='constant', corr=corr, random_start=random_start) test_2d(regr='constant', corr=corr, random_start=random_start) test_2d_2d(regr='constant', corr=corr, random_start=random_start) def test_ordinary_kriging(): # Repeat test_1d and test_2d with given regression weights (beta0) for # different regression models (Ordinary Kriging). test_1d(regr='linear', beta0=[0., 0.5]) test_1d(regr='quadratic', beta0=[0., 0.5, 0.5]) test_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) def test_no_normalize(): gp = GaussianProcess(normalize=False).fit(X, y) y_pred = gp.predict(X) assert_true(np.allclose(y_pred, y)) def test_batch_size(): # TypeError when using batch_size on Python 3, see # https://github.com/scikit-learn/scikit-learn/issues/7329 for more # details gp = GaussianProcess() gp.fit(X, y) gp.predict(X, batch_size=1) gp.predict(X, batch_size=1, eval_MSE=True) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the reduced likelihood function of the optimal theta. n_samples, n_features = 50, 3 np.random.seed(0) rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) best_likelihood = -np.inf for random_start in range(1, 5): gp = GaussianProcess(regr="constant", corr="squared_exponential", theta0=[1e-0] * n_features, thetaL=[1e-4] * n_features, thetaU=[1e+1] * n_features, random_start=random_start, random_state=0, verbose=False).fit(X, y) rlf = gp.reduced_likelihood_function()[0] assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps) best_likelihood = rlf def test_mse_solving(): # test the MSE estimate to be sane. # non-regression test for ignoring off-diagonals of feature covariance, # testing with nugget that renders covariance useless, only # using the mean function, with low effective rank of data gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4, thetaL=1e-12, thetaU=1e-2, nugget=1e-2, optimizer='Welch', regr="linear", random_state=0) X, y = make_regression(n_informative=3, n_features=60, noise=50, random_state=0, effective_rank=1) gp.fit(X, y) assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
bsd-3-clause
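The GaussianProcess estimator tested above was removed from scikit-learn in 0.20; GaussianProcessRegressor is its replacement. The sketch below reproduces the spirit of test_1d (noise-free interpolation of the training points) with the newer class. The fixed RBF length scale and the near-zero alpha are assumptions made to keep the check deterministic, not values taken from the original tests.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()

kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
gp = GaussianProcessRegressor(kernel=kernel, alpha=1e-10).fit(X, y)
y_pred, y_std = gp.predict(X, return_std=True)

assert np.allclose(y_pred, y, atol=1e-4)   # mean interpolates the training data
print(y_std.max())                          # predictive std, expected near zero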
JasonKessler/scattertext
scattertext/categoryprojector/CategoryProjection.py
1
6779
from abc import ABC, abstractmethod import pandas as pd import numpy as np from scattertext.Scalers import stretch_neg1_to_1 class CategoryProjectionBase(ABC): ''' ''' def _pseduo_init(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, term_projection=None): self.category_corpus = category_corpus self.category_counts = category_counts self.x_dim = x_dim self.y_dim = y_dim self.projection = projection self.term_projection = term_projection def project_with_alternative_dimensions(self, x_dim, y_dim): return CategoryProjection(self.category_corpus, self.category_counts, self.projection, x_dim, y_dim) def project_with_alternate_axes(self, x_axis=None, y_axis=None): # !!! Need to fix if x_axis is None: x_axis = self._get_x_axis() if y_axis is None: y_axis = self._get_y_axis() return CategoryProjectionAlternateAxes(self.category_corpus, self.category_counts, self.projection, self.get_category_embeddings(), self.x_dim, self.y_dim, x_axis=x_axis, y_axis=y_axis) def get_pandas_projection(self): ''' :param x_dim: int :param y_dim: int :return: pd.DataFrame ''' return pd.DataFrame({'term': self.category_corpus.get_metadata(), 'x': self._get_x_axis(), 'y': self._get_y_axis()}).set_index('term') def _get_x_axis(self): return self.projection.T[self.x_dim] def _get_y_axis(self): return self.projection.T[self.y_dim] def get_axes_labels(self, num_terms=5): df = self.get_term_projection() return {'right': list(df.sort_values(by='x', ascending=False).index[:num_terms]), 'left': list(df.sort_values(by='x', ascending=True).index[:num_terms]), 'top': list(df.sort_values(by='y', ascending=False).index[:num_terms]), 'bottom': list(df.sort_values(by='y', ascending=True).index[:num_terms])} def get_nearest_terms(self, num_terms=5): df = self.get_term_projection().apply(stretch_neg1_to_1) return { 'top_right': ((df.x - 1) ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values, 'top': (df.x ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values, 'top_left': ((df.x + 1) ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values, 'right': ((df.x - 1) ** 2 + df.y ** 2).sort_values().index[:num_terms].values, 'left': ((df.x + 1) ** 2 + df.y ** 2).sort_values().index[:num_terms].values, 'bottom_right': ((df.x - 1) ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values, 'bottom': (df.x ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values, 'bottom_left': ((df.x + 1) ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values, } def get_term_projection(self): if self.term_projection is None: # np.ndarray(self.category_counts.values) * self._get_x_y_projection() dim_term = np.matmul(self.category_counts.values, self._get_x_y_projection()) else: dim_term = self.term_projection df = pd.DataFrame(dim_term, index=self.category_corpus.get_terms(), columns=['x', 'y']) return df def _get_x_y_projection(self): return np.array([self._get_x_axis(), self._get_y_axis()]).T def get_projection(self): return self.projection @abstractmethod def use_alternate_projection(self, projection): pass @abstractmethod def get_category_embeddings(self): pass def get_corpus(self): return self.category_corpus class CategoryProjection(CategoryProjectionBase): def __init__(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, term_projection=None): self._pseduo_init(category_corpus, category_counts, projection, x_dim, y_dim, term_projection) def get_category_embeddings(self): return self.category_counts.values def use_alternate_projection(self, projection): return 
CategoryProjection(self.category_corpus, self.category_counts, projection, self.x_dim, self.y_dim) class CategoryProjectionWithDoc2Vec(CategoryProjectionBase): def __init__(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, doc2vec_model=None, term_projection=None, ): self.doc2vec_model = doc2vec_model self._pseduo_init(category_corpus, category_counts, projection, x_dim, y_dim, term_projection) def project_with_alternative_dimensions(self, x_dim, y_dim): return CategoryProjectionWithDoc2Vec(self.category_corpus, self.category_counts, self.projection, x_dim, y_dim, doc2vec_model=self.doc2vec_model) def get_category_embeddings(self): return self.doc2vec_model.project() def use_alternate_projection(self, projection): return CategoryProjectionWithDoc2Vec(self.category_corpus, self.category_counts, projection, self.x_dim, self.y_dim, doc2vec_model=self.doc2vec_model) # !!! Need to fix class CategoryProjectionAlternateAxes(CategoryProjectionBase): def __init__(self, category_corpus, category_counts, projection, category_embeddings, x_dim=0, y_dim=1, x_axis=None, y_axis=None): self._pseduo_init(category_corpus, category_counts, projection, x_dim=x_dim, y_dim=y_dim) self.x_axis_ = x_axis self.y_axis_ = y_axis self.category_embeddings_ = category_embeddings def get_category_embeddings(self): return self.category_embeddings_ def _get_x_axis(self): return self.x_axis_ def _get_y_axis(self): return self.y_axis_ def project_raw_corpus(category_corpus, projection, projection_type=CategoryProjection, term_projection=None, x_dim=0, y_dim=1): return projection_type(category_corpus, category_corpus.get_term_freq_df(), projection, x_dim, y_dim, term_projection)
apache-2.0
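A standalone sketch of the linear algebra inside get_term_projection() above: term coordinates are the term-by-category count matrix multiplied by the two selected columns of the projection matrix. The toy matrices and term names are made up; in the real class they come from a scattertext corpus and its counts.

import numpy as np
import pandas as pd

category_counts = pd.DataFrame(                 # terms x categories
    [[3, 0], [1, 2], [0, 4]],
    index=["alpha", "beta", "gamma"], columns=["cat_a", "cat_b"])
projection = np.array([[0.9, -0.1],             # categories x projection dims
                       [0.2,  0.8]])
x_dim, y_dim = 0, 1

# same construction as _get_x_y_projection(): stack the chosen axes column-wise
xy_axes = np.array([projection.T[x_dim], projection.T[y_dim]]).T
term_xy = pd.DataFrame(category_counts.values @ xy_axes,
                       index=category_counts.index, columns=["x", "y"])
print(term_xy)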
CartoDB/cartoframes
cartoframes/data/observatory/catalog/category.py
1
4237
from .entity import CatalogEntity from .repository.constants import CATEGORY_FILTER from .repository.category_repo import get_category_repo from .repository.dataset_repo import get_dataset_repo from .repository.geography_repo import get_geography_repo class Category(CatalogEntity): """This class represents a :py:class:`Category <cartoframes.data.observatory.Category>` in the :py:class:`Catalog <cartoframes.data.observatory.Catalog>`. Catalog datasets (:py:class:`Dataset <cartoframes.data.observatory.Dataset>` class) are grouped by `categories`, so you can filter available `datasets` and `geographies` that belong (or are related) to a given `Category`. Examples: List the available categories in the :py:class:`Catalog <cartoframes.data.observatory.Catalog>` >>> catalog = Catalog() >>> categories = catalog.categories Get a :py:class:`Category <cartoframes.data.observatory.Category>` from the :py:class:`Catalog <cartoframes.data.observatory.Catalog>` given its ID >>> category = Category.get('demographics') """ _entity_repo = get_category_repo() @property def datasets(self): """Get the list of :obj:`Dataset` related to this category. Returns: :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>` List of Dataset instances. Raises: CatalogError: if there's a problem when connecting to the catalog or no datasets are found. Examples: Get all the `datasets` :py:class:`Dataset <cartoframes.data.observatory.Dataset>` available in the `catalog` for a :py:class:`Category <cartoframes.data.observatory.Category>` instance >>> category = Category.get('demographics') >>> datasets = category.datasets Same example as above but using nested filters: >>> catalog = Catalog() >>> datasets = catalog.category('demographics').datasets You can perform other operations with a :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`: >>> catalog = Catalog() >>> datasets = catalog.category('demographics').datasets >>> # convert the list of datasets into a pandas DataFrame >>> # for further filtering and exploration >>> dataframe = datasets.to_dataframe() >>> # get a dataset by ID or slug >>> dataset = Dataset.get(A_VALID_ID_OR_SLUG) """ return get_dataset_repo().get_all({CATEGORY_FILTER: self.id}) @property def geographies(self): """Get the list of :obj:`Geography` related to this category. Returns: :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>` List of Geography instances. Raises: CatalogError: if there's a problem when connecting to the catalog or no datasets are found. 
Examples: Get all the `geographies` :py:class:`Dataset <cartoframes.data.observatory.Dataset>` available in the `catalog` for a :py:class:`Category <cartoframes.data.observatory.Category>` instance >>> category = Category.get('demographics') >>> geographies = category.geographies Same example as above but using nested filters: >>> catalog = Catalog() >>> geographies = catalog.category('demographics').geographies You can perform these other operations with a :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`: >>> catalog = Catalog() >>> geographies = catalog.category('demographics').geographies >>> # convert the list of datasets into a pandas DataFrame >>> # for further filtering and exploration >>> dataframe = geographies.to_dataframe() >>> # get a geography by ID or slug >>> dataset = Geography.get(A_VALID_ID_OR_SLUG) """ return get_geography_repo().get_all({CATEGORY_FILTER: self.id}) @property def name(self): """Name of this category instance.""" return self.data['name']
bsd-3-clause
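A sketch pulling together the docstring examples from the Category class above. It needs the cartoframes package and network access to CARTO's Data Observatory, so it is illustration only; the method names are taken directly from the docstrings.

from cartoframes.data.observatory import Catalog, Category

category = Category.get('demographics')
datasets = category.datasets                       # CatalogList of Dataset entities
df = datasets.to_dataframe()                       # pandas DataFrame for filtering
geographies = Catalog().category('demographics').geographies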
AlexanderFabisch/scikit-learn
sklearn/cluster/tests/test_affinity_propagation.py
341
2620
""" Testing for Clustering methods """ import numpy as np from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.cluster.affinity_propagation_ import AffinityPropagation from sklearn.cluster.affinity_propagation_ import affinity_propagation from sklearn.datasets.samples_generator import make_blobs from sklearn.metrics import euclidean_distances n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=60, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=0) def test_affinity_propagation(): # Affinity Propagation algorithm # Compute similarities S = -euclidean_distances(X, squared=True) preference = np.median(S) * 10 # Compute Affinity Propagation cluster_centers_indices, labels = affinity_propagation( S, preference=preference) n_clusters_ = len(cluster_centers_indices) assert_equal(n_clusters, n_clusters_) af = AffinityPropagation(preference=preference, affinity="precomputed") labels_precomputed = af.fit(S).labels_ af = AffinityPropagation(preference=preference, verbose=True) labels = af.fit(X).labels_ assert_array_equal(labels, labels_precomputed) cluster_centers_indices = af.cluster_centers_indices_ n_clusters_ = len(cluster_centers_indices) assert_equal(np.unique(labels).size, n_clusters_) assert_equal(n_clusters, n_clusters_) # Test also with no copy _, labels_no_copy = affinity_propagation(S, preference=preference, copy=False) assert_array_equal(labels, labels_no_copy) # Test input validation assert_raises(ValueError, affinity_propagation, S[:, :-1]) assert_raises(ValueError, affinity_propagation, S, damping=0) af = AffinityPropagation(affinity="unknown") assert_raises(ValueError, af.fit, X) def test_affinity_propagation_predict(): # Test AffinityPropagation.predict af = AffinityPropagation(affinity="euclidean") labels = af.fit_predict(X) labels2 = af.predict(X) assert_array_equal(labels, labels2) def test_affinity_propagation_predict_error(): # Test exception in AffinityPropagation.predict # Not fitted. af = AffinityPropagation(affinity="euclidean") assert_raises(ValueError, af.predict, X) # Predict not supported when affinity="precomputed". S = np.dot(X, X.T) af = AffinityPropagation(affinity="precomputed") af.fit(S) assert_raises(ValueError, af.predict, X)
bsd-3-clause
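A minimal usage sketch of the estimator exercised by the tests above, using the public make_blobs import rather than the old samples_generator path. The random_state argument was added to AffinityPropagation after this test file was written, so it is an assumption about the installed scikit-learn version; three clusters is the expected outcome for this blob layout, not a guaranteed result.

import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn.datasets import make_blobs

centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=0)

af = AffinityPropagation(preference=-50, random_state=0).fit(X)
print(len(af.cluster_centers_indices_))   # expected: 3 clusters for this data
labels = af.predict(X)                     # works because affinity='euclidean'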
numenta/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wx.py
69
77038
from __future__ import division """ backend_wx.py A wxPython backend for matplotlib, based (very heavily) on backend_template.py and backend_gtk.py Author: Jeremy O'Donoghue (jeremy@o-donoghue.com) Derived from original copyright work by John Hunter (jdhunter@ace.bsd.uchicago.edu) Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4 License: This work is licensed under a PSF compatible license. A copy should be included with this source code. """ """ KNOWN BUGS - - Mousewheel (on Windows) only works after menu button has been pressed at least once - Mousewheel on Linux (wxGTK linked against GTK 1.2) does not work at all - Vertical text renders horizontally if you use a non TrueType font on Windows. This is a known wxPython issue. Work-around is to ensure that you use a TrueType font. - Pcolor demo puts chart slightly outside bounding box (approx 1-2 pixels to the bottom left) - Outputting to bitmap more than 300dpi results in some text being incorrectly scaled. Seems to be a wxPython bug on Windows or font point sizes > 60, as font size is correctly calculated. - Performance poorer than for previous direct rendering version - TIFF output not supported on wxGTK. This is a wxGTK issue - Text is not anti-aliased on wxGTK. This is probably a platform configuration issue. - If a second call is made to show(), no figure is generated (#866965) Not implemented: - Printing Fixed this release: - Bug #866967: Interactive operation issues fixed [JDH] - Bug #866969: Dynamic update does not function with backend_wx [JOD] Examples which work on this release: --------------------------------------------------------------- | Windows 2000 | Linux | | wxPython 2.3.3 | wxPython 2.4.2.4 | --------------------------------------------------------------| - alignment_test.py | TBE | OK | - arctest.py | TBE | (3) | - axes_demo.py | OK | OK | - axes_props.py | OK | OK | - bar_stacked.py | TBE | OK | - barchart_demo.py | OK | OK | - color_demo.py | OK | OK | - csd_demo.py | OK | OK | - dynamic_demo.py | N/A | N/A | - dynamic_demo_wx.py | TBE | OK | - embedding_in_gtk.py | N/A | N/A | - embedding_in_wx.py | OK | OK | - errorbar_demo.py | OK | OK | - figtext.py | OK | OK | - histogram_demo.py | OK | OK | - interactive.py | N/A (2) | N/A (2) | - interactive2.py | N/A (2) | N/A (2) | - legend_demo.py | OK | OK | - legend_demo2.py | OK | OK | - line_styles.py | OK | OK | - log_demo.py | OK | OK | - logo.py | OK | OK | - mpl_with_glade.py | N/A (2) | N/A (2) | - mri_demo.py | OK | OK | - mri_demo_with_eeg.py | OK | OK | - multiple_figs_demo.py | OK | OK | - pcolor_demo.py | OK | OK | - psd_demo.py | OK | OK | - scatter_demo.py | OK | OK | - scatter_demo2.py | OK | OK | - simple_plot.py | OK | OK | - stock_demo.py | OK | OK | - subplot_demo.py | OK | OK | - system_monitor.py | N/A (2) | N/A (2) | - text_handles.py | OK | OK | - text_themes.py | OK | OK | - vline_demo.py | OK | OK | --------------------------------------------------------------- (2) - Script uses GTK-specific features - cannot not run, but wxPython equivalent should be written. (3) - Clipping seems to be broken. """ cvs_id = '$Id: backend_wx.py 6484 2008-12-03 18:38:03Z jdh2358 $' import sys, os, os.path, math, StringIO, weakref, warnings import numpy as npy # Debugging settings here... # Debug level set here. If the debug level is less than 5, information # messages (progressively more info for lower value) are printed. 
In addition, # traceback is performed, and pdb activated, for all uncaught exceptions in # this case _DEBUG = 5 if _DEBUG < 5: import traceback, pdb _DEBUG_lvls = {1 : 'Low ', 2 : 'Med ', 3 : 'High', 4 : 'Error' } try: import wx backend_version = wx.VERSION_STRING except: raise ImportError("Matplotlib backend_wx requires wxPython be installed") #!!! this is the call that is causing the exception swallowing !!! #wx.InitAllImageHandlers() def DEBUG_MSG(string, lvl=3, o=None): if lvl >= _DEBUG: cls = o.__class__ # Jeremy, often times the commented line won't print but the # one below does. I think WX is redefining stderr, damned # beast #print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls) print "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls) def debug_on_error(type, value, tb): """Code due to Thomas Heller - published in Python Cookbook (O'Reilley)""" traceback.print_exc(type, value, tb) print pdb.pm() # jdh uncomment class fake_stderr: """Wx does strange things with stderr, as it makes the assumption that there is probably no console. This redirects stderr to the console, since we know that there is one!""" def write(self, msg): print "Stderr: %s\n\r" % msg #if _DEBUG < 5: # sys.excepthook = debug_on_error # WxLogger =wx.LogStderr() # sys.stderr = fake_stderr # Event binding code changed after version 2.5 if wx.VERSION_STRING >= '2.5': def bind(actor,event,action,**kw): actor.Bind(event,action,**kw) else: def bind(actor,event,action,id=None): if id is not None: event(actor, id, action) else: event(actor,action) import matplotlib from matplotlib import verbose from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureCanvasBase, FigureManagerBase, NavigationToolbar2, \ cursors from matplotlib._pylab_helpers import Gcf from matplotlib.artist import Artist from matplotlib.cbook import exception_to_str, is_string_like, is_writable_file_like from matplotlib.figure import Figure from matplotlib.path import Path from matplotlib.text import _process_text_args, Text from matplotlib.transforms import Affine2D from matplotlib.widgets import SubplotTool from matplotlib import rcParams # the True dots per inch on the screen; should be display dependent # see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi PIXELS_PER_INCH = 75 # Delay time for idle checks IDLE_DELAY = 5 def error_msg_wx(msg, parent=None): """ Signal an error condition -- in a GUI, popup a error dialog """ dialog =wx.MessageDialog(parent = parent, message = msg, caption = 'Matplotlib backend_wx error', style=wx.OK | wx.CENTRE) dialog.ShowModal() dialog.Destroy() return None def raise_msg_to_str(msg): """msg is a return arg from a raise. Join with new lines""" if not is_string_like(msg): msg = '\n'.join(map(str, msg)) return msg class RendererWx(RendererBase): """ The renderer handles all the drawing primitives using a graphics context instance that controls the colors/styles. It acts as the 'renderer' instance used by many classes in the hierarchy. """ #In wxPython, drawing is performed on a wxDC instance, which will #generally be mapped to the client aread of the window displaying #the plot. Under wxPython, the wxDC instance has a wx.Pen which #describes the colour and weight of any lines drawn, and a wxBrush #which describes the fill colour of any closed polygon. 
fontweights = { 100 : wx.LIGHT, 200 : wx.LIGHT, 300 : wx.LIGHT, 400 : wx.NORMAL, 500 : wx.NORMAL, 600 : wx.NORMAL, 700 : wx.BOLD, 800 : wx.BOLD, 900 : wx.BOLD, 'ultralight' : wx.LIGHT, 'light' : wx.LIGHT, 'normal' : wx.NORMAL, 'medium' : wx.NORMAL, 'semibold' : wx.NORMAL, 'bold' : wx.BOLD, 'heavy' : wx.BOLD, 'ultrabold' : wx.BOLD, 'black' : wx.BOLD } fontangles = { 'italic' : wx.ITALIC, 'normal' : wx.NORMAL, 'oblique' : wx.SLANT } # wxPython allows for portable font styles, choosing them appropriately # for the target platform. Map some standard font names to the portable # styles # QUESTION: Is it be wise to agree standard fontnames across all backends? fontnames = { 'Sans' : wx.SWISS, 'Roman' : wx.ROMAN, 'Script' : wx.SCRIPT, 'Decorative' : wx.DECORATIVE, 'Modern' : wx.MODERN, 'Courier' : wx.MODERN, 'courier' : wx.MODERN } def __init__(self, bitmap, dpi): """ Initialise a wxWindows renderer instance. """ DEBUG_MSG("__init__()", 1, self) if wx.VERSION_STRING < "2.8": raise RuntimeError("matplotlib no longer supports wxPython < 2.8 for the Wx backend.\nYou may, however, use the WxAgg backend.") self.width = bitmap.GetWidth() self.height = bitmap.GetHeight() self.bitmap = bitmap self.fontd = {} self.dpi = dpi self.gc = None def flipy(self): return True def offset_text_height(self): return True def get_text_width_height_descent(self, s, prop, ismath): """ get the width and height in display coords of the string s with FontPropertry prop """ #return 1, 1 if ismath: s = self.strip_math(s) if self.gc is None: gc = self.new_gc() else: gc = self.gc gfx_ctx = gc.gfx_ctx font = self.get_wx_font(s, prop) gfx_ctx.SetFont(font, wx.BLACK) w, h, descent, leading = gfx_ctx.GetFullTextExtent(s) return w, h, descent def get_canvas_width_height(self): 'return the canvas width and height in display coords' return self.width, self.height def handle_clip_rectangle(self, gc): new_bounds = gc.get_clip_rectangle() if new_bounds is not None: new_bounds = new_bounds.bounds gfx_ctx = gc.gfx_ctx if gfx_ctx._lastcliprect != new_bounds: gfx_ctx._lastcliprect = new_bounds if new_bounds is None: gfx_ctx.ResetClip() else: gfx_ctx.Clip(new_bounds[0], self.height - new_bounds[1] - new_bounds[3], new_bounds[2], new_bounds[3]) #@staticmethod def convert_path(gfx_ctx, tpath): wxpath = gfx_ctx.CreatePath() for points, code in tpath.iter_segments(): if code == Path.MOVETO: wxpath.MoveToPoint(*points) elif code == Path.LINETO: wxpath.AddLineToPoint(*points) elif code == Path.CURVE3: wxpath.AddQuadCurveToPoint(*points) elif code == Path.CURVE4: wxpath.AddCurveToPoint(*points) elif code == Path.CLOSEPOLY: wxpath.CloseSubpath() return wxpath convert_path = staticmethod(convert_path) def draw_path(self, gc, path, transform, rgbFace=None): gc.select() self.handle_clip_rectangle(gc) gfx_ctx = gc.gfx_ctx transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height) tpath = transform.transform_path(path) wxpath = self.convert_path(gfx_ctx, tpath) if rgbFace is not None: gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace))) gfx_ctx.DrawPath(wxpath) else: gfx_ctx.StrokePath(wxpath) gc.unselect() def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None): if bbox != None: l,b,w,h = bbox.bounds else: l=0 b=0, w=self.width h=self.height rows, cols, image_str = im.as_rgba_str() image_array = npy.fromstring(image_str, npy.uint8) image_array.shape = rows, cols, 4 bitmap = wx.BitmapFromBufferRGBA(cols,rows,image_array) gc = self.get_gc() gc.select() gc.gfx_ctx.DrawBitmap(bitmap,int(l),int(b),int(w),int(h)) 
gc.unselect() def draw_text(self, gc, x, y, s, prop, angle, ismath): """ Render the matplotlib.text.Text instance None) """ if ismath: s = self.strip_math(s) DEBUG_MSG("draw_text()", 1, self) gc.select() self.handle_clip_rectangle(gc) gfx_ctx = gc.gfx_ctx font = self.get_wx_font(s, prop) color = gc.get_wxcolour(gc.get_rgb()) gfx_ctx.SetFont(font, color) w, h, d = self.get_text_width_height_descent(s, prop, ismath) x = int(x) y = int(y-h) if angle == 0.0: gfx_ctx.DrawText(s, x, y) else: rads = angle / 180.0 * math.pi xo = h * math.sin(rads) yo = h * math.cos(rads) gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads) gc.unselect() def new_gc(self): """ Return an instance of a GraphicsContextWx, and sets the current gc copy """ DEBUG_MSG('new_gc()', 2, self) self.gc = GraphicsContextWx(self.bitmap, self) self.gc.select() self.gc.unselect() return self.gc def get_gc(self): """ Fetch the locally cached gc. """ # This is a dirty hack to allow anything with access to a renderer to # access the current graphics context assert self.gc != None, "gc must be defined" return self.gc def get_wx_font(self, s, prop): """ Return a wx font. Cache instances in a font dictionary for efficiency """ DEBUG_MSG("get_wx_font()", 1, self) key = hash(prop) fontprop = prop fontname = fontprop.get_name() font = self.fontd.get(key) if font is not None: return font # Allow use of platform independent and dependent font names wxFontname = self.fontnames.get(fontname, wx.ROMAN) wxFacename = '' # Empty => wxPython chooses based on wx_fontname # Font colour is determined by the active wx.Pen # TODO: It may be wise to cache font information size = self.points_to_pixels(fontprop.get_size_in_points()) font =wx.Font(int(size+0.5), # Size wxFontname, # 'Generic' name self.fontangles[fontprop.get_style()], # Angle self.fontweights[fontprop.get_weight()], # Weight False, # Underline wxFacename) # Platform font name # cache the font and gc and return it self.fontd[key] = font return font def points_to_pixels(self, points): """ convert point measures to pixes using dpi and the pixels per inch of the display """ return points*(PIXELS_PER_INCH/72.0*self.dpi/72.0) class GraphicsContextWx(GraphicsContextBase): """ The graphics context provides the color, line styles, etc... This class stores a reference to a wxMemoryDC, and a wxGraphicsContext that draws to it. Creating a wxGraphicsContext seems to be fairly heavy, so these objects are cached based on the bitmap object that is passed in. The base GraphicsContext stores colors as a RGB tuple on the unit interval, eg, (0.5, 0.0, 1.0). wxPython uses an int interval, but since wxPython colour management is rather simple, I have not chosen to implement a separate colour manager class. 
""" _capd = { 'butt': wx.CAP_BUTT, 'projecting': wx.CAP_PROJECTING, 'round': wx.CAP_ROUND } _joind = { 'bevel': wx.JOIN_BEVEL, 'miter': wx.JOIN_MITER, 'round': wx.JOIN_ROUND } _dashd_wx = { 'solid': wx.SOLID, 'dashed': wx.SHORT_DASH, 'dashdot': wx.DOT_DASH, 'dotted': wx.DOT } _cache = weakref.WeakKeyDictionary() def __init__(self, bitmap, renderer): GraphicsContextBase.__init__(self) #assert self.Ok(), "wxMemoryDC not OK to use" DEBUG_MSG("__init__()", 1, self) dc, gfx_ctx = self._cache.get(bitmap, (None, None)) if dc is None: dc = wx.MemoryDC() dc.SelectObject(bitmap) gfx_ctx = wx.GraphicsContext.Create(dc) gfx_ctx._lastcliprect = None self._cache[bitmap] = dc, gfx_ctx self.bitmap = bitmap self.dc = dc self.gfx_ctx = gfx_ctx self._pen = wx.Pen('BLACK', 1, wx.SOLID) gfx_ctx.SetPen(self._pen) self._style = wx.SOLID self.renderer = renderer def select(self): """ Select the current bitmap into this wxDC instance """ if sys.platform=='win32': self.dc.SelectObject(self.bitmap) self.IsSelected = True def unselect(self): """ Select a Null bitmasp into this wxDC instance """ if sys.platform=='win32': self.dc.SelectObject(wx.NullBitmap) self.IsSelected = False def set_foreground(self, fg, isRGB=None): """ Set the foreground color. fg can be a matlab format string, a html hex color string, an rgb unit tuple, or a float between 0 and 1. In the latter case, grayscale is used. """ # Implementation note: wxPython has a separate concept of pen and # brush - the brush fills any outline trace left by the pen. # Here we set both to the same colour - if a figure is not to be # filled, the renderer will set the brush to be transparent # Same goes for text foreground... DEBUG_MSG("set_foreground()", 1, self) self.select() GraphicsContextBase.set_foreground(self, fg, isRGB) self._pen.SetColour(self.get_wxcolour(self.get_rgb())) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_graylevel(self, frac): """ Set the foreground color. fg can be a matlab format string, a html hex color string, an rgb unit tuple, or a float between 0 and 1. In the latter case, grayscale is used. """ DEBUG_MSG("set_graylevel()", 1, self) self.select() GraphicsContextBase.set_graylevel(self, frac) self._pen.SetColour(self.get_wxcolour(self.get_rgb())) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_linewidth(self, w): """ Set the line width. """ DEBUG_MSG("set_linewidth()", 1, self) self.select() if w>0 and w<1: w = 1 GraphicsContextBase.set_linewidth(self, w) lw = int(self.renderer.points_to_pixels(self._linewidth)) if lw==0: lw = 1 self._pen.SetWidth(lw) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_capstyle(self, cs): """ Set the capstyle as a string in ('butt', 'round', 'projecting') """ DEBUG_MSG("set_capstyle()", 1, self) self.select() GraphicsContextBase.set_capstyle(self, cs) self._pen.SetCap(GraphicsContextWx._capd[self._capstyle]) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_joinstyle(self, js): """ Set the join style to be one of ('miter', 'round', 'bevel') """ DEBUG_MSG("set_joinstyle()", 1, self) self.select() GraphicsContextBase.set_joinstyle(self, js) self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle]) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_linestyle(self, ls): """ Set the line style to be one of """ DEBUG_MSG("set_linestyle()", 1, self) self.select() GraphicsContextBase.set_linestyle(self, ls) try: self._style = GraphicsContextWx._dashd_wx[ls] except KeyError: self._style = wx.LONG_DASH# Style not used elsewhere... 
# On MS Windows platform, only line width of 1 allowed for dash lines if wx.Platform == '__WXMSW__': self.set_linewidth(1) self._pen.SetStyle(self._style) self.gfx_ctx.SetPen(self._pen) self.unselect() def get_wxcolour(self, color): """return a wx.Colour from RGB format""" DEBUG_MSG("get_wx_color()", 1, self) if len(color) == 3: r, g, b = color r *= 255 g *= 255 b *= 255 return wx.Colour(red=int(r), green=int(g), blue=int(b)) else: r, g, b, a = color r *= 255 g *= 255 b *= 255 a *= 255 return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a)) class FigureCanvasWx(FigureCanvasBase, wx.Panel): """ The FigureCanvas contains the figure and does event handling. In the wxPython backend, it is derived from wxPanel, and (usually) lives inside a frame instantiated by a FigureManagerWx. The parent window probably implements a wx.Sizer to control the displayed control size - but we give a hint as to our preferred minimum size. """ keyvald = { wx.WXK_CONTROL : 'control', wx.WXK_SHIFT : 'shift', wx.WXK_ALT : 'alt', wx.WXK_LEFT : 'left', wx.WXK_UP : 'up', wx.WXK_RIGHT : 'right', wx.WXK_DOWN : 'down', wx.WXK_ESCAPE : 'escape', wx.WXK_F1 : 'f1', wx.WXK_F2 : 'f2', wx.WXK_F3 : 'f3', wx.WXK_F4 : 'f4', wx.WXK_F5 : 'f5', wx.WXK_F6 : 'f6', wx.WXK_F7 : 'f7', wx.WXK_F8 : 'f8', wx.WXK_F9 : 'f9', wx.WXK_F10 : 'f10', wx.WXK_F11 : 'f11', wx.WXK_F12 : 'f12', wx.WXK_SCROLL : 'scroll_lock', wx.WXK_PAUSE : 'break', wx.WXK_BACK : 'backspace', wx.WXK_RETURN : 'enter', wx.WXK_INSERT : 'insert', wx.WXK_DELETE : 'delete', wx.WXK_HOME : 'home', wx.WXK_END : 'end', wx.WXK_PRIOR : 'pageup', wx.WXK_NEXT : 'pagedown', wx.WXK_PAGEUP : 'pageup', wx.WXK_PAGEDOWN : 'pagedown', wx.WXK_NUMPAD0 : '0', wx.WXK_NUMPAD1 : '1', wx.WXK_NUMPAD2 : '2', wx.WXK_NUMPAD3 : '3', wx.WXK_NUMPAD4 : '4', wx.WXK_NUMPAD5 : '5', wx.WXK_NUMPAD6 : '6', wx.WXK_NUMPAD7 : '7', wx.WXK_NUMPAD8 : '8', wx.WXK_NUMPAD9 : '9', wx.WXK_NUMPAD_ADD : '+', wx.WXK_NUMPAD_SUBTRACT : '-', wx.WXK_NUMPAD_MULTIPLY : '*', wx.WXK_NUMPAD_DIVIDE : '/', wx.WXK_NUMPAD_DECIMAL : 'dec', wx.WXK_NUMPAD_ENTER : 'enter', wx.WXK_NUMPAD_UP : 'up', wx.WXK_NUMPAD_RIGHT : 'right', wx.WXK_NUMPAD_DOWN : 'down', wx.WXK_NUMPAD_LEFT : 'left', wx.WXK_NUMPAD_PRIOR : 'pageup', wx.WXK_NUMPAD_NEXT : 'pagedown', wx.WXK_NUMPAD_PAGEUP : 'pageup', wx.WXK_NUMPAD_PAGEDOWN : 'pagedown', wx.WXK_NUMPAD_HOME : 'home', wx.WXK_NUMPAD_END : 'end', wx.WXK_NUMPAD_INSERT : 'insert', wx.WXK_NUMPAD_DELETE : 'delete', } def __init__(self, parent, id, figure): """ Initialise a FigureWx instance. - Initialise the FigureCanvasBase and wxPanel parents. 
- Set event handlers for: EVT_SIZE (Resize event) EVT_PAINT (Paint event) """ FigureCanvasBase.__init__(self, figure) # Set preferred window size hint - helps the sizer (if one is # connected) l,b,w,h = figure.bbox.bounds w = int(math.ceil(w)) h = int(math.ceil(h)) wx.Panel.__init__(self, parent, id, size=wx.Size(w, h)) def do_nothing(*args, **kwargs): warnings.warn('could not find a setinitialsize function for backend_wx; please report your wxpython version=%s to the matplotlib developers list'%backend_version) pass # try to find the set size func across wx versions try: getattr(self, 'SetInitialSize') except AttributeError: self.SetInitialSize = getattr(self, 'SetBestFittingSize', do_nothing) if not hasattr(self,'IsShownOnScreen'): self.IsShownOnScreen = getattr(self, 'IsVisible', lambda *args: True) # Create the drawing bitmap self.bitmap =wx.EmptyBitmap(w, h) DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w,h), 2, self) # TODO: Add support for 'point' inspection and plot navigation. self._isDrawn = False bind(self, wx.EVT_SIZE, self._onSize) bind(self, wx.EVT_PAINT, self._onPaint) bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground) bind(self, wx.EVT_KEY_DOWN, self._onKeyDown) bind(self, wx.EVT_KEY_UP, self._onKeyUp) bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown) bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown) bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp) bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel) bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown) bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown) bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp) bind(self, wx.EVT_MOTION, self._onMotion) bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave) bind(self, wx.EVT_ENTER_WINDOW, self._onEnter) bind(self, wx.EVT_IDLE, self._onIdle) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.macros = {} # dict from wx id to seq of macros self.Printer_Init() def Destroy(self, *args, **kwargs): wx.Panel.Destroy(self, *args, **kwargs) def Copy_to_Clipboard(self, event=None): "copy bitmap of canvas to system clipboard" bmp_obj = wx.BitmapDataObject() bmp_obj.SetBitmap(self.bitmap) wx.TheClipboard.Open() wx.TheClipboard.SetData(bmp_obj) wx.TheClipboard.Close() def Printer_Init(self): """initialize printer settings using wx methods""" self.printerData = wx.PrintData() self.printerData.SetPaperId(wx.PAPER_LETTER) self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER) self.printerPageData= wx.PageSetupDialogData() self.printerPageData.SetMarginBottomRight((25,25)) self.printerPageData.SetMarginTopLeft((25,25)) self.printerPageData.SetPrintData(self.printerData) self.printer_width = 5.5 self.printer_margin= 0.5 def Printer_Setup(self, event=None): """set up figure for printing. The standard wx Printer Setup Dialog seems to die easily. Therefore, this setup simply asks for image width and margin for printing. """ dmsg = """Width of output figure in inches. 
The current aspect ration will be kept.""" dlg = wx.Dialog(self, -1, 'Page Setup for Printing' , (-1,-1)) df = dlg.GetFont() df.SetWeight(wx.NORMAL) df.SetPointSize(11) dlg.SetFont(df) x_wid = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_width, size=(70,-1)) x_mrg = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_margin,size=(70,-1)) sizerAll = wx.BoxSizer(wx.VERTICAL) sizerAll.Add(wx.StaticText(dlg,-1,dmsg), 0, wx.ALL | wx.EXPAND, 5) sizer = wx.FlexGridSizer(0,3) sizerAll.Add(sizer, 0, wx.ALL | wx.EXPAND, 5) sizer.Add(wx.StaticText(dlg,-1,'Figure Width'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(x_wid, 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'in'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'Margin'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(x_mrg, 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'in'), 1, wx.ALIGN_LEFT|wx.ALL, 2) btn = wx.Button(dlg,wx.ID_OK, " OK ") btn.SetDefault() sizer.Add(btn, 1, wx.ALIGN_LEFT, 5) btn = wx.Button(dlg,wx.ID_CANCEL, " CANCEL ") sizer.Add(btn, 1, wx.ALIGN_LEFT, 5) dlg.SetSizer(sizerAll) dlg.SetAutoLayout(True) sizerAll.Fit(dlg) if dlg.ShowModal() == wx.ID_OK: try: self.printer_width = float(x_wid.GetValue()) self.printer_margin = float(x_mrg.GetValue()) except: pass if ((self.printer_width + self.printer_margin) > 7.5): self.printerData.SetOrientation(wx.LANDSCAPE) else: self.printerData.SetOrientation(wx.PORTRAIT) dlg.Destroy() return def Printer_Setup2(self, event=None): """set up figure for printing. Using the standard wx Printer Setup Dialog. """ if hasattr(self, 'printerData'): data = wx.PageSetupDialogData() data.SetPrintData(self.printerData) else: data = wx.PageSetupDialogData() data.SetMarginTopLeft( (15, 15) ) data.SetMarginBottomRight( (15, 15) ) dlg = wx.PageSetupDialog(self, data) if dlg.ShowModal() == wx.ID_OK: data = dlg.GetPageSetupData() tl = data.GetMarginTopLeft() br = data.GetMarginBottomRight() self.printerData = wx.PrintData(data.GetPrintData()) dlg.Destroy() def Printer_Preview(self, event=None): """ generate Print Preview with wx Print mechanism""" po1 = PrintoutWx(self, width=self.printer_width, margin=self.printer_margin) po2 = PrintoutWx(self, width=self.printer_width, margin=self.printer_margin) self.preview = wx.PrintPreview(po1,po2,self.printerData) if not self.preview.Ok(): print "error with preview" self.preview.SetZoom(50) frameInst= self while not isinstance(frameInst, wx.Frame): frameInst= frameInst.GetParent() frame = wx.PreviewFrame(self.preview, frameInst, "Preview") frame.Initialize() frame.SetPosition(self.GetPosition()) frame.SetSize((850,650)) frame.Centre(wx.BOTH) frame.Show(True) self.gui_repaint() def Printer_Print(self, event=None): """ Print figure using wx Print mechanism""" pdd = wx.PrintDialogData() # SetPrintData for 2.4 combatibility pdd.SetPrintData(self.printerData) pdd.SetToPage(1) printer = wx.Printer(pdd) printout = PrintoutWx(self, width=int(self.printer_width), margin=int(self.printer_margin)) print_ok = printer.Print(self, printout, True) if wx.VERSION_STRING >= '2.5': if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED: wx.MessageBox("""There was a problem printing. Perhaps your current printer is not set correctly?""", "Printing", wx.OK) else: if not print_ok: wx.MessageBox("""There was a problem printing. Perhaps your current printer is not set correctly?""", "Printing", wx.OK) printout.Destroy() self.gui_repaint() def draw_idle(self): """ Delay rendering until the GUI is idle. 
""" DEBUG_MSG("draw_idle()", 1, self) self._isDrawn = False # Force redraw # Create a timer for handling draw_idle requests # If there are events pending when the timer is # complete, reset the timer and continue. The # alternative approach, binding to wx.EVT_IDLE, # doesn't behave as nicely. if hasattr(self,'_idletimer'): self._idletimer.Restart(IDLE_DELAY) else: self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle) # FutureCall is a backwards-compatible alias; # CallLater became available in 2.7.1.1. def _onDrawIdle(self, *args, **kwargs): if wx.GetApp().Pending(): self._idletimer.Restart(IDLE_DELAY, *args, **kwargs) else: del self._idletimer # GUI event or explicit draw call may already # have caused the draw to take place if not self._isDrawn: self.draw(*args, **kwargs) def draw(self, drawDC=None): """ Render the figure using RendererWx instance renderer, or using a previously defined renderer if none is specified. """ DEBUG_MSG("draw()", 1, self) self.renderer = RendererWx(self.bitmap, self.figure.dpi) self.figure.draw(self.renderer) self._isDrawn = True self.gui_repaint(drawDC=drawDC) def flush_events(self): wx.Yield() def start_event_loop(self, timeout=0): """ Start an event loop. This is used to start a blocking event loop so that interactive functions, such as ginput and waitforbuttonpress, can wait for events. This should not be confused with the main GUI event loop, which is always running and has nothing to do with this. Call signature:: start_event_loop(self,timeout=0) This call blocks until a callback function triggers stop_event_loop() or *timeout* is reached. If *timeout* is <=0, never timeout. Raises RuntimeError if event loop is already running. """ if hasattr(self, '_event_loop'): raise RuntimeError("Event loop already running") id = wx.NewId() timer = wx.Timer(self, id=id) if timeout > 0: timer.Start(timeout*1000, oneShot=True) bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id) # Event loop handler for start/stop event loop self._event_loop = wx.EventLoop() self._event_loop.Run() timer.Stop() def stop_event_loop(self, event=None): """ Stop an event loop. This is used to stop a blocking event loop so that interactive functions, such as ginput and waitforbuttonpress, can wait for events. Call signature:: stop_event_loop_default(self) """ if hasattr(self,'_event_loop'): if self._event_loop.IsRunning(): self._event_loop.Exit() del self._event_loop def _get_imagesave_wildcards(self): 'return the wildcard string for the filesave dialog' default_filetype = self.get_default_filetype() filetypes = self.get_supported_filetypes_grouped() sorted_filetypes = filetypes.items() sorted_filetypes.sort() wildcards = [] extensions = [] filter_index = 0 for i, (name, exts) in enumerate(sorted_filetypes): ext_list = ';'.join(['*.%s' % ext for ext in exts]) extensions.append(exts[0]) wildcard = '%s (%s)|%s' % (name, ext_list, ext_list) if default_filetype in exts: filter_index = i wildcards.append(wildcard) wildcards = '|'.join(wildcards) return wildcards, extensions, filter_index def gui_repaint(self, drawDC=None): """ Performs update of the displayed image on the GUI canvas, using the supplied device context. If drawDC is None, a ClientDC will be used to redraw the image. 
""" DEBUG_MSG("gui_repaint()", 1, self) if self.IsShownOnScreen(): if drawDC is None: drawDC=wx.ClientDC(self) drawDC.BeginDrawing() drawDC.DrawBitmap(self.bitmap, 0, 0) drawDC.EndDrawing() #wx.GetApp().Yield() else: pass filetypes = FigureCanvasBase.filetypes.copy() filetypes['bmp'] = 'Windows bitmap' filetypes['jpeg'] = 'JPEG' filetypes['jpg'] = 'JPEG' filetypes['pcx'] = 'PCX' filetypes['png'] = 'Portable Network Graphics' filetypes['tif'] = 'Tagged Image Format File' filetypes['tiff'] = 'Tagged Image Format File' filetypes['xpm'] = 'X pixmap' def print_figure(self, filename, *args, **kwargs): # Use pure Agg renderer to draw FigureCanvasBase.print_figure(self, filename, *args, **kwargs) # Restore the current view; this is needed because the # artist contains methods rely on particular attributes # of the rendered figure for determining things like # bounding boxes. if self._isDrawn: self.draw() def print_bmp(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs) def print_jpeg(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_JPEG, *args, **kwargs) print_jpg = print_jpeg def print_pcx(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs) def print_png(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs) def print_tiff(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_TIF, *args, **kwargs) print_tif = print_tiff def print_xpm(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs) def _print_image(self, filename, filetype, *args, **kwargs): origBitmap = self.bitmap l,b,width,height = self.figure.bbox.bounds width = int(math.ceil(width)) height = int(math.ceil(height)) self.bitmap = wx.EmptyBitmap(width, height) renderer = RendererWx(self.bitmap, self.figure.dpi) gc = renderer.new_gc() self.figure.draw(renderer) # Now that we have rendered into the bitmap, save it # to the appropriate file type and clean up if is_string_like(filename): if not self.bitmap.SaveFile(filename, filetype): DEBUG_MSG('print_figure() file save error', 4, self) raise RuntimeError('Could not save figure to %s\n' % (filename)) elif is_writable_file_like(filename): if not self.bitmap.ConvertToImage().SaveStream(filename, filetype): DEBUG_MSG('print_figure() file save error', 4, self) raise RuntimeError('Could not save figure to %s\n' % (filename)) # Restore everything to normal self.bitmap = origBitmap # Note: draw is required here since bits of state about the # last renderer are strewn about the artist draw methods. Do # not remove the draw without first verifying that these have # been cleaned up. The artist contains() methods will fail # otherwise. if self._isDrawn: self.draw() self.Refresh() def get_default_filetype(self): return 'png' def _onPaint(self, evt): """ Called when wxPaintEvt is generated """ DEBUG_MSG("_onPaint()", 1, self) drawDC = wx.PaintDC(self) if not self._isDrawn: self.draw(drawDC=drawDC) else: self.gui_repaint(drawDC=drawDC) evt.Skip() def _onEraseBackground(self, evt): """ Called when window is redrawn; since we are blitting the entire image, we can leave this blank to suppress flicker. """ pass def _onSize(self, evt): """ Called when wxEventSize is generated. In this application we attempt to resize to fit the window, so it is better to take the performance hit and redraw the whole window. 
""" DEBUG_MSG("_onSize()", 2, self) # Create a new, correctly sized bitmap self._width, self._height = self.GetClientSize() self.bitmap =wx.EmptyBitmap(self._width, self._height) self._isDrawn = False if self._width <= 1 or self._height <= 1: return # Empty figure dpival = self.figure.dpi winch = self._width/dpival hinch = self._height/dpival self.figure.set_size_inches(winch, hinch) # Rendering will happen on the associated paint event # so no need to do anything here except to make sure # the whole background is repainted. self.Refresh(eraseBackground=False) def _get_key(self, evt): keyval = evt.m_keyCode if keyval in self.keyvald: key = self.keyvald[keyval] elif keyval <256: key = chr(keyval) else: key = None # why is wx upcasing this? if key is not None: key = key.lower() return key def _onIdle(self, evt): 'a GUI idle event' evt.Skip() FigureCanvasBase.idle_event(self, guiEvent=evt) def _onKeyDown(self, evt): """Capture key press.""" key = self._get_key(evt) evt.Skip() FigureCanvasBase.key_press_event(self, key, guiEvent=evt) def _onKeyUp(self, evt): """Release key.""" key = self._get_key(evt) #print 'release key', key evt.Skip() FigureCanvasBase.key_release_event(self, key, guiEvent=evt) def _onRightButtonDown(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() self.CaptureMouse() FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt) def _onRightButtonUp(self, evt): """End measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() if self.HasCapture(): self.ReleaseMouse() FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt) def _onLeftButtonDown(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() self.CaptureMouse() FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt) def _onLeftButtonUp(self, evt): """End measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() #print 'release button', 1 evt.Skip() if self.HasCapture(): self.ReleaseMouse() FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt) def _onMouseWheel(self, evt): """Translate mouse wheel events into matplotlib events""" # Determine mouse location x = evt.GetX() y = self.figure.bbox.height - evt.GetY() # Convert delta/rotation/rate into a floating point step size delta = evt.GetWheelDelta() rotation = evt.GetWheelRotation() rate = evt.GetLinesPerAction() #print "delta,rotation,rate",delta,rotation,rate step = rate*float(rotation)/delta # Done handling event evt.Skip() # Mac is giving two events for every wheel event # Need to skip every second one if wx.Platform == '__WXMAC__': if not hasattr(self,'_skipwheelevent'): self._skipwheelevent = True elif self._skipwheelevent: self._skipwheelevent = False return # Return without processing event else: self._skipwheelevent = True # Convert to mpl event FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt) def _onMotion(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt) def _onLeave(self, evt): """Mouse has left the window.""" evt.Skip() FigureCanvasBase.leave_notify_event(self, guiEvent = evt) def _onEnter(self, evt): """Mouse has entered the window.""" FigureCanvasBase.enter_notify_event(self, guiEvent = evt) ######################################################################## # # The following functions and classes are 
for pylab compatibility # mode (matplotlib.pylab) and implement figure managers, etc... # ######################################################################## def _create_wx_app(): """ Creates a wx.PySimpleApp instance if a wx.App has not been created. """ wxapp = wx.GetApp() if wxapp is None: wxapp = wx.PySimpleApp() wxapp.SetExitOnFrameDelete(True) # retain a reference to the app object so it does not get garbage # collected and cause segmentation faults _create_wx_app.theWxApp = wxapp def draw_if_interactive(): """ This should be overriden in a windowing environment if drawing should be done in interactive python mode """ DEBUG_MSG("draw_if_interactive()", 1, None) if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager is not None: figManager.canvas.draw() def show(): """ Current implementation assumes that matplotlib is executed in a PyCrust shell. It appears to be possible to execute wxPython applications from within a PyCrust without having to ensure that wxPython has been created in a secondary thread (e.g. SciPy gui_thread). Unfortunately, gui_thread seems to introduce a number of further dependencies on SciPy modules, which I do not wish to introduce into the backend at this point. If there is a need I will look into this in a later release. """ DEBUG_MSG("show()", 3, None) for figwin in Gcf.get_all_fig_managers(): figwin.frame.Show() if show._needmain and not matplotlib.is_interactive(): # start the wxPython gui event if there is not already one running wxapp = wx.GetApp() if wxapp is not None: # wxPython 2.4 has no wx.App.IsMainLoopRunning() method imlr = getattr(wxapp, 'IsMainLoopRunning', lambda: False) if not imlr(): wxapp.MainLoop() show._needmain = False show._needmain = True def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ # in order to expose the Figure constructor to the pylab # interface we need to create the figure here DEBUG_MSG("new_figure_manager()", 3, None) _create_wx_app() FigureClass = kwargs.pop('FigureClass', Figure) fig = FigureClass(*args, **kwargs) frame = FigureFrameWx(num, fig) figmgr = frame.get_figure_manager() if matplotlib.is_interactive(): figmgr.frame.Show() return figmgr class FigureFrameWx(wx.Frame): def __init__(self, num, fig): # On non-Windows platform, explicitly set the position - fix # positioning bug on some Linux platforms if wx.Platform == '__WXMSW__': pos = wx.DefaultPosition else: pos =wx.Point(20,20) l,b,w,h = fig.bbox.bounds wx.Frame.__init__(self, parent=None, id=-1, pos=pos, title="Figure %d" % num) # Frame will be sized later by the Fit method DEBUG_MSG("__init__()", 1, self) self.num = num statbar = StatusBarWx(self) self.SetStatusBar(statbar) self.canvas = self.get_canvas(fig) self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height)) self.sizer =wx.BoxSizer(wx.VERTICAL) self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND) # By adding toolbar in sizer, we are able to put it at the bottom # of the frame - so appearance is closer to GTK version self.toolbar = self._get_toolbar(statbar) if self.toolbar is not None: self.toolbar.Realize() if wx.Platform == '__WXMAC__': # Mac platform (OSX 10.3, MacPython) does not seem to cope with # having a toolbar in a sizer. This work-around gets the buttons # back, but at the expense of having the toolbar at the top self.SetToolBar(self.toolbar) else: # On Windows platform, default window size is incorrect, so set # toolbar width to figure width. 
tw, th = self.toolbar.GetSizeTuple() fw, fh = self.canvas.GetSizeTuple() # By adding toolbar in sizer, we are able to put it at the bottom # of the frame - so appearance is closer to GTK version. # As noted above, doesn't work for Mac. self.toolbar.SetSize(wx.Size(fw, th)) self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND) self.SetSizer(self.sizer) self.Fit() self.figmgr = FigureManagerWx(self.canvas, num, self) bind(self, wx.EVT_CLOSE, self._onClose) def _get_toolbar(self, statbar): if matplotlib.rcParams['toolbar']=='classic': toolbar = NavigationToolbarWx(self.canvas, True) elif matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2Wx(self.canvas) toolbar.set_status_bar(statbar) else: toolbar = None return toolbar def get_canvas(self, fig): return FigureCanvasWx(self, -1, fig) def get_figure_manager(self): DEBUG_MSG("get_figure_manager()", 1, self) return self.figmgr def _onClose(self, evt): DEBUG_MSG("onClose()", 1, self) self.canvas.stop_event_loop() Gcf.destroy(self.num) #self.Destroy() def GetToolBar(self): """Override wxFrame::GetToolBar as we don't have managed toolbar""" return self.toolbar def Destroy(self, *args, **kwargs): wx.Frame.Destroy(self, *args, **kwargs) if self.toolbar is not None: self.toolbar.Destroy() wxapp = wx.GetApp() if wxapp: wxapp.Yield() return True class FigureManagerWx(FigureManagerBase): """ This class contains the FigureCanvas and GUI frame It is instantiated by GcfWx whenever a new figure is created. GcfWx is responsible for managing multiple instances of FigureManagerWx. NB: FigureManagerBase is found in _pylab_helpers public attrs canvas - a FigureCanvasWx(wx.Panel) instance window - a wxFrame instance - http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin_wxframe.html#wxframe """ def __init__(self, canvas, num, frame): DEBUG_MSG("__init__()", 1, self) FigureManagerBase.__init__(self, canvas, num) self.frame = frame self.window = frame self.tb = frame.GetToolBar() self.toolbar = self.tb # consistent with other backends def notify_axes_change(fig): 'this will be called whenever the current axes is changed' if self.tb != None: self.tb.update() self.canvas.figure.add_axobserver(notify_axes_change) def showfig(*args): frame.Show() # attach a show method to the figure self.canvas.figure.show = showfig def destroy(self, *args): DEBUG_MSG("destroy()", 1, self) self.frame.Destroy() #if self.tb is not None: self.tb.Destroy() import wx #wx.GetApp().ProcessIdle() wx.WakeUpIdle() def set_window_title(self, title): self.window.SetTitle(title) def resize(self, width, height): 'Set the canvas size in pixels' self.canvas.SetInitialSize(wx.Size(width, height)) self.window.GetSizer().Fit(self.window) # Identifiers for toolbar controls - images_wx contains bitmaps for the images # used in the controls. wxWindows does not provide any stock images, so I've # 'stolen' those from GTK2, and transformed them into the appropriate format. #import images_wx _NTB_AXISMENU =wx.NewId() _NTB_AXISMENU_BUTTON =wx.NewId() _NTB_X_PAN_LEFT =wx.NewId() _NTB_X_PAN_RIGHT =wx.NewId() _NTB_X_ZOOMIN =wx.NewId() _NTB_X_ZOOMOUT =wx.NewId() _NTB_Y_PAN_UP =wx.NewId() _NTB_Y_PAN_DOWN =wx.NewId() _NTB_Y_ZOOMIN =wx.NewId() _NTB_Y_ZOOMOUT =wx.NewId() #_NTB_SUBPLOT =wx.NewId() _NTB_SAVE =wx.NewId() _NTB_CLOSE =wx.NewId() def _load_bitmap(filename): """ Load a bitmap file from the backends/images subdirectory in which the matplotlib library is installed. The filename parameter should not contain any path information as this is determined automatically. 
Returns a wx.Bitmap object """ basedir = os.path.join(rcParams['datapath'],'images') bmpFilename = os.path.normpath(os.path.join(basedir, filename)) if not os.path.exists(bmpFilename): raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename) bmp = wx.Bitmap(bmpFilename) return bmp class MenuButtonWx(wx.Button): """ wxPython does not permit a menu to be incorporated directly into a toolbar. This class simulates the effect by associating a pop-up menu with a button in the toolbar, and managing this as though it were a menu. """ def __init__(self, parent): wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ", style=wx.BU_EXACTFIT) self._toolbar = parent self._menu =wx.Menu() self._axisId = [] # First two menu items never change... self._allId =wx.NewId() self._invertId =wx.NewId() self._menu.Append(self._allId, "All", "Select all axes", False) self._menu.Append(self._invertId, "Invert", "Invert axes selected", False) self._menu.AppendSeparator() bind(self, wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON) bind(self, wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId) bind(self, wx.EVT_MENU, self._handleInvertAxesSelected, id=self._invertId) def Destroy(self): self._menu.Destroy() wx.Button.Destroy(self) # delegate to the base class; calling self.Destroy() here would recurse forever def _onMenuButton(self, evt): """Handle menu button pressed.""" x, y = self.GetPositionTuple() w, h = self.GetSizeTuple() self.PopupMenuXY(self._menu, x, y+h-4) # When menu returned, indicate selection in button evt.Skip() def _handleSelectAllAxes(self, evt): """Called when the 'select all axes' menu item is selected.""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def _handleInvertAxesSelected(self, evt): """Called when the invert all menu item is selected""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): self._menu.Check(self._axisId[i], False) else: self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def _onMenuItemSelected(self, evt): """Called whenever one of the specific axis menu items is selected""" current = self._menu.IsChecked(evt.GetId()) if current: new = False else: new = True self._menu.Check(evt.GetId(), new) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def updateAxes(self, maxAxis): """Ensures that there are entries for maxAxis axes in the menu (selected by default).""" if maxAxis > len(self._axisId): for i in range(len(self._axisId) + 1, maxAxis + 1, 1): menuId =wx.NewId() self._axisId.append(menuId) self._menu.Append(menuId, "Axis %d" % i, "Select axis %d" % i, True) self._menu.Check(menuId, True) bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId) self._toolbar.set_active(range(len(self._axisId))) def getActiveAxes(self): """Return a list of the selected axes.""" active = [] for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): active.append(i) return active def updateButtonText(self, lst): """Update the list of selected axes in the menu button""" axis_txt = '' for e in lst: axis_txt += '%d,' % (e+1) # remove trailing ',' and add to button string self.SetLabel("Axes: %s" % axis_txt[:-1]) cursord = { cursors.MOVE : wx.CURSOR_HAND, cursors.HAND : wx.CURSOR_HAND, cursors.POINTER : wx.CURSOR_ARROW, cursors.SELECT_REGION : wx.CURSOR_CROSS, } class SubplotToolWX(wx.Frame): def __init__(self, targetfig): wx.Frame.__init__(self, None, -1, "Configure subplots") toolfig =
Figure((6,3)) canvas = FigureCanvasWx(self, -1, toolfig) # Create a figure manager to manage things figmgr = FigureManager(canvas, 1, self) # Now put all into a sizer sizer = wx.BoxSizer(wx.VERTICAL) # This way of adding to sizer allows resizing sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW) self.SetSizer(sizer) self.Fit() tool = SubplotTool(targetfig, toolfig) class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar): def __init__(self, canvas): wx.ToolBar.__init__(self, canvas.GetParent(), -1) NavigationToolbar2.__init__(self, canvas) self.canvas = canvas self._idle = True self.statbar = None def get_canvas(self, frame, fig): return FigureCanvasWx(frame, -1, fig) def _init_toolbar(self): DEBUG_MSG("_init_toolbar", 1, self) self._parent = self.canvas.GetParent() _NTB2_HOME =wx.NewId() self._NTB2_BACK =wx.NewId() self._NTB2_FORWARD =wx.NewId() self._NTB2_PAN =wx.NewId() self._NTB2_ZOOM =wx.NewId() _NTB2_SAVE = wx.NewId() _NTB2_SUBPLOT =wx.NewId() self.SetToolBitmapSize(wx.Size(24,24)) self.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'), 'Home', 'Reset original view') self.AddSimpleTool(self._NTB2_BACK, _load_bitmap('back.png'), 'Back', 'Back navigation view') self.AddSimpleTool(self._NTB2_FORWARD, _load_bitmap('forward.png'), 'Forward', 'Forward navigation view') # todo: get new bitmap self.AddCheckTool(self._NTB2_PAN, _load_bitmap('move.png'), shortHelp='Pan', longHelp='Pan with left, zoom with right') self.AddCheckTool(self._NTB2_ZOOM, _load_bitmap('zoom_to_rect.png'), shortHelp='Zoom', longHelp='Zoom to rectangle') self.AddSeparator() self.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'), 'Configure subplots', 'Configure subplot parameters') self.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'), 'Save', 'Save plot contents to file') bind(self, wx.EVT_TOOL, self.home, id=_NTB2_HOME) bind(self, wx.EVT_TOOL, self.forward, id=self._NTB2_FORWARD) bind(self, wx.EVT_TOOL, self.back, id=self._NTB2_BACK) bind(self, wx.EVT_TOOL, self.zoom, id=self._NTB2_ZOOM) bind(self, wx.EVT_TOOL, self.pan, id=self._NTB2_PAN) bind(self, wx.EVT_TOOL, self.configure_subplot, id=_NTB2_SUBPLOT) bind(self, wx.EVT_TOOL, self.save, id=_NTB2_SAVE) self.Realize() def zoom(self, *args): self.ToggleTool(self._NTB2_PAN, False) NavigationToolbar2.zoom(self, *args) def pan(self, *args): self.ToggleTool(self._NTB2_ZOOM, False) NavigationToolbar2.pan(self, *args) def configure_subplot(self, evt): frame = wx.Frame(None, -1, "Configure subplots") toolfig = Figure((6,3)) canvas = self.get_canvas(frame, toolfig) # Create a figure manager to manage things figmgr = FigureManager(canvas, 1, frame) # Now put all into a sizer sizer = wx.BoxSizer(wx.VERTICAL) # This way of adding to sizer allows resizing sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW) frame.SetSizer(sizer) frame.Fit() tool = SubplotTool(self.canvas.figure, toolfig) frame.Show() def save(self, evt): # Fetch the required filename and file type. filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards() default_file = "image." 
+ self.canvas.get_default_filetype() dlg = wx.FileDialog(self._parent, "Save to file", "", default_file, filetypes, wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR) dlg.SetFilterIndex(filter_index) if dlg.ShowModal() == wx.ID_OK: dirname = dlg.GetDirectory() filename = dlg.GetFilename() DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self) format = exts[dlg.GetFilterIndex()] basename, ext = os.path.splitext(filename) if ext.startswith('.'): ext = ext[1:] if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format!=ext: #looks like they forgot to set the image type drop #down, going with the extension. warnings.warn('extension %s did not match the selected image type %s; going with %s'%(ext, format, ext), stacklevel=0) format = ext try: self.canvas.print_figure( os.path.join(dirname, filename), format=format) except Exception, e: error_msg_wx(str(e)) def set_cursor(self, cursor): cursor =wx.StockCursor(cursord[cursor]) self.canvas.SetCursor( cursor ) def release(self, event): try: del self.lastrect except AttributeError: pass def dynamic_update(self): d = self._idle self._idle = False if d: self.canvas.draw() self._idle = True def draw_rubberband(self, event, x0, y0, x1, y1): 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744' canvas = self.canvas dc =wx.ClientDC(canvas) # Set logical function to XOR for rubberbanding dc.SetLogicalFunction(wx.XOR) # Set dc brush and pen # Here I set brush and pen to white and grey respectively # You can set it to your own choices # The brush setting is not really needed since we # don't do any filling of the dc. It is set just for # the sake of completeness. wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT) wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID) dc.SetBrush(wbrush) dc.SetPen(wpen) dc.ResetBoundingBox() dc.BeginDrawing() height = self.canvas.figure.bbox.height y1 = height - y1 y0 = height - y0 if y1<y0: y0, y1 = y1, y0 if x1<x0: x0, x1 = x1, x0 w = x1 - x0 h = y1 - y0 rect = int(x0), int(y0), int(w), int(h) try: lastrect = self.lastrect except AttributeError: pass else: dc.DrawRectangle(*lastrect) #erase last self.lastrect = rect dc.DrawRectangle(*rect) dc.EndDrawing() def set_status_bar(self, statbar): self.statbar = statbar def set_message(self, s): if self.statbar is not None: self.statbar.set_function(s) def set_history_buttons(self): can_backward = (self._views._pos > 0) can_forward = (self._views._pos < len(self._views._elements) - 1) self.EnableTool(self._NTB2_BACK, can_backward) self.EnableTool(self._NTB2_FORWARD, can_forward) class NavigationToolbarWx(wx.ToolBar): def __init__(self, canvas, can_kill=False): """ figure is the Figure instance that the toolbar controls win, if not None, is the wxWindow the Figure is embedded in """ wx.ToolBar.__init__(self, canvas.GetParent(), -1) DEBUG_MSG("__init__()", 1, self) self.canvas = canvas self._lastControl = None self._mouseOnButton = None self._parent = canvas.GetParent() self._NTB_BUTTON_HANDLER = { _NTB_X_PAN_LEFT : self.panx, _NTB_X_PAN_RIGHT : self.panx, _NTB_X_ZOOMIN : self.zoomx, _NTB_X_ZOOMOUT : self.zoomx, _NTB_Y_PAN_UP : self.pany, _NTB_Y_PAN_DOWN : self.pany, _NTB_Y_ZOOMIN : self.zoomy, _NTB_Y_ZOOMOUT : self.zoomy } self._create_menu() self._create_controls(can_kill) self.Realize() def _create_menu(self): """ Creates the 'menu' - implemented as a button which opens a pop-up menu since wxPython does not allow a menu as a control """ DEBUG_MSG("_create_menu()", 1, self) self._menu = MenuButtonWx(self) self.AddControl(self._menu) self.AddSeparator()
def _create_controls(self, can_kill): """ Creates the button controls, and links them to event handlers """ DEBUG_MSG("_create_controls()", 1, self) # Need the following line as Windows toolbars default to 15x16 self.SetToolBitmapSize(wx.Size(16,16)) self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'), 'Left', 'Scroll left') self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'), 'Right', 'Scroll right') self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase X axis magnification') self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease X axis magnification') self.AddSeparator() self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap('stock_up.xpm'), 'Up', 'Scroll up') self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'), 'Down', 'Scroll down') self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase Y axis magnification') self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease Y axis magnification') self.AddSeparator() self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'), 'Save', 'Save plot contents as images') self.AddSeparator() bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT) bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT) bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN) bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT) bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP) bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN) bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN) bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT) bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE) bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId()) if can_kill: bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE) bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel) def set_active(self, ind): """ ind is a list of index numbers for the axes which are to be made active """ DEBUG_MSG("set_active()", 1, self) self._ind = ind if ind != None: self._active = [ self._axes[i] for i in self._ind ] else: self._active = [] # Now update button text wit active axes self._menu.updateButtonText(ind) def get_last_control(self): """Returns the identity of the last toolbar button pressed.""" return self._lastControl def panx(self, direction): DEBUG_MSG("panx()", 1, self) for a in self._active: a.xaxis.pan(direction) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def pany(self, direction): DEBUG_MSG("pany()", 1, self) for a in self._active: a.yaxis.pan(direction) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def zoomx(self, in_out): DEBUG_MSG("zoomx()", 1, self) for a in self._active: a.xaxis.zoom(in_out) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def zoomy(self, in_out): DEBUG_MSG("zoomy()", 1, self) for a in self._active: a.yaxis.zoom(in_out) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def update(self): """ Update the toolbar menu - called when (e.g.) 
a new subplot or axes are added """ DEBUG_MSG("update()", 1, self) self._axes = self.canvas.figure.get_axes() self._menu.updateAxes(len(self._axes)) def _do_nothing(self, d): """A NULL event handler - does nothing whatsoever""" pass # Local event handlers - mainly supply parameters to pan/scroll functions def _onEnterTool(self, evt): toolId = evt.GetSelection() try: self.button_fn = self._NTB_BUTTON_HANDLER[toolId] except KeyError: self.button_fn = self._do_nothing evt.Skip() def _onLeftScroll(self, evt): self.panx(-1) evt.Skip() def _onRightScroll(self, evt): self.panx(1) evt.Skip() def _onXZoomIn(self, evt): self.zoomx(1) evt.Skip() def _onXZoomOut(self, evt): self.zoomx(-1) evt.Skip() def _onUpScroll(self, evt): self.pany(1) evt.Skip() def _onDownScroll(self, evt): self.pany(-1) evt.Skip() def _onYZoomIn(self, evt): self.zoomy(1) evt.Skip() def _onYZoomOut(self, evt): self.zoomy(-1) evt.Skip() def _onMouseEnterButton(self, button): self._mouseOnButton = button def _onMouseLeaveButton(self, button): if self._mouseOnButton == button: self._mouseOnButton = None def _onMouseWheel(self, evt): if evt.GetWheelRotation() > 0: direction = 1 else: direction = -1 self.button_fn(direction) _onSave = NavigationToolbar2Wx.save def _onClose(self, evt): self.GetParent().Destroy() class StatusBarWx(wx.StatusBar): """ A status bar is added to _FigureFrame to allow measurements and the previously selected scroll function to be displayed as a user convenience. """ def __init__(self, parent): wx.StatusBar.__init__(self, parent, -1) self.SetFieldsCount(2) self.SetStatusText("None", 1) #self.SetStatusText("Measurement: None", 2) #self.Reposition() def set_function(self, string): self.SetStatusText("%s" % string, 1) #def set_measurement(self, string): # self.SetStatusText("Measurement: %s" % string, 2) #< Additions for printing support: Matt Newville class PrintoutWx(wx.Printout): """Simple wrapper around wx Printout class -- all the real work here is scaling the matplotlib canvas bitmap to the current printer's definition. 
""" def __init__(self, canvas, width=5.5,margin=0.5, title='matplotlib'): wx.Printout.__init__(self,title=title) self.canvas = canvas # width, in inches of output figure (approximate) self.width = width self.margin = margin def HasPage(self, page): #current only supports 1 page print return page == 1 def GetPageInfo(self): return (1, 1, 1, 1) def OnPrintPage(self, page): self.canvas.draw() dc = self.GetDC() (ppw,pph) = self.GetPPIPrinter() # printer's pixels per in (pgw,pgh) = self.GetPageSizePixels() # page size in pixels (dcw,dch) = dc.GetSize() (grw,grh) = self.canvas.GetSizeTuple() # save current figure dpi resolution and bg color, # so that we can temporarily set them to the dpi of # the printer, and the bg color to white bgcolor = self.canvas.figure.get_facecolor() fig_dpi = self.canvas.figure.dpi # draw the bitmap, scaled appropriately vscale = float(ppw) / fig_dpi # set figure resolution,bg color for printer self.canvas.figure.dpi = ppw self.canvas.figure.set_facecolor('#FFFFFF') renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi) self.canvas.figure.draw(renderer) self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale)) self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale)) self.canvas.draw() # page may need additional scaling on preview page_scale = 1.0 if self.IsPreview(): page_scale = float(dcw)/pgw # get margin in pixels = (margin in in) * (pixels/in) top_margin = int(self.margin * pph * page_scale) left_margin = int(self.margin * ppw * page_scale) # set scale so that width of output is self.width inches # (assuming grw is size of graph in inches....) user_scale = (self.width * fig_dpi * page_scale)/float(grw) dc.SetDeviceOrigin(left_margin,top_margin) dc.SetUserScale(user_scale,user_scale) # this cute little number avoid API inconsistencies in wx try: dc.DrawBitmap(self.canvas.bitmap, 0, 0) except: try: dc.DrawBitmap(self.canvas.bitmap, (0, 0)) except: pass # restore original figure resolution self.canvas.figure.set_facecolor(bgcolor) self.canvas.figure.dpi = fig_dpi self.canvas.draw() return True #> ######################################################################## # # Now just provide the standard names that backend.__init__ is expecting # ######################################################################## Toolbar = NavigationToolbarWx FigureManager = FigureManagerWx
agpl-3.0
jonparrott/gcloud-python
docs/conf.py
2
11579
# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # google-cloud documentation build configuration file, created by # sphinx-quickstart on Tue Jan 21 22:24:47 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import email import os import pkg_resources import shutil from sphinx.util import logging logger = logging.getLogger(__name__) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.6.3' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # Allow markdown includes (so releases.md can include CHANGLEOG.md) # http://www.sphinx-doc.org/en/master/markdown.html source_parsers = { '.md': 'recommonmark.parser.CommonMarkParser', } # The suffix of source filenames. source_suffix = ['.rst', '.md'] # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'google-cloud' copyright = u'2014-2017, Google' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. distro = pkg_resources.get_distribution('google-cloud-core') release = os.getenv('SPHINX_RELEASE', distro.version) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'description': 'Google Cloud Client Libraries for Python', 'github_user': 'GoogleCloudPlatform', 'github_repo': 'google-cloud-python', 'github_banner': True, 'font_family': "'Roboto', Georgia, sans", 'head_font_family': "'Roboto', Georgia, serif", 'code_font_family': "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/images/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_add_permalinks = '#' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'google-cloud-doc' html_context = {} # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } metadata = distro.get_metadata(distro.PKG_INFO) author = email.message_from_string(metadata).get('Author') # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'google-cloud.tex', u'google-cloud Documentation', author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress ths to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 'ref.python' ] # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'google-cloud', u'google-cloud Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'google-cloud', u'google-cloud Documentation', author, 'google-cloud', 'Python API for Google Cloud.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # This pulls class descriptions from the class docstring, # and parameter definitions from the __init__ docstring. autoclass_content = 'both' # Automatically generate API reference stubs from autosummary. # http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically autosummary_generate = True # Configuration for intersphinx: intersphinx_mapping = { 'google-auth': ('https://google-auth.readthedocs.io/en/stable', None), 'google-gax': ('https://gax-python.readthedocs.io/en/latest/', None), 'grpc': ('https://grpc.io/grpc/python/', None), 'requests': ('http://docs.python-requests.org/en/master/', None), 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/3', None), } # Static HTML pages, e.g. 
to support redirects # See: https://tech.signavio.com/2017/managing-sphinx-redirects # HTML pages to be copied from source to target static_html_pages = [ 'datastore/usage.html', 'dns/usage.html', 'bigquery/usage.html', 'runtimeconfig/usage.html', 'spanner/usage.html', 'trace/starting.html', ] def copy_static_html_pages(app, exception): if exception is None and app.builder.name == 'html': for static_html_page in static_html_pages: target_path = app.outdir + '/' + static_html_page src_path = app.srcdir + '/' + static_html_page if os.path.isfile(src_path): logger.info( 'Copying static html: %s -> %s', src_path, target_path) shutil.copyfile(src_path, target_path) def setup(app): app.connect('build-finished', copy_static_html_pages)
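# ----------------------------------------------------------------------
# Illustrative sketch (not part of this project's conf.py).  The pages
# listed in ``static_html_pages`` above are typically small meta-refresh
# redirect stubs; a hypothetical helper like the one below could generate
# such a stub.  The function name, arguments, and example paths/URLs are
# assumptions made for illustration only.
def _write_redirect_stub(path, target_url):
    """Write a minimal meta-refresh redirect page to *path*."""
    html = (
        '<!DOCTYPE html>\n'
        '<html><head>\n'
        '<meta http-equiv="refresh" content="0; url=%s">\n'
        '</head><body>\n'
        '<p>This content has moved to <a href="%s">%s</a>.</p>\n'
        '</body></html>\n' % (target_url, target_url, target_url)
    )
    with open(path, 'w') as fh:
        fh.write(html)

# Hypothetical usage:
#     _write_redirect_stub('bigquery/usage.html',
#                          'https://example.com/new/bigquery/index.html')
# ----------------------------------------------------------------------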
apache-2.0