Column schema for the records below (each record lists these fields in this order):

column          type            min                           max
repo_name       stringlengths   6                             112
path            stringlengths   4                             204
copies          stringlengths   1                             3
size            stringlengths   4                             7
content         stringlengths   711                           1.04M
license         stringclasses   15 values
hash            int64           -9,223,328,406,218,787,000    9,223,331,109B
line_mean       float64         5.74                          99.7
line_max        int64           17                            1k
alpha_frac      float64         0.25                          0.96
autogenerated   bool            1 class
repo_name: OMS-NetZero/FAIR
path: fair/gas_cycle/gir.py
copies: 1
size: 2430
content:

from __future__ import division

import numpy as np

from ..constants.general import ppm_gtc

"""Gas cycle functions from Generalised Impulse Response Model v1.0.0.

Much of this has been adapted from:

Leach et al., 2020, Geoscientific Model Development
https://www.geosci-model-dev-discuss.net/gmd-2019-379/
"""


def calculate_alpha(cumulative_emissions, airborne_emissions, temperature, r0,
                    rC, rT, g0, g1, iirf_max=97.0):
    """
    Calculate CO2 time constant scaling factor.

    Inputs:
        cumulative_emissions: GtC cumulative emissions since pre-industrial.
        airborne_emissions: GtC total emissions remaining in the atmosphere.
        temperature: K temperature anomaly since pre-industrial.
        r0: pre-industrial 100-year time-integrated airborne fraction.
        rC: sensitivity of 100-year time-integrated airborne fraction with
            atmospheric carbon stock.
        rT: sensitivity of 100-year time-integrated airborne fraction with
            temperature anomaly.
        g0: parameter for alpha
        g1: parameter for alpha

    Keywords:
        iirf_max: maximum allowable value of the 100-year time-integrated
            airborne fraction

    Outputs:
        alpha: scaling factor.
    """
    iirf = r0 + rC * (cumulative_emissions - airborne_emissions) + rT * temperature
    iirf = (iirf > iirf_max) * iirf_max + iirf * (iirf < iirf_max)
    alpha = g0 * np.sinh(iirf / g1)
    return alpha


def step_concentration(carbon_boxes0, emissions, alpha, a, tau, Cpi, dt=1):
    """
    Calculate concentrations from emissions.

    Inputs:
        carbon_boxes0: CO2 boxes at the end of the previous timestep.
        emissions: GtC CO2 emissions this timestep.
        alpha: CO2 time constant scaling factor.
        a: CO2 partitioning coefficient
        tau: CO2 atmospheric time constants (unscaled).
        Cpi: pre-industrial CO2 concentrations (ppm).

    Keywords:
        dt: timestep in years.

    Outputs:
        C: CO2 concentration (ppm)
        carbon_boxes1: CO2 boxes at the end of this timestep.
        airborne_emissions: GtC total emissions remaining in atmosphere.
    """
    carbon_boxes1 = emissions / ppm_gtc * a * alpha * (tau / dt) * (
        1. - np.exp(-dt / (alpha * tau))) + carbon_boxes0 * np.exp(-dt / (alpha * tau))
    C = Cpi + np.sum(carbon_boxes1 + carbon_boxes0) / 2
    airborne_emissions = np.sum(carbon_boxes1) * ppm_gtc
    return C, carbon_boxes1, airborne_emissions

license: apache-2.0
hash: -8,292,986,400,798,594,000
line_mean: 37.571429
line_max: 104
alpha_frac: 0.680247
autogenerated: false
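A minimal usage sketch for the two functions in this record. The import path below is only inferred from the file's location, and every numerical value is an arbitrary illustration, not one of FAIR's calibrated parameters.

import numpy as np
from fair.gas_cycle.gir import calculate_alpha, step_concentration  # assumed import path

# Illustrative four-box carbon cycle (made-up partitioning and time constants).
a = np.array([0.2, 0.3, 0.3, 0.2])          # partitioning coefficients (assumed)
tau = np.array([1.0e6, 400.0, 40.0, 4.0])   # time constants in years (assumed)

alpha = calculate_alpha(cumulative_emissions=600.0, airborne_emissions=250.0,
                        temperature=1.0, r0=35.0, rC=0.02, rT=4.5, g0=0.01, g1=8.0)
C, boxes, airborne = step_concentration(carbon_boxes0=np.zeros(4), emissions=10.0,
                                        alpha=alpha, a=a, tau=tau, Cpi=278.0, dt=1)
print(C, airborne)  # concentration in ppm and airborne carbon stock in GtC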
repo_name: mathkann/hyperopt
path: hyperopt/tests/test_criteria.py
copies: 7
size: 1917
content:

import numpy as np

import hyperopt.criteria as crit


def test_ei():
    rng = np.random.RandomState(123)
    for mean, var in [(0, 1), (-4, 9)]:
        thresholds = np.arange(-5, 5, .25) * np.sqrt(var) + mean
        v_n = [crit.EI_gaussian_empirical(mean, var, thresh, rng, 10000)
               for thresh in thresholds]
        v_a = [crit.EI_gaussian(mean, var, thresh)
               for thresh in thresholds]
        #import matplotlib.pyplot as plt
        #plt.plot(thresholds, v_n)
        #plt.plot(thresholds, v_a)
        #plt.show()
        if not np.allclose(v_n, v_a, atol=0.03, rtol=0.03):
            for t, n, a in zip(thresholds, v_n, v_a):
                print t, n, a, abs(n - a), abs(n - a) / (abs(n) + abs(a))
            assert 0
            #mean, var, thresh, v_n, v_a)


def test_log_ei():
    for mean, var in [(0, 1), (-4, 9)]:
        thresholds = np.arange(-5, 30, .25) * np.sqrt(var) + mean
        ei = np.asarray(
            [crit.EI_gaussian(mean, var, thresh)
             for thresh in thresholds])
        nlei = np.asarray(
            [crit.logEI_gaussian(mean, var, thresh)
             for thresh in thresholds])
        naive = np.log(ei)
        #import matplotlib.pyplot as plt
        #plt.plot(thresholds, ei, label='ei')
        #plt.plot(thresholds, nlei, label='nlei')
        #plt.plot(thresholds, naive, label='naive')
        #plt.legend()
        #plt.show()

        # -- assert that they match when the threshold isn't too high
        assert np.allclose(nlei, naive)


def test_log_ei_range():
    assert np.all(
        np.isfinite(
            [crit.logEI_gaussian(0, 1, thresh)
             for thresh in [-500, 0, 50, 100, 500, 5000]]))


def test_ucb():
    assert np.allclose(crit.UCB(0, 1, 1), 1)
    assert np.allclose(crit.UCB(0, 1, 2), 2)
    assert np.allclose(crit.UCB(0, 4, 1), 2)
    assert np.allclose(crit.UCB(1, 4, 1), 3)


# -- flake8

license: bsd-3-clause
hash: -1,589,826,703,282,367,200
line_mean: 29.919355
line_max: 73
alpha_frac: 0.542514
autogenerated: false
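The tests above compare hyperopt's analytic expected improvement against a Monte-Carlo estimate. Below is a standalone sketch of that same check, using the standard closed form for the expected improvement of a Gaussian over a threshold; it is written from scratch and only assumed to match what crit.EI_gaussian computes.

import numpy as np
from scipy import stats

def ei_gaussian(mean, var, thresh):
    # E[max(X - thresh, 0)] for X ~ N(mean, var)
    sigma = np.sqrt(var)
    z = (mean - thresh) / sigma
    return (mean - thresh) * stats.norm.cdf(z) + sigma * stats.norm.pdf(z)

rng = np.random.RandomState(0)
samples = rng.randn(200000) * 3.0 - 4.0                 # mean -4, variance 9
empirical = np.maximum(samples - (-2.0), 0.0).mean()    # Monte-Carlo estimate
print(empirical, ei_gaussian(-4.0, 9.0, -2.0))          # the two values agree closely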
repo_name: cython-testbed/pandas
path: pandas/tests/scalar/timestamp/test_timezones.py
copies: 1
size: 12514
content:
# -*- coding: utf-8 -*- """ Tests for Timestamp timezone-related methods """ from datetime import datetime, date, timedelta from distutils.version import LooseVersion import pytest import pytz from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError import dateutil from dateutil.tz import gettz, tzoffset import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import Timestamp, NaT from pandas.errors import OutOfBoundsDatetime class TestTimestampTZOperations(object): # -------------------------------------------------------------- # Timestamp.tz_localize def test_tz_localize_pushes_out_of_bounds(self): # GH#12677 # tz_localize that pushes away from the boundary is OK pac = Timestamp.min.tz_localize('US/Pacific') assert pac.value > Timestamp.min.value pac.tz_convert('Asia/Tokyo') # tz_convert doesn't change value with pytest.raises(OutOfBoundsDatetime): Timestamp.min.tz_localize('Asia/Tokyo') # tz_localize that pushes away from the boundary is OK tokyo = Timestamp.max.tz_localize('Asia/Tokyo') assert tokyo.value < Timestamp.max.value tokyo.tz_convert('US/Pacific') # tz_convert doesn't change value with pytest.raises(OutOfBoundsDatetime): Timestamp.max.tz_localize('US/Pacific') def test_tz_localize_ambiguous_bool(self): # make sure that we are correctly accepting bool values as ambiguous # GH#14402 ts = Timestamp('2015-11-01 01:00:03') expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') with pytest.raises(pytz.AmbiguousTimeError): ts.tz_localize('US/Central') result = ts.tz_localize('US/Central', ambiguous=True) assert result == expected0 result = ts.tz_localize('US/Central', ambiguous=False) assert result == expected1 def test_tz_localize_ambiguous(self): ts = Timestamp('2014-11-02 01:00') ts_dst = ts.tz_localize('US/Eastern', ambiguous=True) ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False) assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600 with pytest.raises(ValueError): ts.tz_localize('US/Eastern', ambiguous='infer') # GH#8025 with tm.assert_raises_regex(TypeError, 'Cannot localize tz-aware Timestamp, ' 'use tz_convert for conversions'): Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo') with tm.assert_raises_regex(TypeError, 'Cannot convert tz-naive Timestamp, ' 'use tz_localize to localize'): Timestamp('2011-01-01').tz_convert('Asia/Tokyo') @pytest.mark.parametrize('stamp, tz', [ ('2015-03-08 02:00', 'US/Eastern'), ('2015-03-08 02:30', 'US/Pacific'), ('2015-03-29 02:00', 'Europe/Paris'), ('2015-03-29 02:30', 'Europe/Belgrade')]) def test_tz_localize_nonexistent(self, stamp, tz): # GH#13057 ts = Timestamp(stamp) with pytest.raises(NonExistentTimeError): ts.tz_localize(tz) with pytest.raises(NonExistentTimeError): ts.tz_localize(tz, errors='raise') assert ts.tz_localize(tz, errors='coerce') is NaT def test_tz_localize_errors_ambiguous(self): # GH#13057 ts = Timestamp('2015-11-1 01:00') with pytest.raises(AmbiguousTimeError): ts.tz_localize('US/Pacific', errors='coerce') @pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00', '2014-11-05 00:00']) def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture): tz = tz_aware_fixture ts = Timestamp(stamp) localized = ts.tz_localize(tz) assert localized == Timestamp(stamp, tz=tz) with pytest.raises(TypeError): localized.tz_localize(tz) reset = localized.tz_localize(None) assert reset == ts assert reset.tzinfo is None def 
test_tz_localize_ambiguous_compat(self): # validate that pytz and dateutil are compat for dst # when the transition happens naive = Timestamp('2013-10-27 01:00:00') pytz_zone = 'Europe/London' dateutil_zone = 'dateutil/Europe/London' result_pytz = naive.tz_localize(pytz_zone, ambiguous=0) result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0) assert result_pytz.value == result_dateutil.value assert result_pytz.value == 1382835600000000000 if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'): # dateutil 2.6 buggy w.r.t. ambiguous=0 # see gh-14621 # see https://github.com/dateutil/dateutil/issues/321 assert (result_pytz.to_pydatetime().tzname() == result_dateutil.to_pydatetime().tzname()) assert str(result_pytz) == str(result_dateutil) elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'): # fixed ambiguous behavior assert result_pytz.to_pydatetime().tzname() == 'GMT' assert result_dateutil.to_pydatetime().tzname() == 'BST' assert str(result_pytz) != str(result_dateutil) # 1 hour difference result_pytz = naive.tz_localize(pytz_zone, ambiguous=1) result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1) assert result_pytz.value == result_dateutil.value assert result_pytz.value == 1382832000000000000 # dateutil < 2.6 is buggy w.r.t. ambiguous timezones if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'): # see gh-14621 assert str(result_pytz) == str(result_dateutil) assert (result_pytz.to_pydatetime().tzname() == result_dateutil.to_pydatetime().tzname()) @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), gettz('US/Eastern'), 'US/Eastern', 'dateutil/US/Eastern']) def test_timestamp_tz_localize(self, tz): stamp = Timestamp('3/11/2012 04:00') result = stamp.tz_localize(tz) expected = Timestamp('3/11/2012 04:00', tz=tz) assert result.hour == expected.hour assert result == expected # ------------------------------------------------------------------ # Timestamp.tz_convert @pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00', '2014-11-05 00:00']) def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture): tz = tz_aware_fixture ts = Timestamp(stamp, tz='UTC') converted = ts.tz_convert(tz) reset = converted.tz_convert(None) assert reset == Timestamp(stamp) assert reset.tzinfo is None assert reset == converted.tz_convert('UTC').tz_localize(None) @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) def test_astimezone(self, tzstr): # astimezone is an alias for tz_convert, so keep it with # the tz_convert tests utcdate = Timestamp('3/11/2012 22:00', tz='UTC') expected = utcdate.tz_convert(tzstr) result = utcdate.astimezone(tzstr) assert expected == result assert isinstance(result, Timestamp) @td.skip_if_windows def test_tz_convert_utc_with_system_utc(self): from pandas._libs.tslibs.timezones import maybe_get_tz # from system utc to real utc ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) # check that the time hasn't changed. assert ts == ts.tz_convert(dateutil.tz.tzutc()) # from system utc to real utc ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) # check that the time hasn't changed. 
assert ts == ts.tz_convert(dateutil.tz.tzutc()) # ------------------------------------------------------------------ # Timestamp.__init__ with tz str or tzinfo def test_timestamp_constructor_tz_utc(self): utc_stamp = Timestamp('3/11/2012 05:00', tz='utc') assert utc_stamp.tzinfo is pytz.utc assert utc_stamp.hour == 5 utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc') assert utc_stamp.hour == 5 def test_timestamp_to_datetime_tzoffset(self): tzinfo = tzoffset(None, 7200) expected = Timestamp('3/11/2012 04:00', tz=tzinfo) result = Timestamp(expected.to_pydatetime()) assert expected == result def test_timestamp_constructor_near_dst_boundary(self): # GH#11481 & GH#15777 # Naive string timestamps were being localized incorrectly # with tz_convert_single instead of tz_localize_to_utc for tz in ['Europe/Brussels', 'Europe/Prague']: result = Timestamp('2015-10-25 01:00', tz=tz) expected = Timestamp('2015-10-25 01:00').tz_localize(tz) assert result == expected with pytest.raises(pytz.AmbiguousTimeError): Timestamp('2015-10-25 02:00', tz=tz) result = Timestamp('2017-03-26 01:00', tz='Europe/Paris') expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris') assert result == expected with pytest.raises(pytz.NonExistentTimeError): Timestamp('2017-03-26 02:00', tz='Europe/Paris') # GH#11708 naive = Timestamp('2015-11-18 10:00:00') result = naive.tz_localize('UTC').tz_convert('Asia/Kolkata') expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata') assert result == expected # GH#15823 result = Timestamp('2017-03-26 00:00', tz='Europe/Paris') expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris') assert result == expected result = Timestamp('2017-03-26 01:00', tz='Europe/Paris') expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris') assert result == expected with pytest.raises(pytz.NonExistentTimeError): Timestamp('2017-03-26 02:00', tz='Europe/Paris') result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris') naive = Timestamp(result.value) expected = naive.tz_localize('UTC').tz_convert('Europe/Paris') assert result == expected result = Timestamp('2017-03-26 03:00', tz='Europe/Paris') expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris') assert result == expected @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), gettz('US/Eastern'), 'US/Eastern', 'dateutil/US/Eastern']) def test_timestamp_constructed_by_date_and_tz(self, tz): # GH#2993, Timestamp cannot be constructed by datetime.date # and tz correctly result = Timestamp(date(2012, 3, 11), tz=tz) expected = Timestamp('3/11/2012', tz=tz) assert result.hour == expected.hour assert result == expected @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), gettz('US/Eastern'), 'US/Eastern', 'dateutil/US/Eastern']) def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz): # GH#1389 # 4 hours before DST transition stamp = Timestamp('3/10/2012 22:00', tz=tz) result = stamp + timedelta(hours=6) # spring forward, + "7" hours expected = Timestamp('3/11/2012 05:00', tz=tz) assert result == expected def test_timestamp_timetz_equivalent_with_datetime_tz(self, tz_naive_fixture): # GH21358 if tz_naive_fixture is not None: tz = dateutil.tz.gettz(tz_naive_fixture) else: tz = None stamp = Timestamp('2018-06-04 10:20:30', tz=tz) _datetime = datetime(2018, 6, 4, hour=10, minute=20, second=30, tzinfo=tz) result = stamp.timetz() expected = _datetime.timetz() assert result == expected
license: bsd-3-clause
hash: -6,155,344,485,097,374,000
line_mean: 39.498382
line_max: 79
alpha_frac: 0.590698
autogenerated: false
repo_name: zhuhuifeng/PyML
path: mla/neuralnet/layers/basic.py
copies: 1
size: 4512
content:
import autograd.numpy as np from autograd import elementwise_grad from mla.neuralnet.activations import get_activation from mla.neuralnet.parameters import Parameters np.random.seed(9999) class Layer(object): def setup(self, X_shape): """Allocates initial weights.""" pass def forward_pass(self, x): raise NotImplementedError() def backward_pass(self, delta): raise NotImplementedError() def shape(self, x_shape): """Returns shape of the current layer.""" raise NotImplementedError() class ParamMixin(object): @property def parameters(self): return self._params class PhaseMixin(object): _train = False @property def is_training(self): return self._train @is_training.setter def is_training(self, is_train=True): self._train = is_train @property def is_testing(self): return not self._train @is_testing.setter def is_testing(self, is_test=True): self._train = not is_test class Dense(Layer, ParamMixin): def __init__(self, output_dim, parameters=None, ): """A fully connected layer. Parameters ---------- output_dim : int """ self._params = parameters self.output_dim = output_dim self.last_input = None if parameters is None: self._params = Parameters() def setup(self, x_shape): self._params.setup_weights((x_shape[1], self.output_dim)) def forward_pass(self, X): self.last_input = X return self.weight(X) def weight(self, X): W = np.dot(X, self._params['W']) return W + self._params['b'] def backward_pass(self, delta): dW = np.dot(self.last_input.T, delta) db = np.sum(delta, axis=0) # Update gradient values self._params.update_grad('W', dW) self._params.update_grad('b', db) return np.dot(delta, self._params['W'].T) def shape(self, x_shape): return x_shape[0], self.output_dim class Activation(Layer): def __init__(self, name): self.last_input = None self.activation = get_activation(name) # Derivative of activation function self.activation_d = elementwise_grad(self.activation) def forward_pass(self, X): self.last_input = X return self.activation(X) def backward_pass(self, delta): return self.activation_d(self.last_input) * delta def shape(self, x_shape): return x_shape class Dropout(Layer, PhaseMixin): """Randomly set a fraction of `p` inputs to 0 at each training update.""" def __init__(self, p=0.1): self.p = p self._mask = None def forward_pass(self, X): assert self.p > 0 if self.is_training: self._mask = np.random.uniform(size=X.shape) > self.p y = X * self._mask else: y = X * (1.0 - self.p) return y def backward_pass(self, delta): return delta * self._mask def shape(self, x_shape): return x_shape class TimeStepSlicer(Layer): """Take a specific time step from 3D tensor.""" def __init__(self, step=-1): self.step = step def forward_pass(self, x): return x[:, self.step, :] def backward_pass(self, delta): return np.repeat(delta[:, np.newaxis, :], 2, 1) def shape(self, x_shape): return x_shape[0], x_shape[2] class TimeDistributedDense(Layer): """Apply regular Dense layer to every timestep.""" def __init__(self, output_dim): self.output_dim = output_dim self.n_timesteps = None self.dense = None self.input_dim = None def setup(self, X_shape): self.dense = Dense(self.output_dim) self.dense.setup((X_shape[0], X_shape[2])) self.input_dim = X_shape[2] def forward_pass(self, X): n_timesteps = X.shape[1] X = X.reshape(-1, X.shape[-1]) y = self.dense.forward_pass(X) y = y.reshape((-1, n_timesteps, self.output_dim)) return y def backward_pass(self, delta): n_timesteps = delta.shape[1] X = delta.reshape(-1, delta.shape[-1]) y = self.dense.backward_pass(X) y = y.reshape((-1, n_timesteps, self.input_dim)) return y @property 
def parameters(self): return self.dense._params def shape(self, x_shape): return x_shape[0], x_shape[1], self.output_dim
license: apache-2.0
hash: 4,052,290,290,537,210,400
line_mean: 23.791209
line_max: 77
alpha_frac: 0.586436
autogenerated: false
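A small, package-independent sketch of the dropout convention used by the Dropout layer in the record above: a Bernoulli mask at training time and a deterministic (1 - p) rescaling at test time, so the expected activation matches across phases.

import numpy as np

rng = np.random.RandomState(0)
X = np.ones((4, 5))
p = 0.1

mask = rng.uniform(size=X.shape) > p   # keep each unit with probability 1 - p
train_out = X * mask                   # what forward_pass does while training
test_out = X * (1.0 - p)               # what forward_pass does otherwise
print(train_out.mean(), test_out.mean())   # both close to 0.9 in expectation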
repo_name: rykov8/ssd_keras
path: ssd_layers.py
copies: 3
size: 6719
content:
"""Some special pupropse layers for SSD.""" import keras.backend as K from keras.engine.topology import InputSpec from keras.engine.topology import Layer import numpy as np import tensorflow as tf class Normalize(Layer): """Normalization layer as described in ParseNet paper. # Arguments scale: Default feature scale. # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if dim_ordering='th' or 4D tensor with shape: `(samples, rows, cols, channels)` if dim_ordering='tf'. # Output shape Same as input # References http://cs.unc.edu/~wliu/papers/parsenet.pdf #TODO Add possibility to have one scale for all features. """ def __init__(self, scale, **kwargs): if K.image_dim_ordering() == 'tf': self.axis = 3 else: self.axis = 1 self.scale = scale super(Normalize, self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) init_gamma = self.scale * np.ones(shape) self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name)) self.trainable_weights = [self.gamma] def call(self, x, mask=None): output = K.l2_normalize(x, self.axis) output *= self.gamma return output class PriorBox(Layer): """Generate the prior boxes of designated sizes and aspect ratios. # Arguments img_size: Size of the input image as tuple (w, h). min_size: Minimum box size in pixels. max_size: Maximum box size in pixels. aspect_ratios: List of aspect ratios of boxes. flip: Whether to consider reverse aspect ratios. variances: List of variances for x, y, w, h. clip: Whether to clip the prior's coordinates such that they are within [0, 1]. # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if dim_ordering='th' or 4D tensor with shape: `(samples, rows, cols, channels)` if dim_ordering='tf'. # Output shape 3D tensor with shape: (samples, num_boxes, 8) # References https://arxiv.org/abs/1512.02325 #TODO Add possibility not to have variances. 
Add Theano support """ def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None, flip=True, variances=[0.1], clip=True, **kwargs): if K.image_dim_ordering() == 'tf': self.waxis = 2 self.haxis = 1 else: self.waxis = 3 self.haxis = 2 self.img_size = img_size if min_size <= 0: raise Exception('min_size must be positive.') self.min_size = min_size self.max_size = max_size self.aspect_ratios = [1.0] if max_size: if max_size < min_size: raise Exception('max_size must be greater than min_size.') self.aspect_ratios.append(1.0) if aspect_ratios: for ar in aspect_ratios: if ar in self.aspect_ratios: continue self.aspect_ratios.append(ar) if flip: self.aspect_ratios.append(1.0 / ar) self.variances = np.array(variances) self.clip = True super(PriorBox, self).__init__(**kwargs) def get_output_shape_for(self, input_shape): num_priors_ = len(self.aspect_ratios) layer_width = input_shape[self.waxis] layer_height = input_shape[self.haxis] num_boxes = num_priors_ * layer_width * layer_height return (input_shape[0], num_boxes, 8) def call(self, x, mask=None): if hasattr(x, '_keras_shape'): input_shape = x._keras_shape elif hasattr(K, 'int_shape'): input_shape = K.int_shape(x) layer_width = input_shape[self.waxis] layer_height = input_shape[self.haxis] img_width = self.img_size[0] img_height = self.img_size[1] # define prior boxes shapes box_widths = [] box_heights = [] for ar in self.aspect_ratios: if ar == 1 and len(box_widths) == 0: box_widths.append(self.min_size) box_heights.append(self.min_size) elif ar == 1 and len(box_widths) > 0: box_widths.append(np.sqrt(self.min_size * self.max_size)) box_heights.append(np.sqrt(self.min_size * self.max_size)) elif ar != 1: box_widths.append(self.min_size * np.sqrt(ar)) box_heights.append(self.min_size / np.sqrt(ar)) box_widths = 0.5 * np.array(box_widths) box_heights = 0.5 * np.array(box_heights) # define centers of prior boxes step_x = img_width / layer_width step_y = img_height / layer_height linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x, layer_width) liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y, layer_height) centers_x, centers_y = np.meshgrid(linx, liny) centers_x = centers_x.reshape(-1, 1) centers_y = centers_y.reshape(-1, 1) # define xmin, ymin, xmax, ymax of prior boxes num_priors_ = len(self.aspect_ratios) prior_boxes = np.concatenate((centers_x, centers_y), axis=1) prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_)) prior_boxes[:, ::4] -= box_widths prior_boxes[:, 1::4] -= box_heights prior_boxes[:, 2::4] += box_widths prior_boxes[:, 3::4] += box_heights prior_boxes[:, ::2] /= img_width prior_boxes[:, 1::2] /= img_height prior_boxes = prior_boxes.reshape(-1, 4) if self.clip: prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0) # define variances num_boxes = len(prior_boxes) if len(self.variances) == 1: variances = np.ones((num_boxes, 4)) * self.variances[0] elif len(self.variances) == 4: variances = np.tile(self.variances, (num_boxes, 1)) else: raise Exception('Must provide one or four variances.') prior_boxes = np.concatenate((prior_boxes, variances), axis=1) prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0) if K.backend() == 'tensorflow': pattern = [tf.shape(x)[0], 1, 1] prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern) elif K.backend() == 'theano': #TODO pass return prior_boxes_tensor
license: mit
hash: 9,192,958,743,215,568,000
line_mean: 36.121547
line_max: 78
alpha_frac: 0.564072
autogenerated: false
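A quick numpy sketch of the prior-box center grid that PriorBox.call builds in the record above, for a toy 3x3 feature map on a 300x300 input; the sizes here are arbitrary illustrations, not the SSD defaults.

import numpy as np

img_width = img_height = 300
layer_width = layer_height = 3
step_x, step_y = img_width / layer_width, img_height / layer_height

linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x, layer_width)
liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y, layer_height)
centers_x, centers_y = np.meshgrid(linx, liny)
print(np.c_[centers_x.ravel(), centers_y.ravel()])  # nine box centers, in pixels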
repo_name: OmnesRes/pan_cancer
path: paper/cox_regression/KIRC/patient_info.py
copies: 1
size: 6241
content:
## A script for extracting info about the patients used in the analysis ## Load necessary modules from rpy2 import robjects as ro import numpy as np import os ro.r('library(survival)') ##This call will only work if you are running python from the command line. ##If you are not running from the command line manually type in your paths. BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt')) f.readline() f.readline() f.readline() data=[i.split('\t') for i in f] ## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent ## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data. ## This required an empty value in the list initialization. ## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...] clinical=[['','','']] for i in data: try: if clinical[-1][0]==i[0]: if i[8]=='Alive': clinical[-1]=[i[0],int(i[9]),'Alive'] elif i[8]=='Dead': clinical[-1]=[i[0],int(i[10]),'Dead'] else: pass else: if i[8]=='Alive': clinical.append([i[0],int(i[9]),'Alive']) elif i[8]=='Dead': clinical.append([i[0],int(i[10]),'Dead']) else: pass except: pass ## Removing the empty value. clinical=clinical[1:] ## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade. more_clinical={} grade_dict={} grade_dict['G1']=1 grade_dict['G2']=2 grade_dict['G3']=3 grade_dict['G4']=4 sex_dict={} sex_dict['MALE']=0 sex_dict['FEMALE']=1 ## The "clinical_patient" file can also contain patients not listed in the follow_up files. ## In these cases the clinical data for these patients gets appended to a new clinical list. f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt')) f.readline() f.readline() f.readline() clinical4=[] data=[i.split('\t') for i in f] for i in data: try: more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])] if i[24]=='Alive': clinical4.append([i[0],int(i[25]),'Alive']) elif i[24]=='Dead': clinical4.append([i[0],int(i[26]),'Dead']) else: pass except: pass new_clinical=[] ##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files ##All the clinical data is merged checking which data is the most up to date for i in clinical4: if i[0] not in [j[0] for j in clinical]: new_clinical.append(i) else: if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]: new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])]) else: new_clinical.append(i) ##also do the reverse since clinical can contain patients not included in clinical4 for i in clinical: if i[0] not in [j[0] for j in new_clinical]: new_clinical.append(i) ## only patients who had a follow up time greater than 0 days are included in the analysis clinical=[i for i in new_clinical if i[1]>0] final_clinical=[] ## A new list containing both follow up times and grade, sex, and age is constructed. ## Only patients with grade, sex, and age information are included. ## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...] 
for i in clinical: if i[0] in more_clinical: final_clinical.append(i+more_clinical[i[0]]) ## Need to map the mRNA files to the correct patients ## The necessary information is included in the FILE_SAMPLE_MAP.txt file f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt')) f.readline() data=[i.strip().split() for i in f if i!='\n'] ## 01 indicates a primary tumor, and only primary tumors are included in this analysis TCGA_to_mrna={} for i in data: ## The normalized data files are used if 'genes.normalized_results' in i[0]: if i[1].split('-')[3][:-1]=='01': x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])]) TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]] clinical_and_files=[] ## We only care about patients that contained complete clinical information for i in final_clinical: if TCGA_to_mrna.has_key(i[0]): ## The mRNA files are added to the clinical list ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...] clinical_and_files.append(i+[TCGA_to_mrna[i[0]]]) else: pass ##print average age at diagnosis age=np.mean([i[5] for i in clinical_and_files]) ##print number of males males=len([i for i in clinical_and_files if i[4]==0]) ##print number of females females=len([i for i in clinical_and_files if i[4]==1]) ##to get the median survival we need to call survfit from r ##prepare variables for R ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files]) ##need to create a dummy variable group ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files]) ##need a vector for deaths death_dic={} death_dic['Alive']=0 death_dic['Dead']=1 ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files]) res=ro.r('survfit(Surv(times,died) ~ as.factor(group))') #the number of events(deaths) is the fourth column of the output deaths=str(res).split('\n')[-2].strip().split()[3] #the median survival time is the fifth column of the output median=str(res).split('\n')[-2].strip().split()[4] ##write data to a file f=open('patient_info.txt','w') f.write('Average Age') f.write('\t') f.write('Males') f.write('\t') f.write('Females') f.write('\t') f.write('Deaths') f.write('\t') f.write('Median Survival') f.write('\n') f.write(str(age)) f.write('\t') f.write(str(males)) f.write('\t') f.write(str(females)) f.write('\t') f.write(deaths) f.write('\t') f.write(median)
license: mit
hash: 3,009,862,005,212,880,400
line_mean: 28.300469
line_max: 132
alpha_frac: 0.645089
autogenerated: false
repo_name: l-althueser/NiMoNa_DCM16
path: DCM/programs/RK4.py
copies: 1
size: 1088
content:

# -*- coding: utf-8 -*-
"""
@author: Tobias Timo

Description: fourth-order Runge-Kutta method for solving ordinary first-order
differential equations. Returns the time evolution in matrix form.

Important: the dimension of the input parameter x_0 must match the model used.

Python version: 3.5.1
"""

import numpy as np


def RK4(f, theta, u, x_0, t0, T, dt):
    # Input: function, parameter set, stimulus, initial value (array),
    # start point, end point, time step size
    t = np.arange(t0, T, dt)  # time array

    # x = np.zeros((int(len(x_0)), int((T - t0) / dt + 1)))  # set size of the result matrix
    x = np.zeros((int(len(x_0)), len(t)))
    x[:, 0] = x_0  # initial conditions in the first column

    for i in range(0, int(np.size(x, 1)) - 1):
        k_1 = f(x, u, theta, i)
        k_2 = f(x + 0.5*dt*k_1, u, theta, i)
        k_3 = f(x + 0.5*dt*k_2, u, theta, i)
        k_4 = f(x + dt*k_3, u, theta, i)

        x[:, i+1] = x[:, i] + (dt/6.)*(k_1.T + 2*k_2.T + 2*k_3.T + k_4.T)

    return x

license: bsd-2-clause
hash: -8,869,651,187,784,231,000
line_mean: 29.111111
line_max: 105
alpha_frac: 0.553093
autogenerated: false
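A self-contained illustration of the classical RK4 update implemented above, applied to dx/dt = -x with analytic solution exp(-t); it deliberately sidesteps the DCM-specific calling convention f(x, u, theta, i) used in the record.

import numpy as np

def rk4_step(f, x, t, dt):
    k1 = f(t, x)
    k2 = f(t + 0.5*dt, x + 0.5*dt*k1)
    k3 = f(t + 0.5*dt, x + 0.5*dt*k2)
    k4 = f(t + dt, x + dt*k3)
    return x + (dt/6.0)*(k1 + 2*k2 + 2*k3 + k4)

f = lambda t, x: -x        # simple linear test equation
x, dt = 1.0, 0.1
for n in range(10):        # integrate from t=0 to t=1
    x = rk4_step(f, x, n*dt, dt)
print(x, np.exp(-1.0))     # should agree to roughly 1e-7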
repo_name: dingliu0305/Tree-Tensor-Networks-in-Machine-Learning
path: code/tsne_mnist.py
copies: 1
size: 3214
content:
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # import matplotlib as mpl # mpl.use('Agg') import matplotlib.pyplot as plt import numpy as np import pickle from matplotlib.backends.backend_pdf import PdfPages from sklearn.manifold import TSNE data_folder = "./data/mnist/" n_epochs = 3 bond_data = 2 bond_inner = 3 bond_label = 2 n_class = 2 n_train_each = 900 n_train = n_train_each * n_class n_test_each = 1000 n_test = n_test_each * 10 layer_units = [16, 8, 4, 2, 1] train_tn = False LAB1 = [0] * n_train_each LAB2 = [1] * n_train_each LAB = np.concatenate((LAB1, LAB2), axis=0) # ------------------------------------------------------------------------- # load training data print("loading data") input = open(data_folder + 'tsne.pkl', 'rb') ttn = pickle.load(input) #%% LAB1 = [0] * n_train_each LAB2 = [1] * n_train_each LAB = np.concatenate((LAB1, LAB2), axis=0) def squash_layer(contraction, which_layer): layer = [] for row in contraction[which_layer]: layer += [element.data for element in row] return np.vstack(layer).T def tsne(contraction, which_layer, per, lear, tags=LAB): selection = np.random.choice(n_train, size=n_train, replace=False) mf = TSNE(n_components=2, perplexity=per, learning_rate=lear, init='pca', n_iter=1200) M = squash_layer(contraction, which_layer) x = M[selection] x_embed = mf.fit_transform(x) TAGS = [] TAGS = tags[selection] return x_embed, TAGS def sort(contraction, which_layer, per, lear, tags=LAB): x_embed, TAGS = tsne(contraction, which_layer, per, lear, tags=LAB) CATS = [] DOGS = [] for i in range(len(TAGS)): if TAGS[i] == 0: CATS.append(x_embed[i]) if TAGS[i] == 1: DOGS.append(x_embed[i]) result = np.concatenate((CATS, DOGS), axis=0) return result #%% def plot(contraction, which_layer, per, lear, tags=LAB): result = sort(contraction, which_layer, per, lear, tags=LAB) fig = plt.figure() ax1 = fig.add_subplot(111) x = result[:, 0] y = result[:, 1] ax1.scatter(x[0:n_train_each], y[0:n_train_each], s=11, c='b', marker="o", label='Planes', alpha=0.5) ax1.scatter(x[n_train_each + 1:n_train], y[n_train_each + 1:n_train], s=11, c='r', marker="o", label='Horses', alpha=0.5) plt.legend(loc='upper right') plt.axis('off') # plt.show() pp = PdfPages('%s_P%s_L%s.pdf' % (which_layer, per, lear)) pp.savefig(fig) pp.close() return fig #%% def sweep(contraction, which_layer, per, tags=LAB): L = [200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750] f = [] for i in range(0, len(L)): f = plot(contraction, i, per, L[i], tags=LAB) return f #%% def sweep2(contraction, which_layer, tags=LAB): G = [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140] q = [] for i in range(0, len(G)): q = sweep(contraction, which_layer, G[i], tags=LAB) return q #%% def sweep3(contraction, per, lear, tags=LAB): m = [] for i in range(1, 5): m = sweep2(contraction, i, tags=LAB) return m for i in range(5): plot(ttn.contracted, i, 60, 400) plt.show()
license: mit
hash: 9,021,563,037,263,550,000
line_mean: 22.289855
line_max: 75
alpha_frac: 0.584941
autogenerated: false
repo_name: ominux/scikit-learn
path: examples/manifold/plot_compare_methods.py
copies: 4
size: 2211
content:

"""
=========================================
Comparison of Manifold Learning methods
=========================================

An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.

For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
"""

# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>

print __doc__

from time import time

import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter

from sklearn import manifold, datasets

n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points)
n_neighbors = 10
out_dim = 2

fig = pl.figure(figsize=(12, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
            % (1000, n_neighbors), fontsize=14)

try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(231, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
    ax.view_init(4, -72)
except:
    ax = fig.add_subplot(231, projection='3d')
    pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)

methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

for i, method in enumerate(methods):
    t0 = time()
    Y = manifold.LocallyLinearEmbedding(n_neighbors, out_dim,
                                        eigen_solver='auto',
                                        method=method).fit_transform(X)
    t1 = time()
    print "%s: %.2g sec" % (methods[i], t1 - t0)

    ax = fig.add_subplot(232 + i)
    pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
    pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    pl.axis('tight')

t0 = time()
Y = manifold.Isomap(n_neighbors, out_dim).fit_transform(X)
t1 = time()
print "Isomap: %.2g sec" % (t1 - t0)
ax = fig.add_subplot(236)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')

pl.show()

license: bsd-3-clause
hash: -8,009,370,989,050,230,000
line_mean: 29.708333
line_max: 71
alpha_frac: 0.620081
autogenerated: false
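The example above targets a very old scikit-learn and Python 2 (pylab, samples_generator, out_dim, print statements). A rough modern equivalent of a single panel is sketched below, with parameter names per current scikit-learn; treat them as assumptions if your version differs.

import matplotlib.pyplot as plt
from sklearn import datasets, manifold

X, color = datasets.make_s_curve(1000, random_state=0)
Y = manifold.LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                    method='standard').fit_transform(X)
plt.scatter(Y[:, 0], Y[:, 1], c=color, s=5)
plt.title('LLE embedding of the S-curve')
plt.show()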
repo_name: StongeEtienne/trimeshpy
path: trimeshpy/math/mesh_map.py
copies: 1
size: 6095
content:
# Etienne St-Onge from __future__ import division import numpy as np from scipy.sparse import csc_matrix from trimeshpy.math.util import square_length from trimeshpy.math.mesh_global import G_DTYPE # Mesh structure functions # Map ( Adjacency / Connectivity ) Functions # # number of vertices = n # number of triangles = m # number of edges = l = m*3 (directed edge) # # vertices array : n x 3 # v[i] = [ x, y, z ] # # triangles array : m x 3 # t[a] = [ v[i], v[j], v[k] ] # right handed triangles1111111111111 # # # Example : # # vj_ _ _ _ vo # /\ /\ # / \ tf / \ # / ta \ / te \ # vk/_ _ _ vi _ _ _\ vn # \ /\ / # \ tb / \ td / # \ / tc \ / # \/_ _ _ \/ # vl vm # # Vertices = [v[i] = [x_i, y_i, z_i], # v[j] = [x_j, y_j, z_j], # v[k] = [x_k, y_k, z_k], # v[l] = [x_l, y_l, z_l], # v[m] = [x_m, y_m, z_m], # v[n] = [x_n, y_n, z_n], # v[o] = [x_o, y_o, z_o]] # # Triangles = [t[a] = [i, j, k], # t[b] = [i, k, l], # t[c] = [i, l, m], # t[d] = [i, m, n], # t[e] = [i, n, o], # t[f] = [i, o, j]] # # triangle_vertex_map : m x n -> boolean, loss of orientation # t_v[] v[i] v[j] v[k] v[l] v[m] v[n] v[o] # t[a] 1 1 1 # t[b] 1 1 1 # t[c] 1 1 1 # t[d] 1 1 1 # t[e] 1 1 1 # t[f] 1 1 1 # # Edges Maps # edge_adjacency : n x n -> boolean, not symmetric if mesh not closed # e[,] v[i] v[j] v[k] v[l] v[m] v[n] v[o] # v[i] 1 1 1 1 1 1 # v[j] 1 1 # v[k] 1 1 # v[l] 1 1 # v[m] 1 1 # v[n] 1 1 # v[o] 1 1 # # edge_triangle_map : n x n -> triangle_index # e_t[,] v[i] v[j] v[k] v[l] v[m] v[n] v[o] # v[i] a b c d e f # v[j] f a # v[k] a b # v[l] b c # v[m] c d # v[n] d e # v[o] e f # # edge_opposing_vertex : n x n -> vertex_index # e_ov[] v[i] v[j] v[k] v[l] v[m] v[n] v[o] # v[i] k l m n o j # v[j] o i # v[k] j i # v[l] k i # v[m] l i # v[n] m i # v[o] n i # # edge_adjacency : n x n -> boolean (sparse connectivity matrix) # e[i,j] = v[i] -> v[j] = { 1, if connected } def edge_adjacency(triangles, vertices): vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]]) vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]]) values = np.ones_like(vts_i, dtype=np.bool) vv_map = csc_matrix((values, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0])) return vv_map # edge_sqr_length : n x n -> float (sparse connectivity matrix) # e[i,j] = v[i] -> v[j] = { || v[i] - v[j] ||^2, if connected } def edge_sqr_length(triangles, vertices): vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]]) vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]]) values = square_length(vertices[vts_i] - vertices[vts_j]) vv_map = csc_matrix((values, (vts_i, vts_j)), shape=( vertices.shape[0], vertices.shape[0]), dtype=G_DTYPE) return vv_map # edge_length : n x n -> float (sparse connectivity matrix) # e[i,j] = v[i] -> v[j] = { || v[i] - v[j] ||, if connected } def edge_length(triangles, vertices): vv_map = edge_sqr_length(triangles, vertices) vv_map.data = np.sqrt(vv_map.data) return vv_map # edge_sqr_length : n x n -> float (sparse connectivity matrix) # e[i,j] = { edge_length, if l2_weighted } def edge_map(triangles, vertices, l2_weighted=False): if l2_weighted: return edge_length(triangles, vertices) else: return edge_adjacency(triangles, vertices) # edge_triangle_map : n x n -> triangle_index (sparse connectivity matrix) # e_t[i,j] = e[i,j] -> t[a] = { 1, if triangle[a] is compose of edge[i,j] } def edge_triangle_map(triangles, vertices): vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]]) vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]]) 
triangles_index = np.tile(np.arange(len(triangles)), 3) vv_t_map = csc_matrix((triangles_index, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0])) return vv_t_map # edge_opposing_vertex : n x n -> vertex_index (int) (sparse co matrix) # e[i,j] = v[i],v[j] = { v[k], if v[i],v[j],v[k] triangle exist } def edge_opposing_vertex(triangles, vertices): vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]]) vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]]) vts_k = np.hstack([triangles[:, 2], triangles[:, 0], triangles[:, 1]]) vv_v_map = csc_matrix((vts_k, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0])) return vv_v_map # triangle_vertex_map : m x n -> bool (sparse connectivity matrix) # t_v[i,a] = t[a] <-> v[i] = { 1, if triangle[a] is compose of vertex[i] } def triangle_vertex_map(triangles, vertices): triangles_index = np.repeat(np.arange(len(triangles)), 3) vertices_index = np.hstack(triangles) values = np.ones_like(triangles_index, dtype=np.bool) tv_map = csc_matrix((values, (triangles_index, vertices_index)), shape=(len(triangles), vertices.shape[0])) return tv_map def vertices_degree(triangles, vertices): tv_matrix = triangle_vertex_map(triangles, vertices) return np.squeeze(np.asarray(tv_matrix.sum(0)))
license: mit
hash: -4,785,637,220,778,597,000
line_mean: 34.028736
line_max: 78
alpha_frac: 0.474815
autogenerated: false
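A tiny, self-contained check of the directed edge map built by edge_adjacency in the record above: two right-handed triangles sharing the edge between vertices 0 and 2, using scipy directly rather than the trimeshpy wrappers.

import numpy as np
from scipy.sparse import csc_matrix

triangles = np.array([[0, 1, 2], [0, 2, 3]])
vertices = np.zeros((4, 3))   # positions are irrelevant for pure connectivity

vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])
vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])
adj = csc_matrix((np.ones_like(vts_i, dtype=bool), (vts_i, vts_j)),
                 shape=(len(vertices), len(vertices)))
print(adj.toarray().astype(int))
# marks the directed edges 0->1, 1->2, 2->0 of the first triangle
# and 0->2, 2->3, 3->0 of the second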
repo_name: hosseinsadeghi/ultracold-ions
path: test/test_CoulombAcc.py
copies: 2
size: 4459
content:
import uci.CoulombAcc as uci import numpy as np import pyopencl as cl import pyopencl.array as cl_array testCtx = cl.create_some_context(interactive = True) testQueue = cl.CommandQueue(testCtx) def test_Constructor(): coulomb_acc = uci.CoulombAcc() def test_ForceOnSingleParticleIsZero(): coulomb_acc = uci.CoulombAcc(testCtx, testQueue) one = np.ones(1) ax = np.zeros(1) ay = np.zeros(1) az = np.zeros(1) xd = cl_array.to_device(testQueue, one) yd = cl_array.to_device(testQueue, one) zd = cl_array.to_device(testQueue, one) vxd = cl_array.to_device(testQueue, one) vyd = cl_array.to_device(testQueue, one) vzd = cl_array.to_device(testQueue, one) qd = cl_array.to_device(testQueue, one) md = cl_array.to_device(testQueue, one) axd = cl_array.to_device(testQueue, ax) ayd = cl_array.to_device(testQueue, ay) azd = cl_array.to_device(testQueue, az) coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md, axd, ayd, azd, 0) axd.get(testQueue, ax) ayd.get(testQueue, ay) azd.get(testQueue, az) assert ax[0] == 0 assert ay[0] == 0 assert az[0] == 0 def test_TwoParticlesWithEqualChargeRepelEachOther(): coulomb_acc = uci.CoulombAcc(testCtx, testQueue) one = np.ones(2) ax = np.zeros(2) ay = np.zeros(2) az = np.zeros(2) x = np.array([0.1, 1]) y = np.array([0.2, 2.3]) z = np.array([0.3, 2.7]) xd = cl_array.to_device(testQueue, x) yd = cl_array.to_device(testQueue, y) zd = cl_array.to_device(testQueue, z) vxd = cl_array.to_device(testQueue, one) vyd = cl_array.to_device(testQueue, one) vzd = cl_array.to_device(testQueue, one) qd = cl_array.to_device(testQueue, one) md = cl_array.to_device(testQueue, one) axd = cl_array.to_device(testQueue, ax) ayd = cl_array.to_device(testQueue, ay) azd = cl_array.to_device(testQueue, az) coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md, axd, ayd, azd, 0) axd.get(testQueue, ax) ayd.get(testQueue, ay) azd.get(testQueue, az) assert ax[0] != 0 assert np.abs(ax[0] + ax[1]) < 1.0e-6 assert np.abs(ay[0] + ay[1]) < 1.0e-6 assert np.abs(az[0] + az[1]) < 1.0e-6 def reference_solution(x, y, z, vx, vy, vz, q, m, ax, ay, az): epsilon0 = 8.854187817e-12 for i in range(x.size): for j in range(x.size): prefactor = 1.0 / (4.0 * np.pi * epsilon0) * q[i] * q[j] r = np.sqrt( (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]) + (z[i] - z[j]) * (z[i] - z[j]) + 1.0e-20 ) rCubed = np.power(r, 3.0) ax[i] += prefactor * (x[i] - x[j]) / rCubed / m[i] ay[i] += prefactor * (y[i] - y[j]) / rCubed / m[i] az[i] += prefactor * (z[i] - z[j]) / rCubed / m[i] def compareWithReferenceSol(n): coulomb_acc = uci.CoulombAcc(testCtx, testQueue) x = np.random.random_sample(n) - 0.5 y = np.random.random_sample(n) - 0.5 z = np.random.random_sample(n) - 0.5 vx = np.random.random_sample(n) - 0.5 vy = np.random.random_sample(n) - 0.5 vz = np.random.random_sample(n) - 0.5 q = np.random.random_sample(n) - 0.5 m = np.random.random_sample(n) - 0.5 ax = np.zeros(n) ay = np.zeros(n) az = np.zeros(n) xd = cl_array.to_device(testQueue, x) yd = cl_array.to_device(testQueue, y) zd = cl_array.to_device(testQueue, z) vxd = cl_array.to_device(testQueue, vx) vyd = cl_array.to_device(testQueue, vy) vzd = cl_array.to_device(testQueue, vz) qd = cl_array.to_device(testQueue, q) md = cl_array.to_device(testQueue, m) axd = cl_array.to_device(testQueue, ax) ayd = cl_array.to_device(testQueue, ay) azd = cl_array.to_device(testQueue, az) coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md, axd, ayd, azd, 0) axd.get(testQueue, ax) ayd.get(testQueue, ay) azd.get(testQueue, az) ax_ref = np.zeros(n) ay_ref = np.zeros(n) az_ref 
= np.zeros(n) reference_solution(x, y, z, vx, vy, vz, q, m, ax_ref, ay_ref, az_ref) for i in range(n): assert np.abs(ax[i] - ax_ref[i]) / ( np.abs(ax[i]) + np.abs(ax_ref[i])) < 1.0e-6 def test_SmallSystem(): compareWithReferenceSol(10) def test_PowerOfTwo(): compareWithReferenceSol(128)
license: mit
hash: -2,679,120,046,525,874,700
line_mean: 30.401408
line_max: 73
alpha_frac: 0.578381
autogenerated: false
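In the notation of reference_solution in the record above, the acceleration being checked is the pairwise Coulomb term; the 1e-20 inside the root is the softening the code adds so the i = j term does not divide by zero:

$$\mathbf{a}_i = \frac{1}{4\pi\varepsilon_0\, m_i} \sum_j q_i q_j\, \frac{\mathbf{r}_i - \mathbf{r}_j}{\bigl(\lVert \mathbf{r}_i - \mathbf{r}_j \rVert^2 + 10^{-20}\bigr)^{3/2}}$$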
repo_name: hughperkins/gpu-experiments
path: gpuexperiments/occupancy_dyn_graphs.py
copies: 1
size: 2094
content:

"""
Try using dynamic shared memory, see if gets optimized away, or affects occupancy
"""
from __future__ import print_function, division

import argparse
import string
import numpy as np
import os
import matplotlib.pyplot as plt
plt.rcdefaults()
import matplotlib.pyplot as plt
from os.path import join

parser = argparse.ArgumentParser()
parser.add_argument('--devicename')
args = parser.parse_args()

times = []
assert args.devicename is not None
deviceNameSimple = args.devicename
f = open('results/occupancy_dyn_%s.tsv' % args.devicename, 'r')
f.readline()
for line in f:
    split_line = line.split('\t')
    times.append({'name': split_line[0], 'time': float(split_line[1]), 'flops': float(split_line[2])})
f.close()

X32_list = []
Y32_list = []
X64_list = []
Y64_list = []
for timeinfo in times:
    name = timeinfo['name']
    if not name.startswith('k1_g1024_b'):
        continue
    block = int(name.split('_')[2].replace('b', ''))
    x = int(name.split('_')[-1].replace('s', ''))
    y = timeinfo['flops']
    if block == 32:
        X32_list.append(x)
        Y32_list.append(y)
    elif block == 64:
        X64_list.append(x)
        Y64_list.append(y)

X32 = np.array(X32_list)
X64 = np.array(X64_list)
Y32 = np.array(Y32_list)
Y64 = np.array(Y64_list)

plt.plot(X32, Y32, label='blocksize 32')
plt.plot(X64, Y64, label='blocksize 64')
plt.axis([0, max(X32), 0, max(Y64)])
plt.title(deviceNameSimple)
plt.xlabel('Shared memory per block (KiB)')
plt.ylabel('GFLOPS')
legend = plt.legend(loc='upper right')  # fontsize='x-large')
plt.savefig('/tmp/occupancy_by_shared_%s.png' % deviceNameSimple, dpi=150)
plt.close()

X_list = []
Y_list = []
for timeinfo in times:
    name = timeinfo['name']
    if not name.startswith('kernel_bsm'):
        continue
    X_list.append(int(name.split('bsm')[1].split(' ')[0]))
    Y_list.append(timeinfo['flops'])

X = np.array(X_list)
Y = np.array(Y_list)
plt.plot(X, Y)
plt.axis([0, max(X), 0, max(Y)])
plt.title(deviceNameSimple)
plt.xlabel('blocks per SM')
plt.ylabel('GFLOPS')
plt.savefig('/tmp/occupancy_%s.png' % deviceNameSimple, dpi=150)

license: bsd-2-clause
hash: -6,801,072,646,690,389,000
line_mean: 26.552632
line_max: 102
alpha_frac: 0.660936
autogenerated: false
repo_name: ljschumacher/tierpsy-tracker
path: tierpsy/analysis/stage_aligment/findStageMovement.py
copies: 2
size: 58507
content:
import numpy as np import warnings import tables from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name from tierpsy.helper.params import read_fps from tierpsy.analysis.stage_aligment.get_mask_diff_var import get_mask_diff_var def _matlab_std(x): if x.size <= 1: #in array of size 1 MATLAB returns 0 in the std while numpy nan return 0. else: #ddof=1 to have the same behaviour as MATLAB return np.nanstd(x, ddof=1) def getFrameDiffVar(masked_file, progress_refresh_rate_s=100): base_name = get_base_name(masked_file) progress_prefix = '{} Calculating variance of the difference between frames.'.format(base_name) with tables.File(masked_file, 'r') as fid: masks = fid.get_node('/mask') tot, w, h = masks.shape progress_time = TimeCounter(progress_prefix, tot) fps = read_fps(masked_file, dflt=25) progress_refresh_rate = int(round(fps*progress_refresh_rate_s)) img_var_diff = np.zeros(tot-1) frame_prev = masks[0] for ii in range(1, tot): frame_current = masks[ii] img_var_diff[ii-1] = get_mask_diff_var(frame_current, frame_prev) frame_prev = frame_current; if ii % progress_refresh_rate == 0: print_flush(progress_time.get_str(ii)) if tot>1: print_flush(progress_time.get_str(ii)) return img_var_diff def graythreshmat(I_ori): #reimplementation of the matlab graythresh for consistency #it convert the image into a uint8 if it is a double it assumes #it is between 0 and 1, I = I_ori.copy() #make nan zeros (that's what matlab does) I[np.isnan(I)]=0 assert np.all(I>=0) and np.all(I<=1) I = np.round(I*255).astype(np.uint8) if np.all(I == I[0]): #if all values are equal return 0 return 0 num_bins = 256; counts = np.bincount(I, minlength=num_bins); p = counts/np.sum(counts) omega = np.cumsum(p) mu = np.cumsum(p *(np.arange(1, num_bins+1))); mu_t = mu[-1] with warnings.catch_warnings(): warnings.simplefilter("ignore") sigma_b_squared = ((mu_t * omega - mu)**2) / (omega * (1 - omega)); if not np.all(np.isnan(sigma_b_squared)): maxval = np.nanmax(sigma_b_squared); idx = np.mean(np.where(sigma_b_squared == maxval)[0]); level = idx / (num_bins - 1); else: level = 0 return level def _get_small_otsu(frame_diffs, th): with warnings.catch_warnings(): warnings.filterwarnings("ignore") small_diffs = frame_diffs[frame_diffs < th]; small_th = np.nanmedian(small_diffs) + 3 * _matlab_std(small_diffs); return small_diffs, small_th def maxPeaksDistHeight(x, dist, height): """ %MAXPEAKSDISTHEIGHT Find the maximum peaks in a vector. The peaks are % separated by, at least, the given distance unless interrupted and are, at least, the given % height. % % [PEAKS INDICES] = MAXPEAKSDISTHEIGHT(X, DIST, HEIGHT) % % Input: % x - the vector to search for maximum peaks % dist - the minimum distance between peaks % height - the minimum height for peaks % % Output: % peaks - the maximum peaks % indices - the indices for the peaks % % % © Medical Research Council 2012 % You will not remove any copyright or other notices from the Software; % you must reproduce all copyright notices and other proprietary % notices on any copies of the Software. """ #% Is the vector larger than the search window? winSize = 2 * dist + 1; if x.size < winSize: peak = np.nanmax(x) if peak < height: return np.zeros(0), np.zeros(0) #initialize variables peaks = [] indices = [] im = None; #% the last maxima index ip = None; #% the current, potential, max peak index p = None; #% the current, potential, max peak value i = 0; #% the vector index #% Search for peaks. while i < x.size: #% Found a potential peak. 
if (x[i] >= height) and ((p is None) or (x[i] > p)): ip = i; p = x[i]; #% Test the potential peak. if (p is not None) and ((i - ip >= dist) or (i == x.size-1)): #% Check the untested values next to the previous maxima. if (im is not None) and (ip - im <= 2 * dist): #% Record the peak. if p > np.nanmax(x[(ip - dist):(im + dist+1)]): indices.append(ip); peaks.append(p); #% Record the maxima. im = ip; ip = i; p = x[ip]; #% Record the peak. else: indices.append(ip); peaks.append(p); im = ip; ip = i; p = x[ip]; #% Advance. i = i + 1; return np.array(peaks), np.array(indices) #%% def _initial_checks(mediaTimes, locations, delayFrames, fps): if fps < 0.1 or fps > 100: warnings.warn('WeirdFPS: recorded at {} frames/second. An unusual frame rate'.format(fps)) if mediaTimes.size > 0 and mediaTimes[0] != 0: raise ValueError('NoInitialMediaTime. The first media time must be 0') if not isinstance(delayFrames, int): delayFrames = int(delayFrames) #%% # Save the spare 0 media time location in case the corresponding # stage-movement, frame difference occured after the video started. spareZeroTimeLocation = []; # If there's more than one initial media time, use the latest one. if (mediaTimes.size > 1): i = 1; while (i < mediaTimes.size and mediaTimes[i] == 0): i = i + 1; if i > 1: spareZeroTimeLocation = locations[i - 2,:]; #% Dump the extraneous 0 media times and locations. mediaTimes = mediaTimes[i-1:] locations = locations[i-1:] #%% return mediaTimes, locations, delayFrames, fps, spareZeroTimeLocation def _norm_frame_diffs(frameDiffs): #% Are there enough frames? if np.sum(~np.isnan(frameDiffs)) < 2: raise ValueError('InsufficientFrames. The video must have at least 2, non-dropped frames'); #% No frame difference means the frame was dropped. frameDiffs[frameDiffs == 0] = np.nan; #% Normalize the frame differences and shift them over one to align them #% with the video frames. frameDiffs /= np.nanmax(frameDiffs) frameDiffs = np.insert(frameDiffs, 0 , frameDiffs[0]) return frameDiffs #%% #%% def _init_search(frameDiffs, gOtsuThr, gSmallDiffs, gSmallThr, mediaTimes, maxMoveFrames, fps): #%% #% The log file doesn't contain any stage movements. if mediaTimes.size < 2: warnings.warn('NoStageMovements. The stage never moves'); #% Are there any large frame-difference peaks? if gOtsuThr >= gSmallThr: _, indices = maxPeaksDistHeight(frameDiffs, maxMoveFrames-1, gOtsuThr); warnings.warn('UnexpectedPeaks. There are {} large frame-difference ' \ 'peaks even though the stage never moves'.format(indices.size)); return None #% Does the Otsu threshold separate the 99% of the small frame differences #% from the large ones? if gSmallDiffs.size==0 or gOtsuThr < gSmallThr: warnings.warn("NoGlobalOtsuThreshold. Using the Otsu method, as a whole, " \ "the frame differences don't appear to contain any distinguishably " \ "large peaks (corresponding to stage movements). Trying half of the " \ "maximum frame difference instead.") #% Try half the maximum frame difference as a threshold to distinguish large peaks. gOtsuThr = 0.5 gSmallDiffs, gSmallThr = _get_small_otsu(frameDiffs, gOtsuThr) #% Does a threshold at half the maximum frame difference separate the #% 99% of the small frame differences from the large ones? if gSmallDiffs.size==0 or gOtsuThr < gSmallThr: warnings.warn('NoGlobalThresholds. Cannot find a global threshold to ' \ 'distinguish the large frame-difference peaks.'); gOtsuThr = np.nan; gSmallThr = np.nan; #%% #% Pre-allocate memory. 
frames = np.zeros(frameDiffs.shape); #% stage movement status for frames movesI = np.full((mediaTimes.size, 2), -100, np.int) movesI[0,:] = 0; #% Compute the search boundary for the first frame-difference peak. maxMoveTime = maxMoveFrames / fps; #% maximum time a movement takes timeOff = maxMoveTime; #% the current media time offset peakI = 0; # the current stage movement peak's index prevPeakI = 0; # the previous stage-movement peak's index prevPeakEndI = 0; # the previous stage-movement peak's end index startI = 0; # the start index for our search endI = 2 * maxMoveFrames-1 #due to the different index from python and matlab endI = min(endI, frameDiffs.size-1); #% the end index for our search searchDiffs = frameDiffs[startI:endI+1]; #% Is the Otsu threshold large enough? otsuThr = graythreshmat(searchDiffs); isOtsu = otsuThr > gOtsuThr; #% false if no global Otsu if not isOtsu: # Does the Otsu threshold separate the 99% of the small frame # differences from the large ones? And, if there is a global small # threshold, is the Otsu threshold larger? smallDiffs, smallThr = _get_small_otsu(searchDiffs, otsuThr) isOtsu = (smallDiffs.size > 0) & \ np.any(~np.isnan(smallDiffs)) & \ (np.isnan(gSmallThr) | (otsuThr > gSmallThr)) & \ (otsuThr >= smallThr) # Does the global Otsu threshold pull out any peaks? if not isOtsu and \ not np.isnan(gOtsuThr) and \ np.any(searchDiffs > gOtsuThr): otsuThr = gOtsuThr; isOtsu = True; if isOtsu: #% Do the frame differences begin with a stage movement? indices, = np.where(searchDiffs > otsuThr); firstPeakI = indices[0]; if firstPeakI < maxMoveFrames: #% Find the largest frame-difference peak. peakI = np.nanargmax(frameDiffs[:maxMoveFrames]); prevPeakI = peakI; #% Compute the media time offset. timeOff = (peakI +1) / fps; # Is there a still interval before the first stage movement? if peakI > 0: i = peakI - 1; while i > 0: if frameDiffs[i] < gSmallThr and frameDiffs[i - 1] < gSmallThr: peakI = 0; break i -= 1 #% We reached the end. endI = peakI + maxMoveFrames; if endI >= frameDiffs.size-1: prevPeakEndI = frameDiffs.size; #% Find a temporary front end for a potential initial stage movement. else: searchDiffs = frameDiffs[peakI:endI+1]; # Does the search window contain multiple stage movements? if not (np.isnan(gOtsuThr) or np.isnan(gSmallThr)): foundMove = False; for i in range(searchDiffs.size): #% We found a still interval. if not foundMove and searchDiffs[i] < gSmallThr: foundMove = True; # We found the end of the still interval, cut off the rest. elif foundMove and searchDiffs[i] > gSmallThr: searchDiffs = searchDiffs[0:(i - 1)] break # Find a temporary front end for a potential initial stage movement. i = np.nanargmin(searchDiffs); peakFrontEndI = peakI + i; minDiff = searchDiffs[i] # If the temporary front end's frame difference is small, try to push # the front end backwards (closer to the stage movement). if minDiff <= gSmallThr: i = peakI while i < peakFrontEndI: if frameDiffs[i] <= gSmallThr: peakFrontEndI = i; break; i += 1 #% If the temporary front end's frame difference is large, try to #% push the front end forwards (further from the stage movement). 
elif minDiff >= gOtsuThr or \ (minDiff > gSmallThr and \ peakFrontEndI < endI and \ np.all(np.isnan(frameDiffs[(peakFrontEndI + 1):endI]))): peakFrontEndI = endI; prevPeakEndI = peakFrontEndI; #%% return frames, movesI, prevPeakI, prevPeakEndI, maxMoveTime, timeOff #%% def _get_search_diff(frameDiffs, prevPeakEndI, mediaTimeOffI, maxMoveFrames): startI = prevPeakEndI; # Compute the search boundary for matching frame-difference peaks. x1 = startI + 2 * abs(mediaTimeOffI - (startI+1)) x2 = max((startI+1), mediaTimeOffI) + maxMoveFrames - 1 endI = min(max(x1, x2), frameDiffs.size-1) searchDiffs = frameDiffs[startI:endI+1]; return searchDiffs, startI, endI #%% def get_otsu_thresh(frameDiffs, searchDiffs, gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr): #% Is the Otsu threshold large enough? otsuThr = graythreshmat(searchDiffs); isOtsu = otsuThr > prevSmallThr or otsuThr > gOtsuThr; if not isOtsu: #% Does the Otsu threshold separate the 99% of the small frame #% differences from the large ones? if np.isnan(prevSmallThr) or otsuThr > prevSmallThr or otsuThr > gSmallThr: smallDiffs, smallThr = _get_small_otsu(frameDiffs, otsuThr) isOtsu = (len(smallDiffs)>0) & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr); #% Try the global Otsu threshold or, if there is none, attempt to #% use half the search window's maximum frame difference. if not isOtsu: #% Try using half the search window's maximum frame difference. if np.isnan(gOtsuThr): otsuThr = np.nanmax(searchDiffs) / 2; #% Does the half-maximum threshold separate the 99% of the #% small frame differences from the large ones? smallDiffs, smallThr = _get_small_otsu(frameDiffs, otsuThr) isOtsu = smallDiffs & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr); #% Does the global Otsu threshold pull out any peaks? elif np.any(searchDiffs > gOtsuThr): otsuThr = gOtsuThr; isOtsu = True; #% Does the global Otsu threshold pull out any peaks? elif np.any(searchDiffs > prevOtsuThr): otsuThr = prevOtsuThr; isOtsu = True; return isOtsu, otsuThr def _get_peak_indices(frameDiffs, searchDiffs, isOtsu, otsuThr, gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr, maxMoveFrames): #% Match the media time stage movement to a peak. if not isOtsu: indices = []; else: #% Compute and set the global thresholds. if np.isnan(gOtsuThr): #% Use a small threshold at 99% of the small frame differences. smallDiffs, smallThr = _get_small_otsu(frameDiffs, gOtsuThr) #% Set the global thresholds. if otsuThr >= smallThr: gOtsuThr = otsuThr; gSmallThr = smallThr; #% Set the previous small threshold. if np.isnan(prevOtsuThr): prevOtsuThr = otsuThr; prevSmallThr = smallThr; #% Use the previous small threshold. elif not np.isnan(prevSmallThr): smallThr = prevSmallThr; #% Compute the local thresholds. else: otsuThr = min(otsuThr, gOtsuThr); smallThr = max(prevSmallThr, gSmallThr); if smallThr > otsuThr: smallThr = min(prevSmallThr, gSmallThr); #% Does the search window contain multiple stage movements? foundMove = False; for j in range(searchDiffs.size): #% We found a stage movement. if not foundMove and searchDiffs[j] > otsuThr: foundMove = True; #% We found the end of the stage movement, cut off the rest. elif foundMove and searchDiffs[j] < smallThr: searchDiffs = searchDiffs[0:j]; break; #% Find at least one distinguishably large peak. _, indices = maxPeaksDistHeight(searchDiffs, maxMoveFrames, otsuThr); return indices, prevOtsuThr, prevSmallThr #%% def findStageMovement(frameDiffs, mediaTimes, locations, delayFrames, fps): ''' %MODIFIED FROM SEGWORM AEJ. 
This help is outdated, I'll modify it later. AEJ
    %FINDSTAGEMOVEMENT Find stage movements in a worm experiment.
    %
    % The algorithm is as follows:
    %
    % 4. If there are no stage movements, we're done.
    %
    % 5. The log file sometimes contains more than one entry at 0 media time.
    % These represent stage movements that may have completed before the video
    % begins. Therefore, we don't use them but, instead, store them in case we
    % find their stage movement in the video frame differences.
    %
    % 6. The frame differences need to be aligned to the video frames.
    % Therefore, we copy the first one and shift the rest over so that the
    % frame differences are one-to-one with the video frames. Note that video
    % indexing starts at 0 while Matlab indexing begins at 1. Also note, due
    % to differentiation, large frame differences that occur at the beginning
    % of a stage movement now represent the first frame of movement. Large
    % frame differences that occur at the end of a stage movement now represent
    % the first non-movement frame.
    %
    % 7. Compute the global Otsu threshold for the frame-differences to
    % distinguish stage-movement peaks. Then compute a global non-movement
    % threshold by taking all values less than the Otsu, and computing 3
    % standard deviations from the median (approximately 99% of the small
    % values). Please note, stage movements ramp up/down to
    % accelerate/decelerate to/from the target speed. Therefore, the values
    % below the Otsu threshold are contaminated with stage movement
    % acceleration and deceleration. Fortunately, non-movement frames account
    % for more than 50% of the frame differences. Therefore, to avoid the stage
    % movement contamination, we use the median of the small values rather than
    % their mean when computing the global non-movement (small) threshold. If
    % this small threshold is greater than the Otsu, we've made a poor choice
    % and ignore both thresholds. Otherwise, these 2 global thresholds serve as
    % a backup to the local ones when distinguishing stage movements.
    %
    % 8. Occasionally, computing the global Otsu threshold fails. This occurs
    % when a long video has few stage movements. In this case, stage movements
    % appear to be rare outliers and the Otsu method minimizes in-group
    % variance by splitting the non-stage movement modality into two groups
    % (most likely periods of worm activity and inactivity). Under these
    % circumstances we attempt to use a global threshold at half the maximum
    % frame-difference variance. As detailed above, we test this threshold to
    % see whether it is sufficiently larger than 99% of the smaller movements.
    %
    % 9. When searching for stage movements, we use the same algorithm as the
    % one above (see step 7), over a smaller, well-defined search window, to
    % determine the local Otsu threshold. The local small threshold is computed
    % slightly differently (we use the mean instead of the median -- see step
    % 12 for an explanation). If the local Otsu threshold fails (it's smaller
    % than 99% of the values below it and smaller than the global Otsu), we
    % attempt to use the global one to see if it pulls out a peak.
    %
    % 10. A stage movement peak is defined as the largest value that exceeds
    % the minimum of the global and local Otsu thresholds. To avoid a situation
    % in which 2 stage movements occur within the same search window, we scan
    % the window for the first value exceeding the Otsu threshold and, if any
    % subsequent values drop below the minimum of global and local small
    % thresholds, we cut off the remainder of the window and ignore it.
    %
    % 11. 
Once a stage-movement peak is identified, we search for a temporary
    % back and front end to the movement. The stage movement must complete
    % within one delay time window (see step 2). Therefore, we search for the
    % minimum values, within one delay time window, before and after the peak.
    % The locations of these minimum values are the temporary back and front
    % ends for the stage movement. If either value is below the small
    % threshold, we may have overshot the movement and, therefore, attempt to
    % find a location closer to the peak. Similarly, if either value is greater
    % than the maximum of the global and local small thresholds and the
    % remaining values till the end of the window are NaNs or, if either value
    % is greater than the Otsu threshold, we may have undershot the movement
    % and, therefore, attempt to find a location further from the peak.
    %
    % 12. Using one stage movement's temporary front end and the subsequent
    % movement's temporary back end, we compute the small threshold. This
    % interval is assumed to have no stage motion and, therefore, represents
    % frame-difference variance from a non-movement interval. The local small
    % threshold is defined as 3 deviations from the mean of this non-movement
    % interval (99% confidence). With this small threshold, we start at the
    % first peak and search forward for its real front end. Similarly, we start
    % at the subsequent peak and search backward for its real back end.
    %
    % 13. Conservatively, the beginning and end of the video are treated as the
    % end and beginning of stage movements, respectively. We search for a
    % temporary front end and a temporary back end, respectively, using the
    % global small and Otsu thresholds.
    %
    % 14. After the final, logged stage motion is found in the frame
    % differences, we look to see if there are any subsequent, extra peaks.
    % An Otsu threshold is computed, as detailed earlier, using the interval
    % spanning from the final stage movement's temporary front end till the
    % final frame difference. If this threshold is unsuitable, we use the
    % global Otsu threshold. If we find any extra peaks, the first peak's back
    % end is located and its frame as well as the remainder of the frame
    % differences (including any other extra peaks) are marked as a single
    % large stage movement that extends till the end of the video. This is
    % necessary since Worm Tracker users can terminate logging prior to
    % terminating the video (e.g., this may occur automatically if the worm is
    % lost).
    %
    % 15. To find a stage movement, we compute its offset media time. The first
    % stage movement is offset by the delay time (see step 2). Subsequent media
    % times are offset by the difference between the previous media time and
    % its stage-movement peak. Therefore, each stage movement provides the
    % offset for the next one. The longer the wait till the next stage
    % movement, the less accurate this offset becomes. Therefore, we search for
    % each stage movement using a window that begins at the last stage
    % movement's temporary front end and ends at the offset media time plus
    % this distance (a window with its center at the offset media time). If the
    % window is too small (the offset media time is too close to the temporary
    % front end of the previous stage movement), we extend its end to be the
    % offset media time plus the delay time.
    %
    % 16. 
If a stage movement peak is absent, we attempt to shift the media % times backward or forward, relative to the stage movement peaks, % depending on whether the current peak is closer to the next or previous % media time, respectively. When shifting the media times backward, we % assume the first stage movement occurred prior to video recording and, % therefore, throw away its media time and location. When shifting the % media times forward, we look for a spare 0 media time and location (see % step 5). If there are no spares, we swallow up all the frames prior to % the end of the first stage movement and label them as an unusable % movement that began prior to recording and bled into the beginning of the % video. % % 17. If we find a stage-movement peak closer to the previous offset media % time than its own supposed offset media time, we assume we have a % misalignment and attempt to shift the media times forward relative to the % stage movement peaks. There are some restrictions to this check since % small-scale, frame jitter can misrepresent the reported media time. % % 18. The final logged stage motion may occur after the video ends and, as % a result, may have no representation in the frame-difference variance. % Therefore, for the last stage movement, we check our threshold (see step % 10) to ensure that it separates 99% of the smaller values and, thereby, % picks up stage movement rather than splitting the non-movement modality. % % % % FUNCTION [FRAMES INDICES LOCATIONS] = ... % FINDSTAGEMOVEMENT(INFOFILE, LOGFILE, DIFFFILE, VERBOSE) % % FUNCTION [FRAMES INDICES LOCATIONS] = ... % FINDSTAGEMOVEMENT(INFOFILE, LOGFILE, DIFFFILE, VERBOSE, GUIHANDLE) % % Input: % infoFile - the XML file with the experiment information % logFile - the CSV file with the stage locations % diffFile - the MAT file with the video differentiation % verbose - verbose mode 1 shows the results in a figure % verbose mode 2 labels the stage movements in the figure % guiHandle - the GUI handle to use when showing the results; % if empty, the results are shown in a new figure % % Output: % frames - a vector of frame status % true = the frame contains stage movement % false = the frame does NOT contain stage movement % NaN = the original video frame was dropped % Note: video frames are indexed from 0, Matlab indexes % from 1, please adjust your calculations accordingly % movesI - a 2-D matrix with, respectively, the start and end % frame indices of stage movements % locations - the location of the stage after each stage movement % % See also VIDEO2DIFF % % % © Medical Research Council 2012 % You will not remove any copyright or other notices from the Software; % you must reproduce all copyright notices and other proprietary % notices on any copies of the Software. ''' #%% mediaTimes, locations, delayFrames, fps, spareZeroTimeLocation = \ _initial_checks(mediaTimes, locations, delayFrames, fps) frameDiffs = _norm_frame_diffs(frameDiffs) # Compute the global Otsu and small frame-difference thresholds. # Note 1: we use the Otsu to locate frame-difference peaks corresponding to # stage movement. # Note 2: we use the small threshold to locate the boundaries between # frame differences corresponding to stage movement and frame differences # corresponding to a non-movement interval. 
gOtsuThr = graythreshmat(frameDiffs); gSmallDiffs, gSmallThr = _get_small_otsu(frameDiffs, gOtsuThr) maxMoveFrames = delayFrames + 1; #% maximum frames a movement takes var_init = \ _init_search(frameDiffs, gOtsuThr, gSmallDiffs, gSmallThr, mediaTimes, maxMoveFrames, fps) if var_init is not None: frames, movesI, prevPeakI, prevPeakEndI, maxMoveTime, timeOff = var_init else: #return empty vectors if there was no movement if len(locations) == 0: locations = np.zeros((1,2)); return np.zeros(frameDiffs.size, np.int), np.zeros((1,2), np.int), locations #% Match the media time-stage movements to the frame-difference peaks. mediaTimeOff = 0.; #% the offset media time prevOtsuThr = gOtsuThr; #% the previous small threshold prevSmallThr = gSmallThr; #% the previous small threshold isShifted = False; #% have we shifted the data to try another alignment? #%% #AEJ I am using a while instead of a for to be able to go back i = 0 while i < mediaTimes.size-1: i += 1 #%% # Compute the offset media time. prevMediaTimeOff = mediaTimeOff; mediaTimeOff = mediaTimes[i] + timeOff; mediaTimeOffI = int(round(mediaTimeOff * fps)); inputs_args = (frameDiffs, prevPeakEndI, mediaTimeOffI, maxMoveFrames) searchDiffs, startI, endI = _get_search_diff(*inputs_args) inputs_args = (frameDiffs, searchDiffs, gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr) isOtsu, otsuThr = get_otsu_thresh(*inputs_args) #% If we're at the end, make sure we're using an appropriate threshold. if i == mediaTimes.size-1: #% Does the threshold separate the 99% of the small frame #% differences from the large ones? smallDiffs, smallThr = _get_small_otsu(searchDiffs, otsuThr) isOtsu = (smallDiffs.size>0) & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr); inputs_args = (frameDiffs, searchDiffs, isOtsu, otsuThr, gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr, maxMoveFrames) indices, prevOtsuThr, prevSmallThr = _get_peak_indices(*inputs_args) #%% #% We can't find any distinguishably large peaks. peakI = np.nan; if len(indices) == 0: #% Does the last stage movement occur after the video ends? if i == mediaTimes.size-1 and endI >= frameDiffs.size-1: #% Does the last offset media time occur before the video ends? if mediaTimeOff < (frameDiffs.size - 1) / fps: dd = 'LastPeak ' \ 'The search window for the last stage movement ({}) ' \ 'at media time {:.3f} seconds (frame {} ) offset to {:.3} ' \ 'seconds (frame {}) to the last frame {:.3} seconds ' \ '(frame {}), does not have any distinguishably large peaks. '\ 'The peak probably occured after the video ended and, ' \ 'therefore, the last stage movement will be ignored.' dd = dd.format(i, mediaTimes[i], round(mediaTimes[i] * fps), mediaTimeOff, startI - 1, (endI - 1) / fps, endI - 1 ) warnings.warn(dd) # Ignore the last stage movement. mediaTimes = mediaTimes[:-1] locations = locations[:-1] movesI = movesI[:-1] break; #% Report the warning. dd = 'NoPeaks ' \ 'The search window for stage movement ({}) ' \ 'at media time {:.3f} seconds (frame {} ) offset to {:.3} ' \ 'seconds (frame {}) to the last frame {:.3} seconds ' \ '(frame {}), does not have any distinguishably large peaks.' dd = dd.format(i+1, mediaTimes[i], round(mediaTimes[i] * fps), mediaTimeOff, startI - 1, (endI - 1) / fps, endI - 1 ) warnings.warn(dd) # Use the first peak. else: peakI = indices[0] + startI #% Is the current offset media time further from the frame- #% difference stage movement than the previous offset media time? 
peakTime = peakI / fps; timeDiff = mediaTimeOff - peakTime; prevTimeDiff = prevMediaTimeOff - peakTime; #%% if (i > 1) and \ ((abs(prevTimeDiff) > maxMoveTime) or \ (abs(timeDiff) > maxMoveTime)) and \ (mediaTimeOff > prevMediaTimeOff) and \ (abs(timeDiff / prevTimeDiff) > 2): #% Report the warning. dd = ['FarPeak', 'Stage movement ({})'.format(i+1), '(at media time {:.3f} seconds)'.format(mediaTimes[i]), 'offset to {:.3} seconds,'.format(mediaTimeOff), 'has its frame-difference peak at {:.3} seconds (frame {}),'.format(peakTime, peakI - 1), 'an error of {:.3} seconds.'.format(timeDiff), 'The previous media time, offset to {:.3} seconds,'.format(prevMediaTimeOff), 'is closer with an error only {:.3} seconds'.format(prevTimeDiff), '(less than half the current media time error). ', 'Therefore, we probably have either a false ', 'peak, a shifted misalignment, or an abnormally long delay.' ] dd = ' '.join(dd) warnings.warn(dd) #% Ignore this wrong peak. peakI = np.nan; #%% #% Can we realign (shift) the stage movements and media times? if np.isnan(peakI): lastMoveTime = movesI[i - 1, 0] / fps; isShiftable = True; if isShifted: isShiftable = False; #% Shift the media times forward. elif i > 1 and \ abs(mediaTimes[i - 2] - lastMoveTime) < abs(mediaTimes[i] - lastMoveTime): #% Would a time shift align the media times with the #% frame-difference stage movements? for j in range(1, i - 1): #% Compute the error from the predicted time. offset = movesI[j,0] / fps - mediaTimes[j - 1]; predictedTime = mediaTimes[j] + offset; moveTime = movesI[j + 1,0] / fps; timeDiff = abs(predictedTime - moveTime); #% Compute the interval between the media times. mediaDiff = mediaTimes[j] - mediaTimes[j - 1]; #% Is the error in the predicted time greater than #% the interval between media times? if timeDiff > mediaDiff: isShiftable = False; break; #% Time cannot be shifted due to misalignment between the media #% times and frame-difference stage movements. if not isShiftable: dd = 'TimeShiftAlignment ' \ 'Time cannot be shifted forward because the' \ ' frame-difference stage movement at {:.3}'\ ' seconds would have a'\ ' predicted time of {:.3}'\ ' seconds (an error of {:.3}' \ ' seconds) whereas the interval between its media' \ ' time and the previous media time is only {:.3}' \ ' seconds and,' \ ' therefore, smaller than the error from shifting.' dd = dd.format(moveTime, predictedTime, timeDiff, mediaDiff ) warnings.warn(dd); #% Shift everything forward using the spare 0 media time location. elif len(spareZeroTimeLocation)>0: mediaTimes = np.insert(mediaTimes, 0,0) locations = np.vstack((spareZeroTimeLocation, locations)) movesI = np.vstack((movesI, np.zeros((1,2), np.int))) timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1]; #% Redo the match. i = i - 1; #% Warn about the time shift. warnings.warn('TimeShiftForward : ' \ 'Shifting the media times forward relative to the ' \ 'frame-difference stage movements (using a spare ' \ 'location at media time 0:0:0.000) in an attempt ' \ 'to realign them'); #% Shift everything forward by assuming a missing 0 media time #% location and swallowing earlier frames into the the first #% stage movement. else: frames[:movesI[1,0]] = True; movesI[:(i - 1),:] = movesI[1:i,:]; movesI[0,0] = 0; timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1]; #% Redo the match. i = i - 2; #% Warn about the time shift. 
warnings.warn('TimeShiftForward : ' \ 'Shifting the media times forward relative to the ' \ 'frame-difference stage movements (by swallowing ' \ 'earlier frames into the first stage movement) in ' \ 'an attempt to realign them'); # Shift the media times backward. else: #% Would a time shift align the media times with the #% frame-difference stage movements? for j in range(2, i - 1): #% Compute the error from the predicted time. offset = movesI[j - 1,0] / fps - mediaTimes[j]; predictedTime = mediaTimes[j + 1] + offset; moveTime = movesI[j,0] / fps; timeDiff = np.abs(predictedTime - moveTime); #% Compute the interval between the media times. mediaDiff = mediaTimes[j + 1] - mediaTimes[j]; #% Is the error in the predicted time greater than the #% interval between media times? if timeDiff > mediaDiff: isShiftable = False; break; #% Time cannot be shifted due to misalignment between the media #% times and frame-difference stage movements. if not isShiftable: dd = ['TimeShiftAlignment', 'Time cannot be shifted backward because the', 'frame-difference stage movement at {:.3} seconds'.format(moveTime), 'would have a predicted time of {:.3} seconds'.format(predictedTime), 'seconds (an error of {:.3} seconds)'.format(timeDiff), 'whereas the interval between its media', 'time and the previous one is only {:.3} seconds'.format(mediaDiff), 'and, therefore, smaller than the error from shifting' ] warnings.warn(' '.join(dd)) #% Shift everything backward. else: mediaTimes = mediaTimes[1:]; locations = locations[1:]; movesI = movesI[:-1]; timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1]; #% Redo the match. i = i - 1; #% Warn about the time shift. dd = 'TimeShiftBackward : ' \ 'Shifting the media times backward relative to ' \ 'the frame-difference stage movements in an ' \ 'attempt to realign them'\ warnings.warn(dd); #% Record the shift and continue. if isShiftable: isShifted = True; continue; #% We cannot realign (shift) the stage movements and media times. else: #% Compute the stage movement sizes. movesI = movesI[:i,:] moveSizes = np.zeros((movesI.shape[0],1)); for j in range(2, movesI.shape[0] - 1): moveDiffs = frameDiffs[movesI[j,0]:movesI[j,1]]; moveSizes[j] = np.nansum(moveDiffs) #% Compute the statistics for stage movement sizes. meanMoveSize = np.nanmean(moveSizes[1:]); stdMoveSize = _matlab_std(moveSizes[1:]); smallMoveThr = meanMoveSize - 2.5 * stdMoveSize; largeMoveThr = meanMoveSize + 2.5 * stdMoveSize; #% Are any of the stage movements considerably small or large? for j in range(1, movesI.shape[0]-1): if moveSizes[j] < smallMoveThr: #% Is the stage movement small? before_f = movesI[j,0] - 1 after_f = movesI[j,1] - 1 #% Report the warning. dd = ['ShortMove', 'Stage movement {}'.format(j), 'at media time {:.3}'.format(mediaTimes[j]), 'seconds (frame {}),'.format(int(round(mediaTimes[j] * fps))), 'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f), 'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f), 'is considerably small' ] elif moveSizes[j] > largeMoveThr: #% Is the stage movement large? before_f = movesI[j,0] - 1 after_f = movesI[j,1] - 1 dd = ['LongMove', 'Stage movement {}'.format(j), 'at media time {:.3}'.format(mediaTimes[j]), 'seconds (frame {}),'.format(int(round(mediaTimes[j] * fps))), 'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f), 'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f), 'is considerably large' ] #% Construct the report. 
msg = 'NoShift : We cannot find a matching peak nor shift the time ' \ 'for stage movement {} at media time {:.3} seconds (frame {}).' \ .format(i+1, mediaTimes[i], int(round(mediaTimes[i] * fps)) ) raise(ValueError(msg)); if np.isnan(peakI): continue #% Find a temporary back end for this stage movement. #% Note: this peak may serve as its own temporary back end. startI = max(peakI - maxMoveFrames, prevPeakEndI); dd = frameDiffs[startI:peakI+1][::-1] j = np.nanargmin(dd) minDiff = dd[j] peakBackEndI = peakI - j; #% we flipped to choose the last min j = peakI - 1; #% If the temporary back end's frame difference is small, try to push #% the back end forwards (closer to the stage movement). if minDiff <= prevSmallThr: while j > startI: if frameDiffs[j] <= prevSmallThr: peakBackEndI = j; break; j -= 1; #% If the temporary back end's frame difference is large, try to push #% the back end backwards (further from the stage movement). elif minDiff >= min(otsuThr, gOtsuThr) or \ (minDiff > gSmallThr and peakBackEndI > startI and \ np.all(np.isnan(frameDiffs[startI:(peakBackEndI - 1)]))): peakBackEndI = startI; #% Compute a threshold for stage movement. smallDiffs = frameDiffs[prevPeakEndI:peakBackEndI+1]; smallThr = np.nanmean(smallDiffs) + 3*_matlab_std(smallDiffs); if np.isnan(smallThr): smallThr = prevSmallThr; #% Find the front end for the previous stage movement. #set the range using the previous peak as range j = prevPeakI; while j < peakI and \ (np.isnan(frameDiffs[j]) or \ frameDiffs[j] > smallThr) and \ (np.isnan(frameDiffs[j + 1]) or \ frameDiffs[j + 1] > smallThr): j = j + 1; movesI[i - 1, 1] = j prevPeakEndI = j-1; #%% #% Mark the previous stage movement. if movesI[i - 1,0] < 1: frames[:movesI[i - 1,1]] = True; else: frames[movesI[i - 1,0]:movesI[i - 1,1]] = True; #% Find the back end for this stage movement. j = peakI; while j > prevPeakEndI and \ (np.isnan(frameDiffs[j]) or frameDiffs[j] > smallThr): j -= 1; movesI[i, 0] = j + 1; #% Is the non-movement frame-differences threshold too large? if smallThr <= otsuThr and (np.isnan(gOtsuThr) or smallThr <= gOtsuThr): prevOtsuThr = otsuThr; prevSmallThr = smallThr; else: before_f = movesI[i - 1,1] - 1 after_f = movesI[i - 1,0] - 1 dd = ['LargeNonMovementThreshold', 'The non-movement window between stage movement {}'.format(i-1), 'and stage movement {}'.format(i), 'from {:.3} (frame {})'.format(before_f / fps, before_f), 'to {:.3} (frame {})'.format(before_f / fps, after_f), 'contains considerably large frame-difference variance' ] warnings.warn(' '.join(dd)) #% Compute the media time offset. timeOff = peakTime - mediaTimes[i]; #% We reached the end. endI = peakI + maxMoveFrames; if endI >= frameDiffs.size: peakFrontEndI = frameDiffs.size-1; #% Find a temporary front end for this stage movement. else: dd = frameDiffs[peakI+1:endI+1] if not np.all(np.isnan(dd)): j = np.nanargmin(dd) minDiff = dd[j] peakFrontEndI = peakI + j + 1; #% If the temporary front end's frame difference is large, try to #% push the front end forwards (further from the stage movement). if (minDiff >= min(otsuThr, gOtsuThr)) or \ (minDiff > max(smallThr, gSmallThr) and \ (peakFrontEndI < endI) and \ np.all(np.isnan(frameDiffs[(peakFrontEndI + 1):endI+1]))): peakFrontEndI = endI #% Try to push the temporary front end backwards (closer to the stage #% movement). j = peakI + 1; while j < peakFrontEndI: if frameDiffs[j] <= smallThr: peakFrontEndI = j; break; j = j + 1; #% Advance. 
prevPeakI = peakI; prevPeakEndI = peakFrontEndI; #% Do the frame differences end with a stage movement? if prevPeakEndI > frameDiffs.size: movesI[-1, 1] = frameDiffs.size; frames[movesI[-1,0]:] = True; movesI = np.vstack(movesI, np.full((1,2), frameDiffs.size+1)) #% Find the front end for the last stage movement. else: #% Is the Otsu threshold large enough? searchDiffs = frameDiffs[prevPeakEndI:]; otsuThr = graythreshmat(searchDiffs); isOtsu = otsuThr > gOtsuThr; #% false if no global Otsu if not isOtsu: #% Does the Otsu threshold separate the 99% of the small frame #% differences from the large ones? And, if there is a global small #% threshold, is the Otsu threshold larger? smallDiffs, smallThr = _get_small_otsu(frameDiffs, gOtsuThr) isOtsu = (smallDiffs.size>0) & \ np.any(~np.isnan(smallDiffs)) & \ (otsuThr >= smallThr); isOtsu = isOtsu & (np.isnan(gSmallThr) | (otsuThr > gSmallThr)) #% Does the global Otsu threshold pull out any peaks? if not isOtsu: if not np.isnan(gOtsuThr) and (np.sum(searchDiffs > gOtsuThr) > 1): otsuThr = gOtsuThr; isOtsu = True; #% Are there any large frame difference past the last stage movement? isExtraPeaks = False; if not isOtsu: peakI = frameDiffs.size; peakBackEndI = frameDiffs.size-1 #% There are too many large frame-difference peaks. else: _, indices = maxPeaksDistHeight(searchDiffs, maxMoveFrames, otsuThr); isExtraPeaks = len(indices)>0; #% Find the first large peak past the last stage movement. i = prevPeakEndI; while (i < frameDiffs.size-1) and \ (np.isnan(frameDiffs[i]) or (frameDiffs[i] < otsuThr)): i = i + 1; peakI = i; #% Find a temporary back end for this large peak. #% Note: this peak may serve as its own temporary back end. startI = max(peakI - maxMoveFrames, prevPeakEndI); dd = frameDiffs[startI:peakI+1][::-1] i = np.nanargmin(dd) minDiff = dd[i] peakBackEndI = peakI - i + 1; #% we flipped to choose the last min #% If the temporary back end's frame difference is small, try to #% push the back end forwards (closer to the stage movement). if minDiff <= prevSmallThr: i = peakI - 1; while i > startI: if frameDiffs[i] <= prevSmallThr: peakBackEndI = i; break; i = i - 1; #% If the temporary back end's frame difference is large, try to #% push the back end backwards (further from the stage movement). elif minDiff >= min(otsuThr, gOtsuThr) or \ ((minDiff > gSmallThr) and (peakBackEndI > startI) and \ np.all(np.isnan(frameDiffs[startI:(peakBackEndI - 1)]))): peakBackEndI = startI; #% Compute a threshold for stage movement. smallDiffs = frameDiffs[prevPeakEndI:peakBackEndI+1]; smallThr = np.nanmean(smallDiffs) + 3 * np.nanstd(smallDiffs, ddof=1); if np.isnan(smallThr): smallThr = prevSmallThr; #% Find the front end for the last logged stage movement. i = prevPeakI; while (i < peakI) and (i < frameDiffs.size-1) and \ (np.isnan(frameDiffs[i]) or (frameDiffs[i] > smallThr)) and \ (np.isnan(frameDiffs[i + 1]) or (frameDiffs[i + 1] > smallThr)): i = i + 1; movesI[-1,1] = i; prevPeakEndI = i-1; #% Mark the last logged stage movement. if movesI.shape[0] == 1: frames[:movesI[-1, 1]] = True else: frames[movesI[-1,0]:movesI[-1,1]] = True #% Are there any large frame-difference peaks after the last logged #% stage movement? if isExtraPeaks: pass #warning('findStageMovement:TooManyPeaks', ... # ['There are, approximately, ' num2str(length(indices)) ... # ' large frame-difference peaks after the last stage' ... # ' movement ends at ' num2str((movesI(end,2) - 1)/ fps, '%.3f') ... 
# ' seconds (frame ' num2str(movesI(end,2) - 1) ')']); #% Find the back end for logged stage movements. i = peakI - 1; while (i > prevPeakEndI) and (np.isnan(frameDiffs[i]) or \ (frameDiffs[i] > smallThr)): i = i - 1; movesI = np.vstack((movesI, (i+1, frameDiffs.size+1))) frames[movesI[-1,0]:] = True; #% Are any of the stage movements considerably small or large? if isExtraPeaks: #% Compute the stage movement sizes. movesI = movesI[:i, :] moveSizes = np.zeros((movesI.shape[0],1)); for j in range(1, movesI.shape[0]-1): moveDiffs = frameDiffs[movesI[j,0]:movesI[j,1]]; moveSizes[j] = np.nansum(moveDiffs); #% Compute the statistics for stage movement sizes. meanMoveSize = np.nanmean(moveSizes[1:]); stdMoveSize = np.nanstd(moveSizes[1:], ddof=1); smallMoveThr = meanMoveSize - 2.5 * stdMoveSize; largeMoveThr = meanMoveSize + 2.5 * stdMoveSize; #% Are any of the stage movements considerably small or large? for i in range(1, movesI.shape[0]-1): #% Is the stage movement small? if moveSizes[i] < smallMoveThr: before_f = movesI[i,0] - 1 after_f = movesI[i,1] - 1 #% Report the warning. dd = ['ShortMove', 'Stage movement {}'.format(i), 'at media time {:.3}'.format(mediaTimes[i]), 'seconds (frame {}),'.format(int(round(mediaTimes[i] * fps))), 'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f), 'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f), 'is considerably small' ] warnings.warn(' '.join(dd)) #% Is the stage movement large? elif moveSizes[i] > largeMoveThr: before_f = movesI[i,0] - 1 after_f = movesI[i,1] - 1 #% Report the warning. dd = ['LongMove', 'Stage movement {}'.format(i), 'at media time {:.3}'.format(mediaTimes[i]), 'seconds (frame {}),'.format(int(round(mediaTimes[i] * fps))), 'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f), 'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f), 'is considerably large' ] warnings.warn(' '.join(dd)) return frames, movesI, locations #%% def shift2video_ref(is_stage_move, movesI, stage_locations, video_timestamp_ind): stage_vec = np.full((is_stage_move.size,2), np.nan); if len(movesI) <= 1 and np.all(movesI==0): #%there was no movements stage_vec[:,0] = stage_locations[:, 0]; stage_vec[:,1] = stage_locations[:, 1]; else: #%convert output into a vector that can be added to the skeletons file to obtain the real worm displacements for kk in range(stage_locations.shape[0]): bot = max(0, movesI[kk,1]); top = min(is_stage_move.size, movesI[kk+1,0]); stage_vec[bot:top, 0] = stage_locations[kk,0]; stage_vec[bot:top, 1] = stage_locations[kk,1]; #%the nan values must match the spected video motions #assert(all(isnan(stage_vec(:,1)) == is_stage_move)) # prepare vectors to save into the hdf5 file. #%Go back to the original movie indexing. I do not want to include the missing frames at this point. is_stage_move_d = is_stage_move[video_timestamp_ind].astype(np.int8); stage_vec_d = stage_vec[video_timestamp_ind, :]; return stage_vec_d, is_stage_move_d
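#%%
#% Hypothetical usage sketch (added for illustration, not part of the
#% original module): it only exercises the peak-picking helper defined above
#% on a toy signal. The numbers are arbitrary demonstration values and the
#% expected result assumes the standard segWorm peak-search behaviour.
if __name__ == '__main__':
    toy_diffs = np.array([0.1, 0.2, 2.0, 0.1, 0.1, 0.1, 3.0, 0.2, 0.1, 0.1])
    #% peaks must exceed a height of 1.0 and be at least 3 samples apart
    peaks, indices = maxPeaksDistHeight(toy_diffs, 3, 1.0)
    print(peaks, indices)  #% expected: values 2.0 and 3.0 at indices 2 and 6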
mit
6,729,585,383,931,663,000
43.222222
116
0.551167
false
fsxfreak/esys-pbi
src/pupil/pupil_src/shared_modules/frame_publisher.py
2
2998
''' (*)~--------------------------------------------------------------------------- Pupil - eye tracking platform Copyright (C) 2012-2017 Pupil Labs Distributed under the terms of the GNU Lesser General Public License (LGPL v3.0). See COPYING and COPYING.LESSER for license details. ---------------------------------------------------------------------------~(*) ''' from plugin import Plugin from pyglui import ui import numpy as np class Frame_Publisher(Plugin): def __init__(self,g_pool,format='jpeg'): super().__init__(g_pool) self._format = format def init_gui(self): help_str = "Publishes frame data in different formats under the topic \"frame.world\"." self.menu = ui.Growing_Menu('Frame Publisher') self.menu.append(ui.Button('Close',self.close)) self.menu.append(ui.Info_Text(help_str)) self.menu.append(ui.Selector('format',self,selection=["jpeg","yuv","bgr","gray"], labels=["JPEG", "YUV", "BGR", "Gray Image"],label='Format')) self.g_pool.sidebar.append(self.menu) def update(self,frame=None,events={}): if frame and frame.jpeg_buffer: if self.format == "jpeg": data = frame.jpeg_buffer elif self.format == "yuv": data = frame.yuv_buffer elif self.format == "bgr": data = frame.bgr elif self.format == "gray": data = frame.gray # Create serializable object. # Not necessary if __raw_data__ key is used. # blob = memoryview(np.asarray(data).data) blob = data events['frame.world'] = [{ 'topic':'frame', 'width': frame.width, 'height': frame.height, 'index': frame.index, 'timestamp': frame.timestamp, 'format': self.format, '__raw_data__': [blob] }] def on_notify(self,notification): """Publishes frame data in several formats Reacts to notifications: ``eye_process.started``: Re-emits ``frame_publishing.started`` Emits notifications: ``frame_publishing.started``: Frame publishing started ``frame_publishing.stopped``: Frame publishing stopped """ if notification['subject'].startswith('eye_process.started'): # trigger notification self.format = self.format def get_init_dict(self): return {'format':self.format} def close(self): self.alive = False def cleanup(self): self.notify_all({'subject':'frame_publishing.stopped'}) if self.menu: self.g_pool.sidebar.remove(self.menu) self.menu = None @property def format(self): return self._format @format.setter def format(self,value): self._format = value self.notify_all({'subject':'frame_publishing.started','format':value})
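# --- Hypothetical sketch (added for illustration; not part of the original
# --- plugin). It only shows the shape of the 'frame.world' event dict built
# --- in update() above; the frame values below are made-up assumptions.
if __name__ == '__main__':
    import numpy as np
    fake_gray = np.zeros((240, 320), dtype=np.uint8)  # stand-in for frame.gray
    event = {'topic': 'frame',
             'width': fake_gray.shape[1],
             'height': fake_gray.shape[0],
             'index': 0,
             'timestamp': 0.0,
             'format': 'gray',
             '__raw_data__': [fake_gray]}
    print(event['format'], event['width'], event['height'])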
mit
2,254,809,564,954,330,000
33.079545
150
0.543362
false
dmitru/pines
pines/trees.py
1
8172
# coding=utf-8 import numpy as np from copy import deepcopy class BinaryDecisionTreeSplit(object): def __init__(self, feature_id, value): self.feature_id = feature_id self.value = value class BinaryDecisionTree(object): """ Implements a binary decision tree with array-based representation. This class itself doesn't contain logic for selection of best splits, etc; instead, it receives DecisionTreeSplit that describe splits and updates the tree accordingly. """ def __init__(self, n_features): """ :param n_features: number of features in dataset. Features have 0-based indices """ self._capacity = 0 self._n_features = n_features self._is_leaf = np.zeros(0, dtype='bool') self._is_node = np.zeros(0, dtype='bool') self._leaf_values = np.zeros(0) self._leaf_functions = [] self._leaf_n_samples = np.zeros(0) self._splits = [] self._capacity = 0 self._reallocate_if_needed(required_capacity=1) self._init_root() def _reallocate_if_needed(self, required_capacity): if self._capacity <= required_capacity: self._is_leaf.resize(required_capacity) self._is_node.resize(required_capacity) self._leaf_values.resize(required_capacity) self._leaf_functions = self._grow_list(self._leaf_functions, required_capacity) self._leaf_n_samples.resize(required_capacity) self._splits = self._grow_list(self._splits, required_capacity) self._capacity = required_capacity def _init_root(self): self._is_leaf[0] = True self._is_node[0] = True self._latest_used_node_id = 0 def num_of_leaves(self): return np.sum(self._is_leaf[:self._latest_used_node_id + 1]) def num_of_nodes(self): return self._latest_used_node_id def is_leaf(self, node_id): assert node_id >= 0 and node_id <= self._latest_used_node_id return self._is_leaf[node_id] def leaf_mask(self): return self._is_leaf[:self._latest_used_node_id + 1] def __str__(self): def helper(cur_node_id, padding='', is_last_leaf_on_level=True): if cur_node_id > self._latest_used_node_id or not self._is_node[cur_node_id]: return '' if self._is_leaf[cur_node_id]: node_str = '{}: {:.2f} (n={})'.format( cur_node_id, self._leaf_values[cur_node_id], int(self._leaf_n_samples[cur_node_id])) else: node_str = '{}: [x[{}] < {:.2f}]? (n={})'.format( cur_node_id, self._splits[cur_node_id].feature_id, self._splits[cur_node_id].value, int(self._leaf_n_samples[cur_node_id]) ) result = padding + ("└── " if is_last_leaf_on_level else "├── ") + node_str + '\n' if is_last_leaf_on_level: new_padding = padding + ' ' else: new_padding = padding + '| ' result += helper(self.left_child(cur_node_id), new_padding, False) result += helper(self.right_child(cur_node_id), new_padding, True) return result return helper(0) def left_child(self, node_id): return (node_id + 1) * 2 - 1 def right_child(self, node_id): return (node_id + 1) * 2 def leaves(self): return np.where(self._is_leaf == True)[0] def split_node(self, node_id, split): """ Modifies the tree, applying the specified node split. The node that is being splitted must be a leaf. After the split, the number of leaves increases by one. 
:param split: DecisionTreeSplit, describes the split to perform """ assert node_id >= 0 and node_id <= self._latest_used_node_id assert split.feature_id >= 0 and split.feature_id < self._n_features assert self.is_leaf(node_id) == True left_child_id = self.left_child(node_id) right_child_id = self.right_child(node_id) if right_child_id >= self._capacity: self._reallocate_if_needed(2 * self._capacity + 1) self._splits[node_id] = deepcopy(split) self._is_leaf[node_id] = False self._is_node[left_child_id] = True self._is_node[right_child_id] = True self._is_leaf[left_child_id] = True self._is_leaf[right_child_id] = True self._latest_used_node_id = max(self._latest_used_node_id, right_child_id) def predict(self, X): """ :param X: :return: """ def predict_one(x): current_node = self.root() while not self.is_leaf(current_node): current_split = self._splits[current_node] if x[current_split.feature_id] < current_split.value: current_node = self.left_child(current_node) else: current_node = self.right_child(current_node) if self._leaf_functions[current_node] is not None: func, args = self._leaf_functions[current_node] return func(args) return self._leaf_values[current_node] sample_size, features_count = X.shape assert features_count == self._n_features result = np.zeros(sample_size) for i in range(sample_size): x = X[i] result[i] = predict_one(x) return result def apply(self, X): """ Args: X: numpy 2d array Instance-features matrix Returns: numpy int array Array of leaf indices, corresponding to classified instances """ def apply_one(x): current_node = self.root() while not self.is_leaf(current_node): current_split = self._splits[current_node] if x[current_split.feature_id] < current_split.value: current_node = self.left_child(current_node) else: current_node = self.right_child(current_node) return current_node sample_size, features_count = X.shape assert features_count == self._n_features result = np.zeros(sample_size) for i in range(sample_size): x = X[i] result[i] = apply_one(x) return result def root(self): """ :return: Id of the root node """ return 0 def depth(self, node_id): assert node_id >= 0 and node_id <= self._latest_used_node_id return np.floor(np.log2(node_id + 1)) + 1 def nodes_at_level(self, level, kind='all'): """ Args: level: Depth level in the tree, starting from 1 for the root node. kind: 'all', 'internal_nodes' or 'leaves' Returns: List of node ids at the specified level. """ assert kind in ['all', 'internal_nodes', 'leaves'] result = [] for node_id in range(2 ** (level - 1) - 1, min(2 ** level - 1, self._latest_used_node_id + 1)): if kind == 'all': result.append(node_id) elif kind == 'internal_nodes': if self._is_node[node_id]: result.append(node_id) else: if self._is_leaf[node_id]: result.append(node_id) return result def _grow_list(self, list, required_capacity, fill_value=None): """ Returns a list that is at least as long as required_capacity, filling the missing elements with fill_value if needed. If the length of the list is already greater than required_capacity, returns unmodified list. :param list: :param required_capacity: :param fill_value: :return: """ if len(list) >= required_capacity: return list return list + [fill_value for _ in range(required_capacity - len(list))]
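# --- Hypothetical usage sketch (added for illustration; not part of the
# --- original module). It grows a one-split tree by hand; the feature id,
# --- threshold and leaf values are arbitrary demonstration numbers, and the
# --- leaf predictions are written straight into _leaf_values because no
# --- public setter is defined above.
if __name__ == '__main__':
    tree = BinaryDecisionTree(n_features=2)
    # split the root on feature 0 at threshold 0.5
    tree.split_node(tree.root(), BinaryDecisionTreeSplit(feature_id=0, value=0.5))
    left, right = tree.left_child(tree.root()), tree.right_child(tree.root())
    tree._leaf_values[left] = -1.0
    tree._leaf_values[right] = 1.0
    X = np.array([[0.2, 7.0], [0.9, 3.0]])
    print(tree.predict(X))  # -> [-1.  1.]
    print(tree)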
mit
2,368,062,335,330,216,400
34.947137
103
0.55098
false
philouc/pyhrf
python/pyhrf/test/test_glm.py
1
3214
import unittest import pyhrf import os.path as op import shutil class NipyGLMTest(unittest.TestCase): def setUp(self): self.tmp_dir = pyhrf.get_tmp_path() def tearDown(self): shutil.rmtree(self.tmp_dir) # def _simulate_bold(self): # boldf, tr, paradigmf, maskf = simulate_bold(output_dir=self.tmp_dir) # glm_nipy_from_files(boldf, tr, paradigmf, output_dir=output_dir, # hcut=0, drift_model='Blank', mask_file=maskf) def test_glm_default_real_data(self): from pyhrf import FmriData from pyhrf.glm import glm_nipy #pyhrf.verbose.set_verbosity(3) fdata = FmriData.from_vol_ui() # print 'fdata:' # print fdata.getSummary() glm_nipy(fdata) def test_glm_contrasts(self): from pyhrf import FmriData from pyhrf.glm import glm_nipy cons = {'audio-video': 'audio - video', 'video-audio': 'video - audio', } #pyhrf.verbose.set_verbosity(3) fdata = FmriData.from_vol_ui() # print 'fdata:' # print fdata.getSummary() g, dm, cons = glm_nipy(fdata, contrasts=cons) def test_glm_with_files(self): #pyhrf.verbose.set_verbosity(1) output_dir = self.tmp_dir bold_name = 'subj0_bold_session0.nii.gz' bold_file = pyhrf.get_data_file_name(bold_name) tr = 2.4 paradigm_name = 'paradigm_loc_av.csv' paradigm_file = pyhrf.get_data_file_name(paradigm_name) mask_name = 'subj0_parcellation.nii.gz' mask_file = pyhrf.get_data_file_name(mask_name) from pyhrf.glm import glm_nipy_from_files glm_nipy_from_files(bold_file, tr, paradigm_file, output_dir, mask_file) self.assertTrue(op.exists(output_dir)) def test_fir_glm(self): from pyhrf import FmriData from pyhrf.glm import glm_nipy #pyhrf.verbose.set_verbosity(3) fdata = FmriData.from_vol_ui() # print 'fdata:' # print fdata.getSummary() glm_nipy(fdata, hrf_model='FIR', fir_delays=range(10)) def makeQuietOutputs(self, xmlFile): from pyhrf import xmlio from pyhrf.xmlio.xmlnumpy import NumpyXMLHandler t = xmlio.fromXML(file(xmlFile).read()) t.set_init_param('output_dir', None) f = open(xmlFile, 'w') f.write(xmlio.toXML(t, handler=NumpyXMLHandler())) f.close() def test_command_line(self): cfg_file = op.join(self.tmp_dir, 'glm.xml') cmd = 'pyhrf_glm_buildcfg -o %s' %(cfg_file) import os if os.system(cmd) != 0 : raise Exception('"' + cmd + '" did not execute correctly') self.makeQuietOutputs(cfg_file) cmd = 'pyhrf_glm_estim -c %s' %cfg_file if os.system(cmd) != 0 : raise Exception('"' + cmd + '" did not execute correctly') def test_suite(): tests = [unittest.makeSuite(NipyGLMTest)] return unittest.TestSuite(tests) if __name__== '__main__': #unittest.main(argv=['pyhrf.test_glm']) runner = unittest.TextTestRunner(verbosity=2) runner.run(test_suite())
gpl-3.0
-4,689,871,972,983,716,000
27.192982
78
0.589919
false
florian-f/sklearn
examples/svm/plot_svm_margin.py
4
2295
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells our
model that we do not have that much faith in our data's distribution,
and will only consider points close to the line of separation.

A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)


# Code source: Gael Varoqueux
# Modified for Documentation merge by Jaques Grobler
# License: BSD

import numpy as np
import pylab as pl
from sklearn import svm

# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# fit the model
for name, penality in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penality)
    clf.fit(X, Y)

    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-5, 5)
    yy = a * xx - (clf.intercept_[0]) / w[1]

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    pl.figure(fignum, figsize=(4, 3))
    pl.clf()
    pl.plot(xx, yy, 'k-')
    pl.plot(xx, yy_down, 'k--')
    pl.plot(xx, yy_up, 'k--')

    pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
               facecolors='none', zorder=10)
    pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=pl.cm.Paired)

    pl.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    pl.figure(fignum, figsize=(4, 3))
    pl.pcolormesh(XX, YY, Z, cmap=pl.cm.Paired)

    pl.xlim(x_min, x_max)
    pl.ylim(y_min, y_max)

    pl.xticks(())
    pl.yticks(())
    fignum = fignum + 1

pl.show()
bsd-3-clause
-3,608,698,199,744,564,700
25.37931
76
0.582571
false
Marcello-Sega/pytim
setup.py
1
5678
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 """A python based tool for interfacial molecules analysis """ # To use a consistent encoding import codecs import os import sys # Always prefer setuptools over distutils try: from setuptools import find_packages from Cython.Distutils import build_ext import numpy except ImportError as mod_error: mod_name = mod_error.message.split()[3] sys.stderr.write("Error : " + mod_name + " is not installed\n" "Use pip install " + mod_name + "\n") exit(100) from setuptools import setup from setuptools.command.test import test as TestCommand from distutils.extension import Extension class NoseTestCommand(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # Run nose ensuring that argv simulates running nosetests directly import nose nose.run_exit(argv=['nosetests']) pytim_dbscan = Extension( "pytim_dbscan", ["pytim/dbscan_inner.pyx"], language="c++", include_dirs=[numpy.get_include()]) circumradius = Extension( "circumradius", ["pytim/circumradius.pyx"], language="c++", include_dirs=[numpy.get_include()]) here = os.path.abspath(os.path.dirname(__file__)) # Get the long description from the README file with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() # This fixes the default architecture flags of Apple's python if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'): os.environ['ARCHFLAGS'] = '' # Get version from the file version.py version = {} with open("pytim/version.py") as fp: exec(fp.read(), version) setup( name='pytim', ext_modules=[pytim_dbscan, circumradius], cmdclass={ 'build_ext': build_ext, 'test': NoseTestCommand }, # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version=version['__version__'], description='Python Tool for Interfacial Molecules Analysis', long_description=long_description, # The project's main homepage. url='https://github.com/Marcello-Sega/pytim', # Author details author='Marcello Sega, Balazs Fabian, Gyorgy Hantal, Pal Jedlovszky', author_email='marcello.sega@univie.ac.at', # Choose your license license='GPLv3', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', # Indicate who your project is intended for 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Software Development :: Libraries :: Python Modules', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', ], # What does your project relate to? keywords='molecular simuations analysis ', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). 
packages=find_packages(), # Alternatively, if you want to distribute just a my_module.py, uncomment # this: # py_modules=["my_module"], # List run-time dependencies here. These will be installed by pip when # your project is installed. For an analysis of "install_requires" vs pip's # requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=[ 'MDAnalysis>=1.0.0', 'PyWavelets>=0.5.2', 'numpy>=1.16', 'scipy>=1.1', 'scikit-image>=0.14.2', 'cython>=0.24.1', 'sphinx>=1.4.3', 'matplotlib', 'pytest', 'dask>=1.1.1' ], # List additional groups of dependencies here (e.g. development # dependencies). You can install these using the following syntax, # for example: # $ pip install -e .[dev,test] tests_require=['nose>=1.3.7', 'coverage'], # If there are data files included in your packages that need to be # installed, specify them here. If using Python 2.6 or less, then these # have to be included in MANIFEST.in as well. package_data={ 'pytim': ['data/*'], }, # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. See: # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' ## data_files=[('my_data', ['data/data_file'])], # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # pip to create the appropriate form of executable for the target platform. # entry_points={ # 'console_scripts': [ # 'sample=sample:main', # ], # }, )
gpl-3.0
2,636,920,363,843,867,600
34.710692
94
0.661853
false
Sravan2j/DIGITS
tools/test_create_db.py
1
4429
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. import os.path import tempfile import shutil from cStringIO import StringIO from nose.tools import raises, assert_raises import mock import unittest import PIL.Image import numpy as np from . import create_db as _ class TestInit(): @classmethod def setUpClass(cls): cls.db_name = tempfile.mkdtemp() @classmethod def tearDownClass(cls): try: shutil.rmtree(cls.db_name) except OSError: pass @raises(ValueError) def test_bad_backend(self): """invalid db backend""" _.DbCreator(self.db_name, 'not-a-backend') class TestCreate(): @classmethod def setUpClass(cls): cls.db_name = tempfile.mkdtemp() cls.db = _.DbCreator(cls.db_name, 'leveldb') fd, cls.input_file = tempfile.mkstemp() os.close(fd) # Use the example picture to construct a test input file with open(cls.input_file, 'w') as f: f.write('digits/static/images/mona_lisa.jpg 0') @classmethod def tearDownClass(cls): os.remove(cls.input_file) try: shutil.rmtree(cls.db_name) except OSError: pass def test_create_no_input_file(self): """create with no image input file""" assert not self.db.create('', width=0, height=0), 'database should not allow empty input file' def test_create_bad_height_width(self): """create with bad height and width for images""" assert not self.db.create( self.input_file, width=-1, height=-1, resize_mode='crop'), 'database should not allow height == width == -1' def test_create_bad_channel_count(self): """create with bad channel count""" assert not self.db.create( self.input_file, width=200, height=200, channels=0, resize_mode='crop'), 'database should not allow channels == 0' def test_create_bad_resize_mode(self): """create with bad resize mode""" assert not self.db.create( self.input_file, width=200, height=200, resize_mode='slurp'), 'database should not allow bad resize mode slurp' def test_create_bad_image_folder(self): """create with bad image folder path""" assert not self.db.create( self.input_file, width=200, height=200, resize_mode='crop', image_folder='/clearly/a/wrong/folder'), 'database should not allow bad image folder' def test_create_normal(self): assert self.db.create( self.input_file, width=200, height=200, resize_mode='crop'), 'database should complete building normally' class TestPathToDatum(): @classmethod def setUpClass(cls): cls.tmpdir = tempfile.mkdtemp() cls.db_name = tempfile.mkdtemp(dir=cls.tmpdir) cls.db = _.DbCreator(cls.db_name, 'lmdb') _handle, cls.image_path = tempfile.mkstemp(dir=cls.tmpdir, suffix='.jpg') with open(cls.image_path, 'w') as outfile: PIL.Image.fromarray(np.zeros((10,10,3),dtype=np.uint8)).save(outfile, format='JPEG', quality=100) @classmethod def tearDownClass(cls): try: shutil.rmtree(cls.tmpdir) except OSError: pass def test_configs(self): """path_to_datum""" self.db.height = 10 self.db.width = 10 self.db.resize_mode = 'squash' self.db.image_folder = None for e in ['none', 'png', 'jpg']: for c in [1, 3]: for m in [True, False]: yield self.check_configs, (e, c, m) def check_configs(self, args): e, c, m = args self.db.encoding = e self.db.channels = c self.db.compute_mean = m image_sum = self.db.initial_image_sum() d = self.db.path_to_datum(self.image_path, 0, image_sum) assert (d.channels, d.height, d.width) == (self.db.channels, self.db.height, self.db.width), 'wrong datum shape' if e == 'none': assert not d.encoded, 'datum should not be encoded when encoding="%s"' % e else: assert d.encoded, 'datum should be encoded when encoding="%s"' % e class TestSaveMean(): pass
bsd-3-clause
7,553,932,435,437,362,000
29.972028
120
0.582298
false
arizona-phonological-imaging-lab/Autotrace
under-development/a3/roi.py
2
5643
#!/usr/bin/env python3
from __future__ import division
import numpy as np
import json


class ROI(object):
    """
    Region of Interest for a set of images

    Attributes:
        shape (tuple of numeric): the height and width of the ROI
        offset (tuple of numeric): the lower bounds of the ROI
        extent (tuple of numeric): the upper bounds of the ROI
            offset[dim] + shape[dim] should always == extent[dim]
        orthodox (tuple of bool): whether the ROI is indexed "normally"
            I.e. if the ROI is measured from the top/left
            If measured from the bottom-left: (False, True)
        slice (tuple of slice): can be used to slice into a 2d matrix
            >>> np.identity(5)[ROI(2,3,1,4).slice]
            array([[ 0.,  1.,  0.]])
    """

    def __init__(self, *args, **kwargs):
        """
        Multiple possible ways of declaring an ROI are supported.

        The first way is by specifying the bounds as positional args

        Args:
            top (numeric): the top of the region of interest
            bottom (numeric): the bottom of the region of interest
            left (numeric): the left edge of the region of interest
            right (numeric): the right edge of the region of interest

        Example:
            >>> ROI(1,2,3,4)
            ROI(1.0, 2.0, 3.0, 4.0)

        The second way is by specifying a single iterable object

        Example:
            >>> ROI(1,2,3,4) == ROI([1,2,3,4])
            True

        Regardless of the constructor format used, the order should always be:
        top, bottom, left, right
        This allows for semantic interpretation of the arguments.
        ROI is smart enough to deal with indexing from other edges

        Example:
            >>> ROI(2,1,4,3).slice
            (slice(1.0, 2.0, None), slice(3.0, 4.0, None))
            >>> ROI(2,1,4,3).top
            2.0
        """
        if len(args) == 4:
            roi = (args[0], args[1], args[2], args[3])
        elif len(args) == 1:
            roi = args[0]
        (top, bottom, left, right) = [float(x) for x in roi]
        self.orthodox = (top < bottom, left < right)
        self.shape = (abs(top - bottom), abs(left - right))
        self.offset = (min(top, bottom), min(left, right))
        self.extent = (max(top, bottom), max(left, right))
        self.slice = (slice(self.offset[0], self.extent[0]),
                      slice(self.offset[1], self.extent[1]))

    @property
    def top(self):
        """Convenience property for the top of the ROI

        For an orthodox ROI, this is the same as offset[0]
        For an ROI unorthodox in the Y dimension, this is extent[0]
        """
        return self.offset[0] if self.orthodox[0] else self.extent[0]

    @property
    def bottom(self):
        """Convenience property for the bottom of the ROI

        For an orthodox ROI, this is the same as extent[0]
        For an ROI unorthodox in the Y dimension, this is offset[0]
        """
        return self.extent[0] if self.orthodox[0] else self.offset[0]

    @property
    def left(self):
        """Convenience property for the left of the ROI

        For an orthodox ROI, this is the same as offset[1]
        For an ROI unorthodox in the X dimension, this is extent[1]
        """
        return self.offset[1] if self.orthodox[1] else self.extent[1]

    @property
    def right(self):
        """Convenience property for the right of the ROI

        For an orthodox ROI, this is the same as extent[1]
        For an ROI unorthodox in the X dimension, this is offset[1]
        """
        return self.extent[1] if self.orthodox[1] else self.offset[1]

    @property
    def height(self):
        """Convenience property for the height of the ROI

        This is the same as shape[0]
        """
        return self.shape[0]

    @property
    def width(self):
        """Convenience property for the width of the ROI

        This is the same as shape[1]
        """
        return self.shape[1]

    def __repr__(self):
        return 'ROI(%s, %s, %s, %s)' % tuple(self)

    def __eq__(self, other):
        return repr(self) == repr(other)

    def __iter__(self):
        """Iterate over ROI bounds

        Yields:
            numeric: top, bottom, left, right (strictly ordered)
        """
        return (x for x in (self.top, self.bottom, self.left, self.right))

    def domain(self, N):
        """Returns a numpy array of N equally-spaced x values in the ROI

        Args:
            N (integer): number of points to create

        Returns:
            numpy array: N evenly-spaced points, from offset[1] to extent[1]
                (includes neither offset[1] nor extent[1])
                The dtype is float

        Example:
            >>> ROI(0, 5, 10, 20).domain(3)
            array([12.5, 15. , 17.5])
        """
        step = self.shape[1] / (N + 1)
        return np.arange(self.offset[1] + step, self.extent[1], step)

    def json(self):
        """json stringify the ROI"""
        j = {
            'srcY': self.offset[0],
            'destY': self.shape[0],
            'srcX': self.offset[1],
            'destX': self.shape[1],
        }
        return json.dumps(j)

    def scale(self, factor):
        """Create a scaled version of the current ROI.

        Args:
            factor (numeric): the factor by which to scale.

        Returns:
            ROI: the scaled ROI

        Example:
            >>> ROI(1,2,3,4).scale(2.5)
            ROI(2.5, 5.0, 7.5, 10.0)
        """
        return ROI(np.array(tuple(self)) * factor)
mit
4,787,143,582,170,665,000
34.049689
72
0.550771
false
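A short usage sketch for the ROI class in the file above; the concrete values are made up for illustration and assume the class is defined/imported as shown:

# Illustrative use of the ROI class from the file above (values are arbitrary).
import numpy as np

roi = ROI(2, 1, 4, 3)               # top=2, bottom=1 -> unorthodox in Y
print(roi.top, roi.bottom)          # 2.0 1.0
print(roi.slice)                    # (slice(1.0, 2.0, None), slice(3.0, 4.0, None))
print(roi.height, roi.width)        # 1.0 1.0

print(ROI(1, 2, 3, 4) == ROI([1, 2, 3, 4]))   # True
print(ROI(1, 2, 3, 4).scale(2.5))             # ROI(2.5, 5.0, 7.5, 10.0)
print(ROI(0, 5, 10, 20).domain(3))            # [12.5 15.  17.5]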
dirmeier/dataframe
tests/test_cases.py
1
2832
# dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'


import pytest
import unittest
import dataframe
import scipy.stats as sps
from dataframe import group, modify, subset, aggregate
from sklearn import datasets
import re
from dataframe import Callable
from statistics import mean


class Mean(Callable):
    def __call__(self, *args):
        vals = args[0].values
        return mean(vals)


class Zscore(Callable):
    def __call__(self, *args):
        vals = args[0].values
        return sps.zscore(vals).tolist()


iris_data = datasets.load_iris()
# raw string so the backslash in \s is not treated as a string escape
features = [re.sub(r"\s|cm|\(|\)", "", x) for x in iris_data.feature_names]
data = {features[i]: iris_data.data[:, i]
        for i in range(len(iris_data.data[1, :]))}
data["target"] = iris_data.target

frame = dataframe.DataFrame(**data) >> group("target")
print(frame)

# k = frame >> group("target")
# print(k)
#
# k = frame >> group("target") >> group("petalwidth")
# print(k)
#
# k = group(frame, "target")
# print(k)
#
# k = aggregate(frame, Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> aggregate(Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> group("target") >> aggregate(Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> group("target") >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = group(frame, "target") >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = modify(frame, Zscore, "zscore", "petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth") >> subset("zscore")
# print(k)
# print(k.ncol)

# k = frame >> subset("petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth") >> group("target") >> \
#     aggregate(Mean, "mz", "zscore")
# print(k)

# k = frame >> \
#     group("target") >> \
#     modify(Zscore, "z", "petalwidth") >> \
#     subset("z") >> \
#     aggregate(Mean, "m", "z")
#
# print(k)

# frame = dataframe.DataFrame(**data)
# k = frame.aggregate(Mean, "mean", "petallength")
#
# print(k)
gpl-3.0
-3,136,135,424,214,697,500
24.754545
77
0.647952
false
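The Mean and Zscore callables in the test file above follow a small, uniform pattern. A further, hypothetical example of the same pattern (the Median class below is not part of the original tests; only Callable, group and aggregate from the file above are assumed):

# Hypothetical additional Callable mirroring Mean/Zscore from the file above.
from statistics import median
from dataframe import Callable, group, aggregate

class Median(Callable):
    def __call__(self, *args):
        vals = args[0].values
        return median(vals)

# Used the same way as Mean in the piped examples, e.g.:
# frame >> group("target") >> aggregate(Median, "median", "petalwidth")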
DavideCanton/Python3
pyIAprove/labyrinth.py
1
12404
from math import sqrt import numpy as np from collections import defaultdict, namedtuple from PIL import Image DIRS = U, L, D, R, UL, UR, DL, DR = [(0, -1), (-1, 0), (0, 1), (1, 0), (-1, -1), (1, -1), (-1, 1), (1, 1)] def dist_2(p1, p2): dx = p1[0] - p2[0] dy = p1[1] - p2[1] return sqrt(dx * dx + dy * dy) def vsum(x, y): return tuple([a + b for a, b in zip(x, y)]) class NeighboursGenerator: def __init__(self, labyrinth): self.labyrinth = labyrinth def _U(self, x, y): return y > 0 and self.labyrinth[x, y - 1] == 1 def _D(self, x, y): return y < self.labyrinth.h - 1 and self.labyrinth[x, y + 1] == 1 def _L(self, x, y): return x > 0 and self.labyrinth[x - 1, y] == 1 def _R(self, x, y): return x < self.labyrinth.w - 1 and self.labyrinth[x + 1, y] == 1 def __call__(self, n, dir=None): x, y = n if self._U(x, y): yield vsum(n, U), 1 if self._D(x, y): yield vsum(n, D), 1 if self._L(x, y): yield vsum(n, L), 1 if self._R(x, y): yield vsum(n, R), 1 SQRT_2 = sqrt(2) MAX_ALIVE = 10000 class NeighboursGeneratorDiag(NeighboursGenerator): def __init__(self, labyrinth): NeighboursGenerator.__init__(self, labyrinth) def __call__(self, n, dir=None): x, y = n if self._U(x, y): yield vsum(n, U), 1 if self._D(x, y): yield vsum(n, D), 1 if self._L(x, y): yield vsum(n, L), 1 if self._R(x, y): yield vsum(n, R), 1 if (self._U(x, y) and self._L(x, y - 1) or self._L(x, y) and self._U(x - 1, y)): yield vsum(n, UL), SQRT_2 if (self._U(x, y) and self._R(x, y - 1) or self._R(x, y) and self._U(x + 1, y)): yield vsum(n, UR), SQRT_2 if (self._D(x, y) and self._L(x, y + 1) or self._L(x, y) and self._D(x - 1, y)): yield vsum(n, DL), SQRT_2 if (self._D(x, y) and self._R(x, y + 1) or self._R(x, y) and self._D(x + 1, y)): yield vsum(n, DR), SQRT_2 class NeighborsGeneratorPruning(NeighboursGeneratorDiag): def __init__(self, labyrinth): NeighboursGeneratorDiag.__init__(self, labyrinth) def __call__(self, current, parent=None): neighbors = NeighboursGeneratorDiag.__call__(self, current) if parent is None: yield from neighbors else: current = np.array(current) neighbors = [np.array(n[0]) for n in neighbors] parent = np.array(parent) move = current - parent move = normalize(move) if move.all(): # se nessuno e' 0 allora e' una mossa diagonale neighbors = self._pruneDiag(neighbors, current, move) else: neighbors = self._pruneStraight(neighbors, current, move) act_neighbors = [] for n in neighbors: print("Called jump from", current, "towards", n - current) n = self._jump(current, n - current, self.labyrinth.goal) print("Returned", n) if n is not None: t = tuple(int(x) for x in n) act_neighbors.append((t, dist_2(current, n))) yield from act_neighbors def compute_forcedStraight(self, n, move): pruned = [] for direct in orthogonal(move): dirt = n + direct if dirt in self.labyrinth and self.labyrinth[dirt] == 0: pruned.append(dirt + move) return pruned def compute_forcedDiag(self, parent, move): pruned = [] for c in components(move): ob = parent + c if ob in self.labyrinth and self.labyrinth[ob] == 0: pruned.append(ob + c) return pruned def _pruneStraight(self, neighbors, n, move): pruned = [n + move] pruned.extend(self.compute_forcedStraight(n, move)) return [p for p in pruned if any(np.array_equal(p, x) for x in neighbors)] def _pruneDiag(self, neighbors, n, move): pruned = [n + d for d in components(move)] # if all(self.labyrinth[x] == 1 for x in pruned): pruned.append(n + move) parent = n - move pruned.extend(self.compute_forcedDiag(parent, move)) return [p for p in pruned if any(np.array_equal(p, x) for x in neighbors)] def _jump(self, 
current, direction, goal): next = current + direction if not self.labyrinth[next] or next not in self.labyrinth: return None if np.array_equal(next, goal): return next isDiag = direction.all() if isDiag: if all(not self.labyrinth[current + dirs] for dirs in components(direction)): return None forced = self.compute_forcedDiag(current, direction) else: forced = self.compute_forcedStraight(next, direction) if any(self.labyrinth[f] for f in forced): return next if isDiag: for dirt in components(direction): if self._jump(next, dirt, goal) is not None: return next return self._jump(next, direction, goal) def _jumpi(self, current, direction, goal): retval = None stack = [Snapshot(current, direction, goal, None, None, 0)] while stack: el = stack.pop() if el.stage == 0: next = el.current + el.direction if not self.labyrinth[next] or next not in self.labyrinth: retval = None continue if np.array_equal(next, el.goal): retval = next continue isDiag = el.direction.all() if isDiag: if all(not self.labyrinth[el.current + dirs] for dirs in components(direction)): retval = None continue forced = self.compute_forcedDiag(el.current, el.direction) else: forced = self.compute_forcedStraight(next, el.direction) if any(self.labyrinth[f] for f in forced): retval = next continue if isDiag: el.stage = 1 el.next = next stack.append(el) dirs = list(components(direction)) el.dirs = dirs snapshot = Snapshot(next, dirs[0], el.goal, next, dirs, 0) stack.append(snapshot) continue else: snapshot = Snapshot(next, el.direction, el.goal, None, None, 0) stack.append(snapshot) continue elif el.stage == 1: r1 = retval if r1 is not None: retval = el.next continue el.stage = 2 stack.append(el) snapshot = Snapshot(el.next, el.dirs[1], el.goal, el.next, el.dirs, 0) stack.append(snapshot) continue elif el.stage == 2: r2 = retval if r2 is not None: retval = el.next continue snapshot = Snapshot(el.next, el.direction, el.goal, None, None, 0) stack.append(snapshot) continue return retval def _jumpi2(self, current, direction, goal): stack = [(current, direction, goal)] while stack: current, direction, goal = stack.pop() next = current + direction if not self.labyrinth[next] or next not in self.labyrinth: return None if np.array_equal(next, goal): return next # assuming n cannot be None isDiag = direction.all() if isDiag: if all(not self.labyrinth[current + dirs] for dirs in components(direction)): return None forced = self.compute_forcedDiag(current, direction) else: forced = self.compute_forcedStraight(next, direction) if any(self.labyrinth[f] for f in forced): return next if isDiag: stack.extend((next, di, goal) for di in components(direction)) else: stack.append((next, direction, goal)) class Snapshot: def __init__(self, current, direction, goal, next, dirs, stage): self.current = current self.direction = direction self.goal = goal self.next = next self.dirs = dirs self.stage = stage def __str__(self): return str(self.__dict__) class Labyrinth: def __init__(self, w, h): self.labyrinth = defaultdict(int) self.w = w self.h = h self.start = None self.goal = None def __getitem__(self, item): return self.labyrinth[tuple(item)] def __contains__(self, pos): return 0 <= pos[0] < self.w and 0 <= pos[1] < self.h def __setitem__(self, key, value): self.labyrinth[tuple(key)] = value def orthogonal(move): move = move.copy() move[[0, 1]] = move[[1, 0]] yield move yield -move def components(move, vert=True): move = move.copy() indexes = (1, 0) if vert else (0, 1) for ind in indexes: d1 = move.copy() d1[ind] = 0 yield d1 def normalize(move): f = 
move[0] if move[0] else move[1] return move / abs(f) def load_from_img(imgpath): im = Image.open(imgpath) pix = im.load() h, w = im.size labyrinth = Labyrinth(w, h) for i in range(w): for j in range(h): # avoid alpha pixel = pix[j, i][:3] if pixel == (255, 255, 255): labyrinth[i, j] = 1 elif pixel == (255, 0, 0): labyrinth[i, j] = 1 labyrinth.start = i, j elif pixel == (0, 255, 0): labyrinth[i, j] = 1 labyrinth.goal = i, j return labyrinth, im def load_from_map_file(filepath): i, w, h = 0, 0, 0 map_started = False with open(filepath) as map_file: for line in map_file: if line.startswith("height"): w = int(line.split()[1]) elif line.startswith("width"): h = int(line.split()[1]) elif line.startswith("map"): labyrinth = Labyrinth(w, h) map_started = True elif map_started: for j, c in enumerate(line): if c in ".G": labyrinth[i, j] = 1 elif c == "X": labyrinth[i, j] = 1 labyrinth.start = ((i, j)) elif c == "Y": labyrinth[i, j] = 1 labyrinth.goal = ((i, j)) else: labyrinth[i, j] = 0 i += 1 im = lab_to_im(labyrinth) return labyrinth, im def lab_to_im(labyrinth): im = Image.new("RGB", (labyrinth.h, labyrinth.w)) pix = im.load() for i in range(labyrinth.w): for j in range(labyrinth.h): v = labyrinth[i, j] pix[j, i] = (v * 255, v * 255, v * 255) start = labyrinth.start pix[start[1], start[0]] = (255, 0, 0) goal = labyrinth.goal pix[goal[1], goal[0]] = (0, 255, 0) return im if __name__ == "__main__": imgpath = r"D:\labyrinth\lab4.bmp" # imgpath = r"D:\labyrinth\map\arena.map" print("Reading labyrinth from {}...".format(imgpath)) labyrinth, _ = load_from_img(imgpath) print("Read") gen = NeighborsGeneratorPruning(labyrinth) for g in gen((2, 17), parent=(1, 16)): print(g)
gpl-3.0
-3,098,687,144,941,617,000
31.814815
80
0.484844
false
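A small self-contained sketch of how the pruning generator in labyrinth.py above can be driven without an image or map file; the grid size and coordinates are made up for illustration and only the classes defined in the file are assumed:

# Illustrative driver for NeighborsGeneratorPruning: a fully open 5x5 grid,
# so no walls force extra neighbours and the diagonal jump runs to the goal.
lab = Labyrinth(5, 5)
for i in range(5):
    for j in range(5):
        lab[i, j] = 1            # every cell walkable
lab.start = (0, 0)
lab.goal = (4, 4)

gen = NeighborsGeneratorPruning(lab)
for successor, cost in gen((1, 1), parent=(0, 0)):
    print(successor, cost)       # e.g. (4, 4) with cost ~4.24 for the diagonal jump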
J4sp3r/damrobot
Project/lib/util.py
1
1474
#!/usr/local/bin/python

import cv2, os
import numpy as np
import matplotlib.pyplot as plt

from lib import log


def imshow(img):
    cv2.namedWindow("preview")
    cv2.imshow("preview", img)
    rval = True
    while rval:
        key = cv2.waitKey(27)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")


def imshow2(img):
    plt.subplot(111), plt.imshow(img), plt.title('Output')
    plt.show()


def imgresize(img, w, h):
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)


def getpos(img, ratio):
    width = img.shape[0]
    width2 = int(width * ratio)
    height = img.shape[1]
    height2 = int(height * ratio)
    pts1 = np.float32([[(width - width2) * 0.5 - 1, (height - height2) * 0.5 - 1],
                       [(width - width2) * 0.5 + width2 - 1, (height - height2) * 0.5 - 1],
                       [(width - width2) * 0.5 - 1, (height - height2) * 0.5 + height2 - 1],
                       [(width - width2) * 0.5 + width2 - 1, (height - height2) * 0.5 + height2 - 1]])
    pts2 = np.float32([[0, 0], [width2 - 1, 0], [0, height2 - 1], [width2 - 1, height2 - 1]])
    retval = cv2.getPerspectiveTransform(pts1, pts2)
    warp = cv2.warpPerspective(img, retval, (width2, height2))
    return warp


def board2file(board, file):
    f = open(file, 'w')
    for x in range(8):
        rule = ""
        first = True
        for y in range(8):
            if first:
                first = False
            else:
                rule += ","
            rule += str(board[x][y])
        if x < 7:
            rule += "\n"
        f.write(rule)
    f.close()


def state(path, state):
    f = open(path + "\\files\\state.txt", 'w')
    f.write(state)
    f.close()


def newboard(path):
    f = open(path + "\\files\\newbord.txt", 'w')
    f.write("true")
    f.close()
mit
-8,841,574,868,602,686,000
24
235
0.656716
false
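A quick sketch exercising the resize/crop helpers from lib/util.py above on a synthetic image; no camera or file on disk is needed, and the sizes are illustrative assumptions:

# Illustrative use of imgresize/getpos from the file above on synthetic data.
import numpy as np

img = np.zeros((200, 200, 3), dtype=np.uint8)
small = imgresize(img, 100, 100)     # cubic resize to 100x100
crop = getpos(img, 0.5)              # central crop at half the original size
print(small.shape, crop.shape)       # (100, 100, 3) (100, 100, 3)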
bccp/abopt
abopt/legacy/vmad2.py
1
32837
from __future__ import print_function import warnings import functools import logging logger = logging.getLogger("VMAD") _logging_handler = logging.StreamHandler() logger.addHandler(_logging_handler) # TODO: # Add visualization def ZeroType(): """ creates a special type of ZeroType; """ def self(self, *args): return self def other(self, other): return other def zde(self, a): raise ZeroDivisionError def __sub__(self, a): return -a def __xor__(self, a): return ~a def __int__(self): return 0 def __float__(self): return 0.0 def __round__(self): return 0 def __array__(self, dtype=None): import numpy return numpy.array(0, dtype=dtype) def __repr__(self): return "<ZERO>" dict = {} for name, value in locals().items(): if name.startswith("__"): dict[name] = value for name in [ "neg", "pos", "abs", "invert", "complex", "mul", "rmul", "matmul", "rmatmul", "mod", "divmod", "div", "truediv", "floordiv", "pow", "and", "rand", "lshift", "rlshift", "rshift", "rrshift", "getitem", "reversed"]: dict["__" + name + "__"] = self for name in [ "rmod", "rdivmod", "rdiv", "rtruediv", "rfloordiv", "rpow", "rsub", "rxor"]: dict["__" + name + "__"] = zde for name in [ "add", "radd", "or", "ror"]: dict["__" + name + "__"] = other dict["__repr__"] = __repr__ return type("ZeroType", (object,), dict) ZeroType = ZeroType() ZERO = ZeroType() # decorators def statement(ain, aout): return lambda body: Statement(body, ain, aout) def programme(ain, aout): return lambda body: Programme(body, ain, aout) # create a model using a given computing engine def model(engine): return CodeSegment(engine) class LValue(object): def __init__(self, name, ns): self.ns = ns self.name = name def __getattr__(self, attr): return getattr(self[...], attr) def __repr__(self): return "LValue:%s" % self.name def __getitem__(self, index): return self.ns[self.name] def __setitem__(self, index, value): self.ns[self.name] = value class Literal(object): def __init__(self, value): self.value = value def __repr__(self): return "Literal:%s" % _short_repr(self.value) class Primitive(object): def __init__(self, body, ain, aout, argnames=None): self.body = body self.ain = ain self.aout = aout if argnames is None: argnames = body.__code__.co_varnames[1:body.__code__.co_argcount] if getattr(body, '__kwdefaults__', None) is not None: self.defaults = dict(body.__kwdefaults__) elif getattr(body, '__defaults__', None) is not None: self.defaults = dict(zip(argnames[-len(body.__defaults__):], body.__defaults__)) else: self.defaults = {} self.argnames = argnames for an in ain: if not an in self.argnames: raise ValueError( "argument `%s` of ain in microcode decorator is not declared by function `%s`" % (an, str(self.body)) ) functools.update_wrapper(self, body) args = Arguments() for argname in argnames: if argname in ain: if argname in aout: arg = IOArgument(argname) else: arg = IArgument(argname) elif argname in aout: arg = OArgument(argname) else: arg = EXArgument(argname) args.append(arg) self.args = args body.args = args def __repr__(self): return self.body.__name__ def create_node(self, engine): nodetype = type(self).NodeType return nodetype(engine, self) class Variable(object): """ if the same variable name is modified we use a different postifx this happens as Variable mentioned in Code as O/IOArgument. 
""" def __init__(self, name, postfix): self.name = name self.postfix = postfix @property def name_vjp(self): return '_' + self.name @property def name_jvp(self): return self.name + '_' def __hash__(self): return hash(self.name + '-%s' % self.postfix) def __eq__(self, other): return self.name == other.name and self.postfix == other.postfix def __repr__(self): if self.postfix is not None: return "%s/%d" % (self.name, self.postfix) else: return "%s" % (self.name) class Argument(object): def __init__(self, name): self.name = name self.value = None self.ovalue = None def copy(self): arg = type(self)(self.name) arg.value = self.value arg.ovalue = self.ovalue return arg @property def name_vjp(self): return '_' + self.name @property def name_jvp(self): return self.name + '_' def dereference(self, context): """ returns the value of an argument by its value if context is None, returns the name of the variable """ if isinstance(self.value, Literal): if context is None: return self.value else: return self.value.value elif isinstance(self.value, Variable): if context is None: return self.value.name else: return context[self.value.name] else: return self.value def __repr__(self): if isinstance(self, IOArgument): return "%s:%s=%s=>%s" % (type(self).__name__, self.name, self.value, self.ovalue) else: return "%s:%s=%s" % (type(self).__name__, self.name, _short_repr(self.value)) class IArgument(Argument): pass class OArgument(Argument): pass class IOArgument(Argument): pass class EXArgument(Argument): pass class Arguments(list): def copy(self): args = Arguments() for arg in self: args.append(arg.copy()) return args def find(self, argname): for arg in self: if arg.name == argname: return arg else: raise KeyError def get_kwargs(self): kwargs = {} for arg in self: if isinstance(arg.value, Variable): kwargs[arg.name] = arg.value.name else: kwargs[arg.name] = arg.value return kwargs def set_values(self, kwargs, defaults, code): _kwargs = defaults.copy() _kwargs.update(kwargs) kwargs = _kwargs for arg in self: if isinstance(arg, EXArgument): variable = kwargs.pop(arg.name) arg.value = variable for arg in self: if isinstance(arg, IArgument): varname = kwargs.pop(arg.name, arg.name) variable = code.get_latest_variable(varname, expired=False) arg.value = variable for arg in self: if isinstance(arg, IOArgument): varname = kwargs.pop(arg.name, arg.name) variable = code.get_latest_variable(varname, expired=True) arg.value = variable arg.ovalue = code.create_latest_variable(varname) for arg in self: if isinstance(arg, OArgument): varname = kwargs.pop(arg.name, arg.name) variable = code.create_latest_variable(varname) arg.ovalue = variable arg.value = variable if len(kwargs) > 0: raise ValueError("additional kwargs are found: %s" % list(kwargs.keys())) class Node(object): # if true, invoke will directly return ZERO when all inputs are ZERO ZERO_BYPASS = False def __init__(self, engine, primitive): self.primitive = primitive self.engine = engine self.args = primitive.args.copy() def copy(self): return type(self)(self.engine, self.primitive) def bind(self, frontier, results): """ bind args to objects in frontier, or LValues """ bound = [] primitive = self.primitive for arg in self.args: if isinstance(arg, (IArgument, IOArgument)): bound.append(arg.dereference(frontier)) elif isinstance(arg, OArgument): bound.append(LValue(arg.value.name, results)) else: bound.append(arg.value) return bound def __repr__(self): return "%s(%s)" % (self.primitive, self.args) def call(self, bound, return_tape=False): # a simple node doesn't 
know how to return a Tape assert not return_tape r = self.primitive.body(self.engine, *bound) return None def invoke(self, frontier): #logger.info("Invoke %s" % (self)) out = {} bound = self.bind(frontier, out) if self.ZERO_BYPASS and ( all([value is ZERO for arg, value in zip(self.args, bound) if isinstance(arg, (IArgument, IOArgument))])): for arg, value in zip(self.args, bound): if isinstance(arg, OArgument): # IOArguments are already ZEROs value[...] = ZERO else: self.call(bound) for arg, value in zip(self.args, bound): if isinstance(arg, IOArgument): out[arg.value.name] = value return out def invoke_for_tape(self, frontier): bound = self.bind(frontier, {}) tape = self.call(bound, return_tape=True) # The call didn't return a Tape. Likley calling the method on a statement's Node? assert isinstance(tape, Tape) return tape class CodeSegNode(Node): def get_codeseg(self): raise NotImplementedError def call(self, bound, return_tape=False): init = {} lvalues = {} for arg, value in zip(self.args, bound): if isinstance(arg, (IArgument, IOArgument)): init[arg.name] = value elif isinstance(arg, (EXArgument,)): init[arg.name] = value else: lvalues[arg.name] = value aout = [ arg.name for arg in self.args if isinstance(arg, (OArgument, IOArgument))] codeseg = self.get_codeseg() # compute doesn't taint init. out = codeseg.compute(aout, init, return_tape=return_tape) #logger.info("CodeSegment results %s %s" % (aout, _short_repr(out))) if return_tape: out, tape = out else: tape = None for argname, value in zip(aout, out): lvalues[argname][...] = value return tape class Statement(Primitive): NodeType = Node def __init__(self, body, ain, aout): Primitive.__init__(self, body, ain, aout) def defvjp(self, body): """ Define the back-propagation gradient operator. """ gout = ['_' + a for a in self.ain] gin = ['_' + a for a in self.aout] argnames = body.__code__.co_varnames[1:body.__code__.co_argcount] ain = [a for a in self.ain if a in argnames] body.__name__ = "BG:" + self.body.__name__ self.vjp = StatementVJP(body, gin + ain, gout) # allow the gradient with the same name as the original body. return self.vjp def defjvp(self, body): """ Define the forward-propagation gradient operator. """ gin = [a + '_' for a in self.ain] gout = [a + '_' for a in self.aout] argnames = body.__code__.co_varnames[1:body.__code__.co_argcount] ain = [a for a in self.ain if a in argnames] body.__name__ = "FG:" + self.body.__name__ self.jvp = StatementJVP(body, gin + ain, gout) # allow the gradient with the same name as the original body. 
return self.jvp class StatementVJP(Statement): class NodeType(Node): ZERO_BYPASS = True class StatementJVP(Statement): class NodeType(Node): ZERO_BYPASS = True class Programme(Primitive): def __init__(self, body, ain, aout): Primitive.__init__(self, body, ain, aout) self.vjp = ProgrammeVJP(self) self.jvp = ProgrammeJVP(self) class NodeType(CodeSegNode): def get_codeseg(self): return self.primitive.body(self.engine, *[arg.value if isinstance(arg, EXArgument) else arg.name for arg in self.args]) class ProgrammeVJP(Primitive): def __init__(self, programme): gout = ['_' + a for a in programme.ain] gin = ['_' + a for a in programme.aout] ex = [a for a in programme.argnames if a not in programme.aout + programme.ain] extra = ['#replay-record'] body = lambda : None body.__name__ = "VJP:" + programme.body.__name__ argnames = list(set(gin + gout + programme.ain + ex + extra)) Primitive.__init__(self, body, gin, gout, argnames=argnames) class NodeType(CodeSegNode): ZERO_BYPASS = True def get_codeseg(self): # replay then obtain the gradient codeseg node, d = self.args.find('#replay-record').value tape = node.invoke_for_tape(d) vjpcode = tape.get_vjp() # if an output variable is not mentioned in the code # then the vjp code segment doesn't set the default to ZERO # we fix it here. for arg in self.args: if isinstance(arg, OArgument): vjpcode.defaults[arg.name] = ZERO return vjpcode class ProgrammeJVP(Primitive): def __init__(self, programme): gin = [a + '_' for a in programme.ain] gout = [a + '_' for a in programme.aout] ex = [a for a in programme.argnames if a not in programme.aout + programme.ain] extra = ['#replay-record'] body = lambda : None body.__name__ = "JVP:" + programme.body.__name__ argnames = list(set(gin + gout + programme.ain + ex + extra)) Primitive.__init__(self, body, gin, gout, argnames=argnames) class NodeType(CodeSegNode): ZERO_BYPASS = True def get_codeseg(self): node, d = self.args.find('#replay-record').value tape = node.invoke_for_tape(d) # Watch out: We use the tape version # of VJP because with the code version of VJP # we do not know how to pass in the arguments # these arguments are marked as EXArgument in the VJP # but we need to resolve them from the frontier. jvpcode = tape.get_jvp() return jvpcode class Tape(object): def __init__(self, engine, init): self.records = [] self.init = {} self.init.update(init) self.engine = engine def append(self, node, frontier): d = {} for arg in node.args: # remember all input variable as their values if isinstance(arg, (IArgument, IOArgument)) \ and isinstance(arg.value, Variable): d[arg.value.name] = arg.dereference(frontier) self.records.append((node, d)) def __repr__(self): return '\n'.join('%s | %s' % (node, list(d.keys())) for node, d in self.records) def get_vjp(self): """ Create a code segment that computes the vector jacobian product for a tape, with backward gradient propagation. A vector jacobian product is J_ij v_j where j is the output variable index. The input variable of the returned CodeSegment is '_a', '_b', ... where a, b, ... are the output variables. 
""" code = CodeSegment(self.engine) add = self.engine.add first_time = {} for node, d in self.records[::-1]: vjp = node.primitive.vjp kwargs = {} partials = [] for arg in node.args: if isinstance(arg, OArgument) \ and arg.name_vjp in vjp.argnames: kwargs[arg.name_vjp] = arg.value.name_vjp if isinstance(arg, (IArgument, IOArgument)) \ and arg.name in vjp.argnames: kwargs[arg.name] = Literal(arg.dereference(d)) if isinstance(arg, (IArgument, IOArgument)) and \ arg.name_vjp in vjp.argnames: if isinstance(arg.value, Literal): kwargs[arg.name_vjp] = '###abandon###' else: if first_time.get(arg.value, True): # directly write to the gradient, it is used kwargs[arg.name_vjp] = arg.value.name_vjp first_time[arg.value] = False else: newname = arg.value.name_vjp + '#partial' kwargs[arg.name_vjp] = newname partials.append((newname, arg.value.name_vjp)) if isinstance(arg, EXArgument) and arg.name in vjp.argnames: kwargs[arg.name] = arg.value if isinstance(node.primitive, Programme): # the vjp of a Programme requires more arguments # to build the vjp codesegment on the fly kwargs['#replay-record'] = (node, d) code.append(vjp, kwargs) for p, r in partials: kwargs = {} kwargs['x1'] = p kwargs['x2'] = r kwargs['y'] = r code.append(add, kwargs) for variable in code._input_variables.values(): code.defaults[variable.name] = ZERO #logger.info("GRADIENT code.defaults: %s " % code.defaults) return code def get_jvp(self): """ creates a CodeSegment that computes the jacobian vector product, from a tape, via forward gradient propagation. A jacobian vector product is J_ij v_i where i is index of the input variables. The returned CodeSegment input is 'a_', 'b_', ... where 'a', 'b', ... are the input variables of the original code segment. The advantage of starting from a tape is that we do not need to compute the original code together with the forward pass. Useful if we need to do vjp and jvp same time. """ code = CodeSegment(self.engine) for node, d in self.records: jvp = node.primitive.jvp kwargs = {} for arg in node.args: if isinstance(arg.value, Variable) and arg.name_jvp in jvp.argnames: kwargs[arg.name_jvp] = arg.value.name_jvp if isinstance(arg, IArgument) and arg.name in jvp.argnames: kwargs[arg.name] = Literal(arg.dereference(d)) # for literal inputs, we shall set the input gradient to zero if isinstance(arg.value, Literal): kwargs[arg.name_jvp] = ZERO if isinstance(arg, IOArgument) and arg.name in jvp.argnames: kwargs[arg.name] = Literal(arg.dereference(d)) if isinstance(arg, EXArgument) and arg.name in jvp.argnames: kwargs[arg.name] = arg.value if isinstance(jvp, ProgrammeJVP): kwargs['#replay-record'] = node, d code.append(jvp, kwargs) for variable in code._input_variables.values(): code.defaults[variable.name] = ZERO return code def to_graph(self, **kwargs): nodes = [node for node, kwargs in self.records] return nodes_to_graph(nodes, **kwargs)[0] class CodeSegment(object): def __init__(self, engine): self.engine = engine self.nodes = [] self.defaults = {} # use these if not provided in init self._liveset = {} # the set of variables ready to be used as input (latest variables). # If a variable is destroyed the value replaced with None. self._postfix = 0 # a unique postfix added to every variable; # if a variable is used as input but not yet been mentioned on the liveset. 
self._input_variables = {} # input variables def __getattr__(self, name): """ Allow looking up primitives and programmes from the engine namespace """ try: item = getattr(self.engine, name) except AttributeError: raise AttributeError("%s is not a declared primitiveuction in %s" % (name, type(self.engine))) if isinstance(item, Primitive): primitive = item def func(**kwargs): self.append(primitive, kwargs) functools.update_wrapper(func, item) return func else: raise TypeError def __dir__(self): l = [] for name in dir(self.engine): item = getattr(self.engine, name) if isinstance(item, Primitive): l.append(name) return l + dir(type(self)) def copy(self): code = CodeSegment(self.engine) code.nodes.extend(self.nodes) code.defaults.update(self.defaults) code._liveset.update(self._liveset) code._postfix = self._postfix code._input_variables = self._input_variables return code def get_latest_variable(self, varname, expired=False): if isinstance(varname, Literal): return varname if varname not in self._liveset: self._postfix = self._postfix + 1 variable = Variable(varname, self._postfix) self._input_variables[varname] = variable else: variable = self._liveset.get(varname) if variable is None: self._postfix = self._postfix + 1 variable = Variable(varname, self._postfix) if not expired: self._liveset[varname] = variable else: self._liveset[varname] = None return variable def create_latest_variable(self, varname): self._postfix = self._postfix + 1 variable = Variable(varname, self._postfix) self._liveset[varname] = variable return variable def append(self, primitive, kwargs): node = primitive.create_node(self.engine) node.args.set_values(kwargs, primitive.defaults, self) self.nodes.append(node) def optimize(self, vout): out = [ self.get_latest_variable(varname) for varname in vout] nodes = _optimize(self.nodes, out) segment = self.copy() segment.nodes = nodes return segment def compute(self, vout, init, return_tape=False, monitor=None): assign = self.engine.assign.body if not isinstance(vout, (tuple, list, set)): vout = [vout] squeeze = True else: squeeze = False frontier = {} for var, value in self.defaults.items(): frontier[var] = value for var, value in init.items(): frontier[var] = value if return_tape: tape = Tape(self.engine, frontier) # XXX This is not nice. But requires too much # refactoring to get it right. self = self.copy() # we need to connect the outputs to a 'terminal' # node such that their input gradients are not # overwritten by the partial gradients of # subsequential operations. 
@statement(ain=['x'], aout=['x']) def mark(engine, x): pass @mark.defvjp def _(engine, _x): pass @mark.defjvp def _(engine, x_): pass @mark.vjp.defjvp def _(engine, _x_): pass for varname in vout: self.append(mark, {'x' : varname}) else: tape = None out = [ self.get_latest_variable(varname) for varname in vout] nodes = _optimize(self.nodes, out) freeables = _get_freeables(nodes, out) for i, (node, abandon) in enumerate(zip(nodes, freeables)): if tape: tape.append(node, frontier) if node.primitive is not mark: for arg in node.args: if not isinstance(arg, IOArgument): continue # FIXME: use copy assign(self.engine, x=frontier[arg.value.name], y=LValue(arg.value.name, frontier)) try: r = node.invoke(frontier) except Exception as e: print("Failure in running `%s`" % node) raise if monitor is not None: monitor(node, frontier, r) for var in abandon: frontier.pop(var.name) # if len(abandon): # logger.info("Removed from frontier %s", abandon) frontier.update(r) #logger.info("Frontier %s", list(frontier.keys())) r = [frontier[vn] for vn in vout] if squeeze: r = r[0] if return_tape: r = r, tape return r def get_jvp(self, init={}): """ creates a CodeSegment that computes the jacobian vector product, with forward gradient propagation. A jacobian vector product is J_ij v_i where i is index of the input variables. The returned CodeSegment input is 'a_', 'b_', ... where 'a', 'b', ... are the input variables of the original code segment. This will compute the original code together with the forward gradient pass. """ code = CodeSegment(self.engine) for node in self.nodes: jvp = node.primitive.jvp kwargs = {} for arg in node.args: if isinstance(arg.value, Variable) and arg.name_jvp in jvp.argnames: kwargs[arg.name_jvp] = arg.value.name_jvp if isinstance(arg, (IArgument, IOArgument)) and arg.name in jvp.argnames: kwargs[arg.name] = arg.dereference(None) if isinstance(arg, EXArgument) and arg.name in jvp.argnames: kwargs[arg.name] = arg.value if isinstance(jvp, ProgrammeJVP): # empty init because all variables are on the frontier. kwargs['#replay-record'] = node, {} code.append(jvp, kwargs) code.append(node.primitive, node.args.get_kwargs()) for variable in code._input_variables.values(): code.defaults[variable.name] = ZERO # merge in the defaults of self code.defaults.update(self.defaults) # initialize with the defaults code.defaults.update(init) return code def compute_with_gradient(self, vout, init, ginit, return_tape=False): if not isinstance(vout, (tuple, list, set)): vout = [vout] squeeze = True else: squeeze = False cnout = [vn for vn in vout if not vn.startswith('_')] # if gradient request are requested, they must be computed cnout_g = [ vn[1:] for vn in ginit] gnout = [vn for vn in vout if vn.startswith('_')] cout, tape = self.compute(cnout + cnout_g, init, return_tape=True) cout = cout[:len(cnout)] vjpcode = tape.get_vjp() gout = vjpcode.compute(gnout, ginit) d = {} d.update(zip(cnout, cout)) d.update(zip(gnout, gout)) out = [d[vn] for vn in vout] if squeeze: out = out[0] return out def __repr__(self): nodes = '\n'.join('%s' % node for node in self.nodes) return '\n'.join([nodes]) def to_graph(self, **kwargs): return nodes_to_graph(self.nodes, **kwargs)[0] # an optional base class for computing engines class Engine(object): @statement(ain=['x1', 'x2'], aout=['y']) def add(engine, x1, x2, y): y[...] = x1 + x2 @statement(ain=['x'], aout=['y']) def assign(engine, x, y): y[...] = x * 1.0 def _optimize(nodes, out): """ return an optimized codeseg for computing vout. irrelevant nodes are pruned. 
""" deps = set(out) newnodes = [] for node in nodes[::-1]: keep = False for arg in node.args: if isinstance(arg, OArgument) and arg.value in deps: keep = True deps.remove(arg.value) if isinstance(arg, IOArgument) and arg.ovalue in deps: keep = True deps.remove(arg.ovalue) if not keep: continue newnodes.append(node) for arg in node.args: if isinstance(arg, (IArgument, IOArgument)): deps.add(arg.value) return list(reversed(newnodes)) def _get_freeables(nodes, out): refcounts = {} for var in out: refcounts[var] = refcounts.get(var, 0) + 1 for node in nodes: for arg in node.args: if isinstance(arg, (IArgument, IOArgument)): refcounts[arg.value] = refcounts.get(arg.value, 0) + 1 free_list = [] for node in nodes: item = [] for arg in node.args: if isinstance(arg, (IArgument, IOArgument)): if isinstance(arg.value, Literal): continue refcounts[arg.value] = refcounts[arg.value] - 1 if refcounts[arg.value] == 0: item.append(arg.value) free_list.append(item) return free_list def nodes_to_graph(nodes, depth=0, **kwargs): """ Graph representation of nodes, kwargs are sent to graphviz depth controls the behavior of programme nodes. only depth level of sub graphs are made. """ import graphviz graph = graphviz.Digraph(**kwargs) def unique(obj): return '%08X%08X' % (id(nodes), id(obj)) subgraphs = {} for i, node in enumerate(nodes): label = '%s<BR/>' % str(node.primitive) ex = [] for arg in node.args: if isinstance(arg, EXArgument): # bypass aux arguments starting with sharp if not arg.name.startswith('#'): ex.append(str(arg)) label = label + '<BR/>'.join(ex) label = '<' + label + '>' if depth > 0 and isinstance(node, CodeSegNode): # render the programme nodes as subgraphs codeseg = node.get_codeseg() subgraph, inputs, outputs = nodes_to_graph(codeseg.nodes, depth - 1) subgraph.name = 'cluster_' + str(node.primitive) subgraph.attr('graph', label=label) subgraph.attr('graph', color='blue') subgraph.attr('graph', style='dotted') graph.subgraph(subgraph) subgraphs[unique(node)] = (inputs, outputs) else: graph.node(unique(node), label=label, shape='box') source = {} inputs, outputs = {}, {} def process_in_arg(arg, node): attrs = {} attrs['label'] = '<' + str(arg.value) + '>' nodeid = unique(node) if isinstance(arg, (IArgument, IOArgument)): if arg.value in source: from_nodeid, from_arg = source[arg.value] attrs['taillabel'] = '<' + str(from_arg.name) + '>' attrs['tail_lp'] = "12" else: from_nodeid = nodeid + unique(arg.value) graph.node(from_nodeid, label=str(arg.value)) if not isinstance(arg.value, Literal): inputs[arg.value.name] = from_nodeid if nodeid in subgraphs: nodeid = subgraphs[nodeid][0][arg.name] attrs['headlabel'] = '<' + str(arg.name) + '>' attrs['head_lp'] = "12" graph.edge(from_nodeid, nodeid, **attrs) def process_out_arg(arg, node): if isinstance(arg, (OArgument, IOArgument)): nodeid = unique(node) if nodeid in subgraphs: nodeid = subgraphs[nodeid][1][arg.name] source[arg.ovalue] = (nodeid, arg) outputs[arg.value.name] = nodeid for i, node in enumerate(nodes): for arg in node.args: process_in_arg(arg, node) process_out_arg(arg, node) return graph, inputs, outputs def _short_repr(obj): if isinstance(obj, (list, tuple)): return [_short_repr(i) for i in obj] else: s = '%s' % obj if len(s) > 30: s = '[%s]' % type(obj).__name__ return s
gpl-3.0
-5,166,341,015,161,290,000
33.063278
107
0.548588
false
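To make the API of the vmad2 module above concrete, here is a minimal sketch of an engine with one statement, its vjp/jvp, and a gradient pulled back through a tape. The engine, statement name and numbers are made up for illustration; only statement, Engine, CodeSegment and the tape interface from the file above are assumed.

# Hypothetical minimal engine built on the vmad2 primitives defined above.
class MyEngine(Engine):
    @statement(ain=['x'], aout=['y'])
    def double(engine, x, y):
        y[...] = 2 * x

    @double.defvjp
    def _(engine, _x, _y):
        _x[...] = 2 * _y

    @double.defjvp
    def _(engine, x_, y_):
        y_[...] = 2 * x_

code = CodeSegment(MyEngine())
code.double(x='a', y='b')

# forward pass with a tape, then back-propagate a unit gradient of 'b'
y, tape = code.compute('b', {'a': 3.0}, return_tape=True)
grad = tape.get_vjp().compute('_a', {'_b': 1.0})
print(y, grad)    # expected: 6.0 and 2.0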
rinatzakirov/vhdl
drivers/ddr_analyze.py
1
1995
import pyqtgraph as pg
import numpy as np


def drawFft(filename, side):
    data = np.fromfile("dump.dat", dtype="uint16").astype(float)
    data = data[side::2]
    #data = data[:-1]
    #data = data[:8192]
    #pg.plot(data)
    #return
    print "raw"
    print np.min(data)
    print np.max(data)
    data -= np.average(data)
    data = data / float(2**15)
    data.astype('float32').tofile(filename + ".f32")
    #pg.plot(data)
    #data = data[:1024]

    def plotPower(fftData, N, C, F):
        v = np.absolute(np.fft.fftshift(fftData) / N)
        sqV = v * v
        p = sqV / 50.0 * 1000 * 2  # * 2 is made to take care of the fact that FFT is two-sided
        pdBm = np.log10(p) * 10
        print "max = %f" % np.max(pdBm)
        print "avg = %f" % np.average(pdBm)
        #pg.plot(np.linspace(-Fs/2, Fs/2 - Fs / N, N), pdBm)
        s1 = N/2 + N * F / Fs - N * 1000000 / Fs
        s2 = N/2 + N * F / Fs + N * 1000000 / Fs
        pg.plot(pdBm[int(s1):int(s2)])

    #854839
    N = data.shape[0]
    #Fs = 105 * 1000 * 1000
    Fs = 105 * 1000 * 1000
    #F = 10.7 * 1000 * 1000
    F = 10.7 * 1000 * 1000
    #data = np.sin((N - 1) * (F / Fs) * np.linspace(0, np.pi * 2, N))
    print "scale"
    print np.min(data)
    print np.max(data)
    print np.log10(pow((np.max(data) - np.min(data)) * np.sqrt(2.0) / 2.0, 2.0) / 50 * 1000) * 10
    z = np.linspace(0, 2 * np.pi, N)
    cos = np.cos
    window = 1 - 1.96760033 * cos(z) + 1.57983607 * cos(2 * z) - 0.81123644 * cos(3 * z) + 0.22583558 * cos(4 * z) - 0.02773848 * cos(5 * z) + 0.00090360 * cos(6 * z)
    #pg.plot(window)
    #window = np.blackman(N)
    window /= np.sum(window) / N

    plotPower(np.fft.fft(window * data), N, Fs, F)
    #pg.plot(data)

    subFft = 256
    window = np.blackman(256)
    sum = np.array(np.fft.fft(data[0:subFft]))
    for i in range(1, N / subFft):
        sum += np.fft.fft(window * data[(i * subFft):((i + 1) * subFft)])
    #plotPower(sum, subFft, Fs)


drawFft("more1", 0)
drawFft("more1", 1)
#drawFft("more2.bin")

from pyqtgraph.Qt import QtCore, QtGui
QtGui.QApplication.instance().exec_()
lgpl-2.1
-8,487,399,865,354,513,000
28.352941
164
0.578947
false
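As a sanity check of the dBm scaling used in ddr_analyze.py above, a 1 V amplitude sine into 50 ohm dissipates 10 mW, i.e. 10 dBm, and the FFT-based estimate should agree. The synthetic tone below is an illustration (written so it also runs under Python 2, like the file above):

# Illustrative check of the power scaling used above on a synthetic tone.
import numpy as np

N = 4096
k = 417                                      # an exact FFT bin, to avoid leakage
tone = np.sin(2 * np.pi * k * np.arange(N) / N)   # 1 V amplitude sine

v = np.absolute(np.fft.fft(tone) / N)
p = v * v / 50.0 * 1000 * 2                  # same scaling as plotPower above
print("peak power = %.1f dBm" % (10 * np.log10(np.max(p))))   # ~10.0 dBm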
petebachant/scipy
scipy/linalg/_interpolative_backend.py
143
44935
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** """ Direct wrappers for Fortran `id_dist` backend. """ import scipy.linalg._interpolative as _id import numpy as np _RETCODE_ERROR = RuntimeError("nonzero return code") #------------------------------------------------------------------------------ # id_rand.f #------------------------------------------------------------------------------ def id_srand(n): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. :param n: Number of pseudorandom numbers to generate. :type n: int :return: Pseudorandom numbers. :rtype: :class:`numpy.ndarray` """ return _id.id_srand(n) def id_srandi(t): """ Initialize seed values for :func:`id_srand` (any appropriately random numbers will do). :param t: Array of 55 seed values. :type t: :class:`numpy.ndarray` """ t = np.asfortranarray(t) _id.id_srandi(t) def id_srando(): """ Reset seed values to their original values. """ _id.id_srando() #------------------------------------------------------------------------------ # idd_frm.f #------------------------------------------------------------------------------ def idd_frm(n, w, x): """ Transform real vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idd_sfrm`, this routine works best when the length of the transformed vector is the power-of-two integer output by :func:`idd_frmi`, or when the length is not specified but instead determined a posteriori from the output. The returned transformed vector is randomly permuted. :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idd_frmi`; `n` is also the length of the output vector. :type n: int :param w: Initialization array constructed by :func:`idd_frmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. 
:rtype: :class:`numpy.ndarray` """ return _id.idd_frm(n, w, x) def idd_sfrm(l, n, w, x): """ Transform real vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idd_frm`, this routine works best when the length of the transformed vector is known a priori. :param l: Length of transformed vector, satisfying `l <= n`. :type l: int :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idd_sfrmi`. :type n: int :param w: Initialization array constructed by :func:`idd_sfrmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idd_sfrm(l, n, w, x) def idd_frmi(m): """ Initialize data for :func:`idd_frm`. :param m: Length of vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idd_frm`. :rtype: :class:`numpy.ndarray` """ return _id.idd_frmi(m) def idd_sfrmi(l, m): """ Initialize data for :func:`idd_sfrm`. :param l: Length of output transformed vector. :type l: int :param m: Length of the vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idd_sfrm`. :rtype: :class:`numpy.ndarray` """ return _id.idd_sfrmi(l, m) #------------------------------------------------------------------------------ # idd_id.f #------------------------------------------------------------------------------ def iddp_id(eps, A): """ Compute ID of a real matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) k, idx, rnorms = _id.iddp_id(eps, A) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def iddr_id(A, k): """ Compute ID of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) idx, rnorms = _id.iddr_id(A, k) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj def idd_reconid(B, idx, proj): """ Reconstruct matrix from real ID. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Reconstructed matrix. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) if proj.size > 0: return _id.idd_reconid(B, idx, proj) else: return B[:, np.argsort(idx)] def idd_reconint(idx, proj): """ Reconstruct interpolation matrix from real ID. :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Interpolation matrix. :rtype: :class:`numpy.ndarray` """ return _id.idd_reconint(idx, proj) def idd_copycols(A, k, idx): """ Reconstruct skeleton matrix from real ID. :param A: Original matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :param idx: Column index array. 
:type idx: :class:`numpy.ndarray` :return: Skeleton matrix. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) return _id.idd_copycols(A, k, idx) #------------------------------------------------------------------------------ # idd_id2svd.f #------------------------------------------------------------------------------ def idd_id2svd(B, idx, proj): """ Convert real ID to SVD. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) U, V, S, ier = _id.idd_id2svd(B, idx, proj) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idd_snorm.f #------------------------------------------------------------------------------ def idd_snorm(m, n, matvect, matvec, its=20): """ Estimate spectral norm of a real matrix by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate. :rtype: float """ snorm, v = _id.idd_snorm(m, n, matvect, matvec, its) return snorm def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20): """ Estimate spectral norm of the difference of two real matrices by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the transpose of the first matrix to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvect2: Function to apply the transpose of the second matrix to a vector, with call signature `y = matvect2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect2: function :param matvec: Function to apply the first matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param matvec2: Function to apply the second matrix to a vector, with call signature `y = matvec2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec2: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate of matrix difference. :rtype: float """ return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its) #------------------------------------------------------------------------------ # idd_svd.f #------------------------------------------------------------------------------ def iddr_svd(A, k): """ Compute SVD of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. 
:rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) U, V, S, ier = _id.iddr_svd(A, k) if ier: raise _RETCODE_ERROR return U, V, S def iddp_svd(eps, A): """ Compute SVD of a real matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddp_aid.f #------------------------------------------------------------------------------ def iddp_aid(eps, A): """ Compute ID of a real matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, w = idd_frmi(m) proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F') k, idx, proj = _id.iddp_aid(eps, A, w, proj) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idd_estrank(eps, A): """ Estimate rank of a real matrix to a specified relative precision using random sampling. The output rank is typically about 8 higher than the actual rank. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank estimate. :rtype: int """ A = np.asfortranarray(A) m, n = A.shape n2, w = idd_frmi(m) ra = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F') k, ra = _id.idd_estrank(eps, A, w, ra) return k #------------------------------------------------------------------------------ # iddp_asvd.f #------------------------------------------------------------------------------ def iddp_asvd(eps, A): """ Compute SVD of a real matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, winit = _id.idd_frmi(m) w = np.empty( max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2, (2*n + 1)*(n2 + 1)), order='F') k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddp_rid.f #------------------------------------------------------------------------------ def iddp_rid(eps, m, n, matvect): """ Compute ID of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. 
:type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F') k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj) if ier != 0: raise _RETCODE_ERROR proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idd_findrank(eps, m, n, matvect): """ Estimate rank of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :return: Rank estimate. :rtype: int """ k, ra, ier = _id.idd_findrank(eps, m, n, matvect) if ier: raise _RETCODE_ERROR return k #------------------------------------------------------------------------------ # iddp_rsvd.f #------------------------------------------------------------------------------ def iddp_rsvd(eps, m, n, matvect, matvec): """ Compute SVD of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddr_aid.f #------------------------------------------------------------------------------ def iddr_aid(A, k): """ Compute ID of a real matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = iddr_aidi(m, n, k) idx, proj = _id.iddr_aid(A, k, w) if k == n: proj = np.array([], dtype='float64', order='F') else: proj = proj.reshape((k, n-k), order='F') return idx, proj def iddr_aidi(m, n, k): """ Initialize array for :func:`iddr_aid`. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param k: Rank of ID. :type k: int :return: Initialization array to be used by :func:`iddr_aid`. 
:rtype: :class:`numpy.ndarray` """ return _id.iddr_aidi(m, n, k) #------------------------------------------------------------------------------ # iddr_asvd.f #------------------------------------------------------------------------------ def iddr_asvd(A, k): """ Compute SVD of a real matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F') w_ = iddr_aidi(m, n, k) w[:w_.size] = w_ U, V, S, ier = _id.iddr_asvd(A, k, w) if ier != 0: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # iddr_rid.f #------------------------------------------------------------------------------ def iddr_rid(m, n, matvect, k): """ Compute ID of a real matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ idx, proj = _id.iddr_rid(m, n, matvect, k) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj #------------------------------------------------------------------------------ # iddr_rsvd.f #------------------------------------------------------------------------------ def iddr_rsvd(m, n, matvect, matvec, k): """ Compute SVD of a real matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k) if ier != 0: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idz_frm.f #------------------------------------------------------------------------------ def idz_frm(n, w, x): """ Transform complex vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idz_sfrm`, this routine works best when the length of the transformed vector is the power-of-two integer output by :func:`idz_frmi`, or when the length is not specified but instead determined a posteriori from the output. The returned transformed vector is randomly permuted. 
:param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idz_frmi`; `n` is also the length of the output vector. :type n: int :param w: Initialization array constructed by :func:`idz_frmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idz_frm(n, w, x) def idz_sfrm(l, n, w, x): """ Transform complex vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idz_frm`, this routine works best when the length of the transformed vector is known a priori. :param l: Length of transformed vector, satisfying `l <= n`. :type l: int :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idz_sfrmi`. :type n: int :param w: Initialization array constructed by :func:`idd_sfrmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idz_sfrm(l, n, w, x) def idz_frmi(m): """ Initialize data for :func:`idz_frm`. :param m: Length of vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idz_frm`. :rtype: :class:`numpy.ndarray` """ return _id.idz_frmi(m) def idz_sfrmi(l, m): """ Initialize data for :func:`idz_sfrm`. :param l: Length of output transformed vector. :type l: int :param m: Length of the vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idz_sfrm`. :rtype: :class:`numpy.ndarray` """ return _id.idz_sfrmi(l, m) #------------------------------------------------------------------------------ # idz_id.f #------------------------------------------------------------------------------ def idzp_id(eps, A): """ Compute ID of a complex matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) k, idx, rnorms = _id.idzp_id(eps, A) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idzr_id(A, k): """ Compute ID of a complex matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) idx, rnorms = _id.idzr_id(A, k) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj def idz_reconid(B, idx, proj): """ Reconstruct matrix from complex ID. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Reconstructed matrix. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) if proj.size > 0: return _id.idz_reconid(B, idx, proj) else: return B[:, np.argsort(idx)] def idz_reconint(idx, proj): """ Reconstruct interpolation matrix from complex ID. :param idx: Column index array. 
:type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Interpolation matrix. :rtype: :class:`numpy.ndarray` """ return _id.idz_reconint(idx, proj) def idz_copycols(A, k, idx): """ Reconstruct skeleton matrix from complex ID. :param A: Original matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :param idx: Column index array. :type idx: :class:`numpy.ndarray` :return: Skeleton matrix. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) return _id.idz_copycols(A, k, idx) #------------------------------------------------------------------------------ # idz_id2svd.f #------------------------------------------------------------------------------ def idz_id2svd(B, idx, proj): """ Convert complex ID to SVD. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) U, V, S, ier = _id.idz_id2svd(B, idx, proj) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idz_snorm.f #------------------------------------------------------------------------------ def idz_snorm(m, n, matveca, matvec, its=20): """ Estimate spectral norm of a complex matrix by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate. :rtype: float """ snorm, v = _id.idz_snorm(m, n, matveca, matvec, its) return snorm def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20): """ Estimate spectral norm of the difference of two complex matrices by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the adjoint of the first matrix to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matveca2: Function to apply the adjoint of the second matrix to a vector, with call signature `y = matveca2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca2: function :param matvec: Function to apply the first matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param matvec2: Function to apply the second matrix to a vector, with call signature `y = matvec2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec2: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate of matrix difference. 
:rtype: float """ return _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its) #------------------------------------------------------------------------------ # idz_svd.f #------------------------------------------------------------------------------ def idzr_svd(A, k): """ Compute SVD of a complex matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) U, V, S, ier = _id.idzr_svd(A, k) if ier: raise _RETCODE_ERROR return U, V, S def idzp_svd(eps, A): """ Compute SVD of a complex matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzp_aid.f #------------------------------------------------------------------------------ def idzp_aid(eps, A): """ Compute ID of a complex matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, w = idz_frmi(m) proj = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F') k, idx, proj = _id.idzp_aid(eps, A, w, proj) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idz_estrank(eps, A): """ Estimate rank of a complex matrix to a specified relative precision using random sampling. The output rank is typically about 8 higher than the actual rank. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank estimate. :rtype: int """ A = np.asfortranarray(A) m, n = A.shape n2, w = idz_frmi(m) ra = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F') k, ra = _id.idz_estrank(eps, A, w, ra) return k #------------------------------------------------------------------------------ # idzp_asvd.f #------------------------------------------------------------------------------ def idzp_asvd(eps, A): """ Compute SVD of a complex matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, winit = _id.idz_frmi(m) w = np.empty( max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2, (2*n + 1)*(n2 + 1)), dtype=np.complex128, order='F') k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzp_rid.f #------------------------------------------------------------------------------ def idzp_rid(eps, m, n, matveca): """ Compute ID of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ proj = np.empty( m + 1 + 2*n*(min(m, n) + 1), dtype=np.complex128, order='F') k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj) if ier: raise _RETCODE_ERROR proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idz_findrank(eps, m, n, matveca): """ Estimate rank of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :return: Rank estimate. :rtype: int """ k, ra, ier = _id.idz_findrank(eps, m, n, matveca) if ier: raise _RETCODE_ERROR return k #------------------------------------------------------------------------------ # idzp_rsvd.f #------------------------------------------------------------------------------ def idzp_rsvd(eps, m, n, matveca, matvec): """ Compute SVD of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzr_aid.f #------------------------------------------------------------------------------ def idzr_aid(A, k): """ Compute ID of a complex matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = idzr_aidi(m, n, k) idx, proj = _id.idzr_aid(A, k, w) if k == n: proj = np.array([], dtype='complex128', order='F') else: proj = proj.reshape((k, n-k), order='F') return idx, proj def idzr_aidi(m, n, k): """ Initialize array for :func:`idzr_aid`. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param k: Rank of ID. :type k: int :return: Initialization array to be used by :func:`idzr_aid`. :rtype: :class:`numpy.ndarray` """ return _id.idzr_aidi(m, n, k) #------------------------------------------------------------------------------ # idzr_asvd.f #------------------------------------------------------------------------------ def idzr_asvd(A, k): """ Compute SVD of a complex matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = np.empty( (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90, dtype='complex128', order='F') w_ = idzr_aidi(m, n, k) w[:w_.size] = w_ U, V, S, ier = _id.idzr_asvd(A, k, w) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idzr_rid.f #------------------------------------------------------------------------------ def idzr_rid(m, n, matveca, k): """ Compute ID of a complex matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ idx, proj = _id.idzr_rid(m, n, matveca, k) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj #------------------------------------------------------------------------------ # idzr_rsvd.f #------------------------------------------------------------------------------ def idzr_rsvd(m, n, matveca, matvec, k): """ Compute SVD of a complex matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. 
:type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k) if ier: raise _RETCODE_ERROR return U, V, S
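
#------------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# wrappers): it exercises only routines defined above -- `idzr_id`,
# `idz_copycols` and `idz_reconid` -- on a random low-rank complex matrix.
# The matrix sizes and the rank below are arbitrary example values.
#------------------------------------------------------------------------------

if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    m, n, rank = 60, 40, 10
    # random complex matrix of (numerical) rank ~10
    A = np.dot(rng.randn(m, rank) + 1j*rng.randn(m, rank),
               rng.randn(rank, n) + 1j*rng.randn(rank, n))

    # rank-k interpolative decomposition: column skeleton plus interpolation
    # coefficients, then reconstruction of the full matrix from them
    k = rank
    idx, proj = idzr_id(A, k)
    B = idz_reconid(idz_copycols(A, k, idx), idx, proj)
    print("ID reconstruction error:", np.linalg.norm(A - B))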
bsd-3-clause
3,825,534,668,686,145,000
25.923307
79
0.532035
false
miso-belica/sumy
sumy/summarizers/text_rank.py
1
4579
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals

import math

try:
    import numpy
except ImportError:
    numpy = None

from ._summarizer import AbstractSummarizer


class TextRankSummarizer(AbstractSummarizer):
    """An implementation of TextRank algorithm for summarization.

    Source: https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf
    """
    epsilon = 1e-4
    damping = 0.85
    # small number to prevent zero-division error, see https://github.com/miso-belica/sumy/issues/112
    _ZERO_DIVISION_PREVENTION = 1e-7
    _stop_words = frozenset()

    @property
    def stop_words(self):
        return self._stop_words

    @stop_words.setter
    def stop_words(self, words):
        self._stop_words = frozenset(map(self.normalize_word, words))

    def __call__(self, document, sentences_count):
        self._ensure_dependencies_installed()
        if not document.sentences:
            return ()

        ratings = self.rate_sentences(document)
        return self._get_best_sentences(document.sentences, sentences_count, ratings)

    @staticmethod
    def _ensure_dependencies_installed():
        if numpy is None:
            raise ValueError("TextRank summarizer requires NumPy. Please install it with the command 'pip install numpy'.")

    def rate_sentences(self, document):
        matrix = self._create_matrix(document)
        ranks = self.power_method(matrix, self.epsilon)
        return {sent: rank for sent, rank in zip(document.sentences, ranks)}

    def _create_matrix(self, document):
        """Create a stochastic matrix for TextRank.

        The element at row i and column j of the matrix corresponds to the similarity of sentence i
        and j, where the similarity is computed as the number of common words between them, divided
        by the sum of the logarithms of their lengths. After such a matrix is created, it is turned into
        a stochastic matrix by normalizing over columns, i.e. making the columns sum to one. TextRank
        uses the PageRank algorithm with damping, so a damping factor is incorporated as explained in
        TextRank's paper. The resulting matrix is a stochastic matrix ready for the power method.
        """
        sentences_as_words = [self._to_words_set(sent) for sent in document.sentences]
        sentences_count = len(sentences_as_words)
        weights = numpy.zeros((sentences_count, sentences_count))

        for i, words_i in enumerate(sentences_as_words):
            for j in range(i, sentences_count):
                rating = self._rate_sentences_edge(words_i, sentences_as_words[j])
                weights[i, j] = rating
                weights[j, i] = rating

        weights /= (weights.sum(axis=1)[:, numpy.newaxis] + self._ZERO_DIVISION_PREVENTION)

        # In the original paper, the probability of randomly moving to any of the vertices
        # is NOT divided by the number of vertices. Here we do divide it so that the power
        # method works; without this division, the stationary probability blows up. This
        # should not affect the ranking of the vertices so we can use the resulting stationary
        # probability as is without any postprocessing.
        return numpy.full((sentences_count, sentences_count), (1.-self.damping) / sentences_count) \
            + self.damping * weights

    def _to_words_set(self, sentence):
        words = map(self.normalize_word, sentence.words)
        return [self.stem_word(w) for w in words if w not in self._stop_words]

    @staticmethod
    def _rate_sentences_edge(words1, words2):
        rank = sum(words2.count(w) for w in words1)
        if rank == 0:
            return 0.0

        assert len(words1) > 0 and len(words2) > 0
        norm = math.log(len(words1)) + math.log(len(words2))
        if numpy.isclose(norm, 0.):
            # This should only happen when words1 and words2 only have a single word.
            # Thus, rank can only be 0 or 1.
            assert rank in (0, 1)
            return float(rank)
        else:
            return rank / norm

    @staticmethod
    def power_method(matrix, epsilon):
        transposed_matrix = matrix.T
        sentences_count = len(matrix)
        p_vector = numpy.array([1.0 / sentences_count] * sentences_count)
        lambda_val = 1.0

        while lambda_val > epsilon:
            next_p = numpy.dot(transposed_matrix, p_vector)
            lambda_val = numpy.linalg.norm(numpy.subtract(next_p, p_vector))
            p_vector = next_p

        return p_vector
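
# ---------------------------------------------------------------------------
# Minimal illustrative sketch (editorial addition, not part of the original
# module): it runs the power iteration above on a tiny hand-made row-stochastic
# matrix. Real use goes through sumy's parsers/tokenizers and __call__ instead;
# the 2x2 matrix here is an arbitrary example value.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    if numpy is not None:
        transition = numpy.array([[0.5, 0.5],
                                  [0.3, 0.7]])
        stationary = TextRankSummarizer.power_method(transition, TextRankSummarizer.epsilon)
        print("stationary distribution:", stationary)  # roughly [0.375, 0.625]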
apache-2.0
-5,269,407,314,558,454,000
38.817391
117
0.652544
false
timothydmorton/isochrones
isochrones/priors.py
1
16204
from __future__ import print_function, division import numpy as np import pandas as pd import scipy.stats from scipy.stats import uniform, lognorm from scipy.integrate import quad from scipy.stats._continuous_distns import _norm_pdf, _norm_cdf, _norm_logpdf import matplotlib.pyplot as plt import numba as nb from math import log, log10 from .logger import getLogger _norm_pdf_C = np.sqrt(2 * np.pi) ONE_OVER_ROOT_2PI = 1.0 / _norm_pdf_C _norm_pdf_logC = np.log(_norm_pdf_C) LOG_ONE_OVER_ROOT_2PI = np.log(ONE_OVER_ROOT_2PI) def _norm_pdf(x): return np.exp(-(x ** 2) / 2.0) / _norm_pdf_C def _norm_logpdf(x): return -(x ** 2) / 2.0 - _norm_pdf_logC class Prior(object): def __init__(self, *args, **kwargs): self._norm = 1.0 def __call__(self, x, **kwargs): return self.pdf(x, **kwargs) @property def bounds(self): return (-np.inf, np.inf) if getattr(self, "_bounds", None) is None else self._bounds @bounds.setter def bounds(self, new): self._norm = quad(self._pdf, *new)[0] self._bounds = new try: self.test_integral() except AssertionError: raise ValueError(f"Problem setting bounds to {new}; integral test failed.") def _pdf(self, x, **kwargs): raise NotImplementedError def pdf(self, x, **kwargs): lo, hi = self.bounds if x < lo or x > hi: return 0 else: return self._pdf(x, **kwargs) / self._norm def lnpdf(self, x, **kwargs): if hasattr(self, "_lnpdf"): return self._lnpdf(x, **kwargs) else: pdf = self(x, **kwargs) return np.log(pdf) if pdf else -np.inf def sample(self, n): if hasattr(self, "distribution"): return self.distribution.rvs(n) else: raise NotImplementedError def test_integral(self): assert np.isclose(1, quad(self.pdf, *self.bounds)[0]) def test_sampling(self, n=100000, plot=False): x = self.sample(n) if hasattr(self, "bounds"): if self.bounds == (-np.inf, np.inf): rng = None else: rng = self.bounds else: rng = None hn, _ = np.histogram(x, range=rng) h, b = np.histogram(x, density=True, range=rng) logger = getLogger() logger.debug("bins: {}".format(b)) logger.debug("raw: {}".format(hn)) pdf = np.array([quad(self.pdf, lo, hi)[0] / (hi - lo) for lo, hi in zip(b[:-1], b[1:])]) sigma = 1.0 / np.sqrt(hn) resid = np.absolute(pdf - h) / pdf logger.debug("pdf: {}".format(pdf)) logger.debug("hist: {}".format(h)) logger.debug("sigma: {}".format(sigma)) logger.debug("{}".format(resid / sigma)) c = (b[:-1] + b[1:]) / 2 if plot: plt.plot(c, h, "_") plt.plot(c, pdf, "x") else: assert max((resid / sigma)[hn > 50]) < 6 class BoundedPrior(Prior): def __init__(self, bounds=None): self._bounds = bounds super(BoundedPrior, self).__init__() def __call__(self, x, **kwargs): if self.bounds is not None: lo, hi = self.bounds if x < lo or x > hi: return 0 return self.pdf(x, **kwargs) @property def bounds(self): return self._bounds @bounds.setter def bounds(self, new): self._bounds = new try: self.test_integral() except AssertionError: raise ValueError(f"Problem setting bounds to {new}; integral test failed.") def lnpdf(self, x, **kwargs): if self.bounds is not None: lo, hi = self.bounds if x < lo or x > hi: return -np.inf if hasattr(self, "_lnpdf"): return self._lnpdf(x, **kwargs) else: pdf = self.pdf(x, **kwargs) return np.log(pdf) if pdf else -np.inf class BrokenPrior(Prior): """Composition of multiple stitched-together priors, with breakpoint(s) Parameters ---------- components : list of `Prior` objects Components going into prior breakpoints : array List of breakpoints separating components. 
Length = n(components) - 1 """ def __init__(self, components, breakpoints, bounds=None): self.components = components self.n_components = len(components) self.breakpoints = breakpoints if bounds is None: bounds = (-np.inf, np.inf) self._bounds = bounds self._norm = 1.0 self.quad_args = dict(limit=200) self._initialize() @property def bounds(self): return super().bounds @bounds.setter def bounds(self, new): self._bounds = new self._initialize() def _initialize(self): # lo = min([c.bounds[0] for c in self.components] + [self.bounds[0]]) # hi = max([c.bounds[1] for c in self.components] + [self.bounds[1]]) lo, hi = self.bounds full_domain = [lo] + list(self.breakpoints) + [hi] domains = [(a, b) for a, b in zip(full_domain[:-1], full_domain[1:])] self.domains = domains norms = np.ones(self.n_components) for i in range(1, self.n_components): x = self.breakpoints[i - 1] norms[i] = self.components[i](x) / self.components[i - 1](x) # Compute total norm tot = 0 for comp, (a, b), norm in zip(self.components, domains, norms): fn = lambda x: comp(x) / norm tot += quad(fn, a, b, **self.quad_args)[0] self.norms = norms * tot self.lognorms = np.log(self.norms) cumnorm = np.zeros(self.n_components) for i, (comp, (a, b), norm) in enumerate(zip(self.components, domains, self.norms)): fn = lambda x: comp(x) / norm cumnorm[i] = quad(fn, a, b, **self.quad_args)[0] self.cumnorm = cumnorm def _pdf(self, x): i = np.digitize(x, self.breakpoints) return self.components[i](x) / self.norms[i] def _lnpdf(self, x): i = np.digitize(x, self.breakpoints) return self.components[i].lnpdf(x) - self.lognorms[i] def sample(self, n): u = np.random.random(n) x = np.zeros(n) u_cumthresh = 0 for comp, u_thresh, (a, b) in zip(self.components, self.cumnorm, self.domains): u_cumthresh += u_thresh mask = (u < u_cumthresh) & (x == 0.0) n_comp = mask.sum() samples = comp.sample(n_comp) oob = (samples < a) | (samples > b) n_oob = oob.sum() while n_oob: samples[oob] = comp.sample(n_oob) oob = (samples < a) | (samples > b) n_oob = oob.sum() x[mask] = samples return x class GaussianPrior(BoundedPrior): def __init__(self, mean, sigma, bounds=None): self.mean = mean self.sigma = sigma self._bounds = bounds self._norm = 1.0 if bounds: lo, hi = bounds a, b = (lo - mean) / sigma, (hi - mean) / sigma self.distribution = scipy.stats.truncnorm(a, b, loc=mean, scale=sigma) self.norm = _norm_cdf(b) - _norm_cdf(a) self.lognorm = np.log(self.norm) else: self.distribution = scipy.stats.norm(mean, sigma) self.norm = 1.0 self.lognorm = 0.0 def _pdf(self, x): return _norm_pdf((x - self.mean) / self.sigma) / self.sigma / self.norm def _lnpdf(self, x): return _norm_logpdf((x - self.mean) / self.sigma) - np.log(self.sigma) - self.lognorm class LogNormalPrior(Prior): def __init__(self, mu, sigma, bounds=None): self.mu = mu self.sigma = sigma self.scale = np.exp(mu) self.log_s = np.log(sigma) self.distribution = lognorm(sigma, scale=np.exp(mu)) self._bounds = (0, np.inf) super().__init__(self) def _pdf(self, x): s = self.sigma y = x / self.scale return ONE_OVER_ROOT_2PI / (s * y) * np.exp(-0.5 * (np.log(y) / s) ** 2) / self.scale def _lnpdf(self, x): s = self.sigma y = x / self.scale return LOG_ONE_OVER_ROOT_2PI - (self.log_s + np.log(y)) - 0.5 * (np.log(y) / s) ** 2 - self.mu class FlatPrior(BoundedPrior): def __init__(self, bounds): super().__init__(bounds=bounds) def _pdf(self, x): lo, hi = self.bounds return 1.0 / (hi - lo) def sample(self, n): lo, hi = self.bounds return np.random.random(n) * (hi - lo) + lo class FlatLogPrior(BoundedPrior): def 
__init__(self, bounds): super(FlatLogPrior, self).__init__(bounds=bounds) def _pdf(self, x): lo, hi = self.bounds return np.log(10) * 10 ** x / (10 ** hi - 10 ** lo) def sample(self, n): lo, hi = self.bounds return np.log10(np.random.random(n) * (10 ** hi - 10 ** lo) + 10 ** lo) class PowerLawPrior(BoundedPrior): def __init__(self, alpha, bounds=None): self.alpha = alpha super(PowerLawPrior, self).__init__(bounds=bounds) def _pdf(self, x): lo, hi = self.bounds C = (1 + self.alpha) / (hi ** (1 + self.alpha) - lo ** (1 + self.alpha)) # C = 1/(1/(self.alpha+1)*(1 - lo**(self.alpha+1))) return C * x ** self.alpha def _lnpdf(self, x): lo, hi = self.bounds C = (1 + self.alpha) / (hi ** (1 + self.alpha) - lo ** (1 + self.alpha)) return np.log(C) + self.alpha * np.log(x) def sample(self, n): """ cdf = C * int(x**a)|x=lo..x = C * [x**(a+1) / (a+1)] | x=lo..x = C * ([x**(a+1) / (a+1)] - [lo**(a+1) / (a+1)]) u = u/C + [lo**(a+1) / (a+1)] = x**(a+1) / (a+1) (a+1) * (u/C + [lo**(a+1) / (a+1)]) = x**(a+1) [(a+1) * (u/C + [lo**(a+1) / (a+1)])] ** (1/(a+1)) = x """ lo, hi = self.bounds C = (1 + self.alpha) / (hi ** (1 + self.alpha) - lo ** (1 + self.alpha)) u = np.random.random(n) a = self.alpha return ((a + 1) * (u / C + (lo ** (a + 1) / (a + 1)))) ** (1 / (a + 1)) class FehPrior(Prior): """feh PDF based on local SDSS distribution From Jo Bovy: https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3 2D gaussian fit based on Casagrande (2011) """ def __init__(self, halo_fraction=0.001, local=True, **kwargs): self.halo_fraction = halo_fraction self.local = local super().__init__(**kwargs) def _pdf(self, x): feh = x if self.local: disk_norm = 2.5066282746310007 # integral of the below from -np.inf to np.inf disk_fehdist = ( 1.0 / disk_norm * ( 0.8 / 0.15 * np.exp(-0.5 * (feh - 0.016) ** 2.0 / 0.15 ** 2.0) + 0.2 / 0.22 * np.exp(-0.5 * (feh + 0.15) ** 2.0 / 0.22 ** 2.0) ) ) else: mu, sig = -0.3, 0.3 disk_fehdist = 1.0 / np.sqrt(2 * np.pi) / sig * np.exp(-0.5 * (feh - mu) ** 2 / sig ** 2) halo_mu, halo_sig = -1.5, 0.4 halo_fehdist = ( 1.0 / np.sqrt(2 * np.pi * halo_sig ** 2) * np.exp(-0.5 * (feh - halo_mu) ** 2 / halo_sig ** 2) ) return self.halo_fraction * halo_fehdist + (1 - self.halo_fraction) * disk_fehdist def sample(self, n): if self.local: w1, mu1, sig1 = 0.8, 0.016, 0.15 w2, mu2, sig2 = 0.2, -0.15, 0.22 else: w1, mu1, sig1 = 1.0, -0.3, 0.3 w2, mu2, sig2 = 0.0, 0, 1 halo_mu, halo_sig = -1.5, 0.4 x1 = np.random.randn(n) * sig1 + mu1 x2 = np.random.randn(n) * sig2 + mu2 xhalo = np.random.randn(n) * halo_sig + halo_mu u1 = np.random.random(n) x = x1 m1 = u1 < w2 x[m1] = x2[m1] u2 = np.random.random(n) m2 = u2 < self.halo_fraction x[m2] = xhalo[m2] return x class EEP_prior(BoundedPrior): def __init__(self, ic, orig_prior, bounds=None): self.ic = ic self.orig_prior = orig_prior self._bounds = bounds if bounds is not None else ic.eep_bounds self._norm = 1.0 self.orig_par = ic.eep_replaces if self.orig_par == "age": self.deriv_prop = "dt_deep" elif self.orig_par == "mass": self.deriv_prop = "dm_deep" else: raise ValueError("wtf.") def _pdf(self, eep, **kwargs): if self.orig_par == "age": pars = [kwargs["mass"], eep, kwargs["feh"]] elif self.orig_par == "mass": pars = [eep, kwargs["age"], kwargs["feh"]] orig_val, dx_deep = self.ic.interp_value(pars, [self.orig_par, self.deriv_prop]).squeeze() return self.orig_prior(orig_val) * dx_deep def sample(self, n, **kwargs): eeps = pd.Series(np.arange(self.bounds[0], self.bounds[1])).sample(n, replace=True) if self.orig_par == "age": mass = kwargs["mass"] 
if isinstance(mass, pd.Series): mass = mass.values feh = kwargs["feh"] if isinstance(feh, pd.Series): feh = feh.values values = self.ic.interp_value([mass, eeps, feh], ["dt_deep", "age"]) deriv_val, orig_val = values[:, 0], values[:, 1] orig_pr = np.array([self.orig_prior(v) for v in orig_val]) # weights = orig_pr * np.log10(orig_val)/np.log10(np.e) * deriv_val # why like this? weights = orig_pr * deriv_val elif self.orig_par == "mass": age = kwargs["age"] if isinstance(age, pd.Series): age = age.values feh = kwargs["feh"] if isinstance(feh, pd.Series): feh = feh.values values = self.ic.interp_value([eeps, age, feh], ["dm_deep", "mass"]) deriv_val, orig_val = values[:, 0], values[:, 1] orig_pr = np.array([self.orig_prior(v) for v in orig_val]) weights = orig_pr * deriv_val try: return eeps.sample(n, weights=weights, replace=True).values except ValueError: # If there are no valid samples, just run it again until you get valid results return self.sample(n, **kwargs) def test_integral(self): pass # Utility numba PDFs for speed! @nb.jit(nopython=True) def powerlaw_pdf(x, alpha, lo, hi): alpha_plus_one = alpha + 1 C = alpha_plus_one / (hi ** alpha_plus_one - lo ** alpha_plus_one) return C * x ** alpha @nb.jit(nopython=True) def powerlaw_lnpdf(x, alpha, lo, hi): alpha_plus_one = alpha + 1 C = alpha_plus_one / (hi ** alpha_plus_one - lo ** alpha_plus_one) return log(C) + alpha * log(x) class AgePrior(FlatLogPrior): """Uniform true age prior, where 'age' is actually log10(age) """ def __init__(self, **kwargs): super().__init__(bounds=(5, 10.15), **kwargs) class DistancePrior(PowerLawPrior): def __init__(self, max_distance=10000, **kwargs): super().__init__(alpha=2.0, bounds=(0, max_distance), **kwargs) class AVPrior(FlatPrior): def __init__(self, **kwargs): bounds = kwargs.pop("bounds", (0, 1.0)) super().__init__(bounds=bounds) class QPrior(PowerLawPrior): def __init__(self, **kwargs): bounds = kwargs.pop("bounds", (0.1, 1)) super().__init__(alpha=0.3, bounds=bounds, **kwargs) class SalpeterPrior(PowerLawPrior): def __init__(self, **kwargs): bounds = kwargs.pop("bounds", (0.1, 10)) super().__init__(alpha=-2.35, bounds=bounds, **kwargs) class ChabrierPrior(BrokenPrior): def __init__(self, **kwargs): bounds = kwargs.pop("bounds", (0.1, 100.0)) super().__init__( [LogNormalPrior(np.log(0.079), 0.69 * np.log(10)), PowerLawPrior(-2.35, (1.0, 100.0))], [1.0], bounds=bounds, **kwargs ) # from Chabrier 2003, Eqn 17
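
# ---------------------------------------------------------------------------
# Small illustrative check (editorial addition, not part of the original
# module): a power-law prior should integrate to one over its bounds and its
# sampler should respect them. The slope and bounds are the Salpeter-like
# example values already used above; `quad` is the module-level import.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_prior = PowerLawPrior(alpha=-2.35, bounds=(0.1, 10.0))
    total = quad(example_prior.pdf, *example_prior.bounds)[0]
    print("integral of pdf over bounds:", total)  # should be ~1
    draws = example_prior.sample(1000)
    print("min/max of samples:", draws.min(), draws.max())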
mit
-6,676,794,714,651,641,000
30.22158
130
0.522155
false
PaddlePaddle/Paddle
python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
2
10102
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle import paddle.fluid as fluid def add_fn(x): x = x + 1 return x def loss_fn(x, lable): loss = fluid.layers.cross_entropy(x, lable) return loss def dyfunc_with_if_else(x_v, label=None): if fluid.layers.mean(x_v).numpy()[0] > 5: x_v = x_v - 1 else: x_v = x_v + 1 # plain if in python if label is not None: loss = fluid.layers.cross_entropy(x_v, label) return loss return x_v def dyfunc_with_if_else2(x, col=100): row = 0 if abs(col) > x.shape[-1]: # TODO: Don't support return non-Tensor in Tensor-dependent `if` stament currently. # `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed. # col = -1 col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64") if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[row][col]: y = fluid.layers.relu(x) else: x_pow = fluid.layers.pow(x, 2) y = fluid.layers.tanh(x_pow) return y def dyfunc_with_if_else3(x): # Create new var in parent scope, return it in true_fn and false_fn. # The var is created only in one of If.body or If.orelse node, and it used as gast.Load firstly after gast.If node. # The transformed code: """ q = paddle.jit.dy2static. data_layer_not_check(name='q', shape=[-1], dtype='float32') z = paddle.jit.dy2static. data_layer_not_check(name='z', shape=[-1], dtype='float32') def true_fn_0(q, x, y): x = x + 1 z = x + 2 q = x + 3 return q, x, y, z def false_fn_0(q, x, y): y = y + 1 z = x - 2 m = x + 2 n = x + 3 return q, x, y, z q, x, y, z = fluid.layers.cond(fluid.layers.mean(x)[0] < 5, lambda : paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y), lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q, x, y)) """ y = x + 1 # NOTE: x_v[0] < 5 is True if fluid.layers.mean(x).numpy()[0] < 5: x = x + 1 z = x + 2 q = x + 3 else: y = y + 1 z = x - 2 m = x + 2 n = x + 3 q = q + 1 n = q + 2 x = n return x def dyfunc_with_if_else_with_list_geneator(x): if 10 > 5: y = paddle.add_n( [paddle.full( shape=[2], fill_value=v) for v in range(5)]) else: y = x return y def nested_if_else(x_v): batch_size = 16 feat_size = x_v.shape[-1] bias = fluid.layers.fill_constant([feat_size], dtype='float32', value=1) if x_v.shape[0] != batch_size: # TODO: Don't support return non-Tensor in Tensor-dependent `if` stament currently. # `x_v.shape[0]` is not Tensor, and `batch_size` is the return value of `true_fn` after transformed. # col = -1 # batch_size = x_v.shape[0] batch_size = fluid.layers.shape(x_v)[0] # if tensor.shape is [1], now support to compare with numpy. 
if fluid.layers.mean(x_v).numpy() < 0: y = x_v + bias w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10) if y.numpy()[0] < 10: tmp = y * w y = fluid.layers.relu(tmp) if fluid.layers.mean(y).numpy()[0] < batch_size: y = fluid.layers.abs(y) else: tmp = fluid.layers.fill_constant( [feat_size], dtype='float32', value=-1) y = y - tmp else: y = x_v - bias return y def nested_if_else_2(x): y = fluid.layers.reshape(x, [-1, 1]) b = 2 if b < 1: # var `z` is not visible for outer scope z = y x_shape_0 = x.shape[0] if x_shape_0 < 1: if fluid.layers.shape(y).numpy()[0] < 1: res = fluid.layers.fill_constant( value=2, shape=x.shape, dtype="int32") # `z` is a new var here. z = y + 1 else: res = fluid.layers.fill_constant( value=3, shape=x.shape, dtype="int32") else: res = x return res def nested_if_else_3(x): y = fluid.layers.reshape(x, [-1, 1]) b = 2 # var `z` is visible for func.body if b < 1: z = y else: z = x if b < 1: res = x # var `out` is only visible for current `if` if b > 1: out = x + 1 else: out = x - 1 else: y_shape = fluid.layers.shape(y) if y_shape.numpy()[0] < 1: res = fluid.layers.fill_constant( value=2, shape=x.shape, dtype="int32") # `z` is created in above code block. z = y + 1 else: res = fluid.layers.fill_constant( value=3, shape=x.shape, dtype="int32") # `out` is a new var. out = x + 1 return res class NetWithControlFlowIf(fluid.dygraph.Layer): def __init__(self, hidden_dim=16): super(NetWithControlFlowIf, self).__init__() self.hidden_dim = hidden_dim self.fc = fluid.dygraph.Linear( input_dim=hidden_dim, output_dim=5, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.99)), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.5))) self.alpha = 10. self.constant_vars = {} def forward(self, input): hidden_dim = input.shape[-1] if hidden_dim != self.hidden_dim: raise ValueError( "hidden_dim {} of input is not equal to FC.weight[0]: {}" .format(hidden_dim, self.hidden_dim)) self.constant_vars['bias'] = fluid.layers.fill_constant( [5], dtype='float32', value=1) # Control flow `if` statement fc_out = self.fc(input) if fluid.layers.mean(fc_out).numpy()[0] < 0: y = fc_out + self.constant_vars['bias'] self.constant_vars['w'] = fluid.layers.fill_constant( [5], dtype='float32', value=10) if y.numpy()[0] < self.alpha: # Create new var, but is not used. 
x = 10 tmp = y * self.constant_vars['w'] y = fluid.layers.relu(tmp) # Nested `if/else` if y.numpy()[-1] < self.alpha: # Modify variable of class self.constant_vars['w'] = fluid.layers.fill_constant( [hidden_dim], dtype='float32', value=9) y = fluid.layers.abs(y) else: tmp = fluid.layers.fill_constant( [5], dtype='float32', value=-1) y = y - tmp else: y = fc_out - self.constant_vars['bias'] loss = fluid.layers.mean(y) return loss def if_with_and_or(x_v, label=None): batch_size = fluid.layers.shape(x_v) if x_v is not None and (fluid.layers.mean(x_v).numpy()[0] > 0 or label is not None) and batch_size[0] > 1 and True: x_v = x_v - 1 else: x_v = x_v + 1 if label is not None: loss = fluid.layers.cross_entropy(x_v, label) return loss return x_v def if_with_and_or_1(x, y=None): batch_size = fluid.layers.shape(x) if batch_size[0] > 1 and y is not None: x = x + 1 if y is not None or batch_size[0] > 1: x = x - 1 return x def if_with_and_or_2(x, y=None): batch_size = fluid.layers.shape(x) if x is not None and batch_size[0] > 1 and y is not None: x = x + 1 if batch_size[0] > 1 or y is not None or x is not None: x = x - 1 return x def if_with_and_or_3(x, y=None): batch_size = fluid.layers.shape(x) mean_res = fluid.layers.mean(x) if x is not None and batch_size[0] > 1 and y is not None and mean_res.numpy( )[0] > 0: x = x + 1 if mean_res.numpy()[0] > 0 and (x is not None and batch_size[0] > 1) and y: x = x - 1 return x def if_with_and_or_4(x, y=None): batch_size = fluid.layers.shape(x) mean_res = fluid.layers.mean(x) if (x is not None and batch_size[0] > 1) or (y is not None and mean_res.numpy()[0] > 0): x = x + 1 if (x is not None or batch_size[0] > 1) and (y is not None or mean_res.numpy()[0] > 0): x = x - 1 return x def if_with_class_var(x, y=None): class Foo(object): def __init__(self): self.a = 1 self.b = 2 foo = Foo() batch_size = fluid.layers.shape(x) mean_res = fluid.layers.mean(x) if batch_size[0] > foo.a: x = x + foo.b else: x = x - foo.b return x def if_tensor_case(x): x = fluid.dygraph.to_variable(x) mean = fluid.layers.mean(x) # It is equivalent to `if mean != 0` if mean: for i in range(0, 10): if i > 5: x += 1 break x += 1 else: for i in range(0, 37): x += 1 break x += i # join `and`/`or` if fluid.layers.mean(x) + 1 and mean > 1 and x is not None or 2 > 1: x -= 1 # `not` statement if not (x[0][0] and (mean * x)[0][0]): x += 1 return x
apache-2.0
5,161,546,916,633,578,000
28.538012
119
0.517323
false
zuphilip/ocropy
ocrolib/exceptions.py
5
2139
import inspect
import numpy

def summary(x):
    """Summarize a datatype as a string (for display and debugging)."""
    if type(x)==numpy.ndarray:
        return "<ndarray %s %s>"%(x.shape,x.dtype)
    if type(x)==str and len(x)>10:
        return '"%s..."'%x
    if type(x)==list and len(x)>10:
        return '%s...'%x
    return str(x)

################################################################
### Ocropy exceptions
################################################################

class OcropusException(Exception):
    trace = 1
    def __init__(self,*args,**kw):
        Exception.__init__(self,*args,**kw)

class Unimplemented(OcropusException):
    "Exception raised when a feature is unimplemented."
    trace = 1
    def __init__(self,s):
        Exception.__init__(self,inspect.stack()[1][3])

class Internal(OcropusException):
    "Exception raised for internal errors."
    trace = 1
    def __init__(self,s):
        Exception.__init__(self,inspect.stack()[1][3])

class RecognitionError(OcropusException):
    "Some kind of error during recognition."
    trace = 1
    def __init__(self,explanation,**kw):
        self.context = kw
        s = [explanation]
        s += ["%s=%s"%(k,summary(kw[k])) for k in kw]
        message = " ".join(s)
        Exception.__init__(self,message)

class Warning(OcropusException):
    trace = 0
    def __init__(self,*args,**kw):
        OcropusException.__init__(self,*args,**kw)

class BadClassLabel(OcropusException):
    "Exception for bad class labels in a dataset or input."
    trace = 0
    def __init__(self,s):
        Exception.__init__(self,s)

class BadImage(OcropusException):
    trace = 0
    def __init__(self,*args,**kw):
        OcropusException.__init__(self,*args)

class BadInput(OcropusException):
    trace = 0
    def __init__(self,*args,**kw):
        OcropusException.__init__(self,*args,**kw)

class FileNotFound(OcropusException):
    """Some file-not-found error during OCRopus processing."""
    trace = 0
    def __init__(self,fname):
        self.fname = fname
    def __str__(self):
        return "file not found %s"%(self.fname,)
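
################################################################
### Illustrative usage sketch (editorial addition, not part of the
### original module); the array shape and the message below are
### arbitrary example values.
################################################################

if __name__ == "__main__":
    print(summary(numpy.zeros((2, 3))))   # -> <ndarray (2, 3) float64>
    try:
        raise BadImage("image is empty")
    except OcropusException as e:
        print("caught OcropusException: %s" % e)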
apache-2.0
-5,357,816,646,254,686,000
28.30137
71
0.570827
false
Milias/ModellingSimulation
Week7/python/lattice.py
1
3006
# -*- coding: utf8 -*- from numpy import * import json def RecursiveSC(level, N, count, points): if level: for i in range(N[level]): RecursiveSC(level - 1, N, count, points) count[level] += 1 count[level] = 0 else: for i in range(N[0]): points.append(count[:]) count[0] += 1 count[0] = 0 def GenerateSC(dim, part_radius, N, scale, filename): try: f = open(filename, "w+") except Exception as e: return str(e) data = {} data["Dimensions"] = dim data["SavedSteps"] = 1 data["ParticlesRadius"] = part_radius basis = scale * eye(dim) lattice_points = [] RecursiveSC(dim - 1, N, [0] * dim, lattice_points) data["ParticlesNumber"] = len(lattice_points) lattice_points = array(lattice_points) data["Particles"] = [[]] for p in lattice_points: data["Particles"][0].append(list(sum(basis * p,axis=1))) particles = array(data["Particles"][0]) data["SystemSize"] = [list(amin(particles, axis=0) - 0.001), list(amax(particles, axis=0) + 0.001)] f.write(json.dumps(data)) f.close() return "Saved to %s successfully." % filename def GenerateFCC(N, part_radius, scale, filename): try: f = open(filename, "w+") except Exception as e: return str(e) data = {} data["Dimensions"] = 3 data["SavedSteps"] = 1 data["ParticlesRadius"] = part_radius basis = scale * array([[0.5,0.5,0.0],[0.5,0.0,0.5],[0.0,0.5,0.5]]) lattice_points = [] for i in range(N[0]): lattice_points.append((i, i, -i)) lattice_points.append((i+1,i,-i)) lattice_points.append((i,i+1,-i)) lattice_points.append((i,i,-i+1)) for j in range(N[1]): lattice_points.append((i+j, i-j, -i+j)) lattice_points.append((i+j+1,i-j,j-i)) lattice_points.append((i+j,i-j+1,j-i)) lattice_points.append((i+j,i-j,j-i+1)) for k in range(N[2]): lattice_points.append((i+j-k, i-j+k, -i+j+k)) lattice_points.append((i+j-k+1,i-j+k,j-i+k)) lattice_points.append((i+j-k,i-j+k+1,j-i+k)) lattice_points.append((i+j-k,i-j+k,j-i+k+1)) lattice_points = list(set(lattice_points)) data["ParticlesNumber"] = len(lattice_points) lattice_points = array(lattice_points) data["Particles"] = [[]] for p in lattice_points: data["Particles"][0].append(list(sum(basis * p, axis=1))) particles = array(data["Particles"][0]) data["SystemSize"] = [list(amin(particles, axis=0) - (part_radius + 0.001)), list(amax(particles, axis=0) + (part_radius + 0.001))] print(data["SystemSize"]) print(prod(array(data["SystemSize"][1])-array(data["SystemSize"][0]))) print(data["ParticlesNumber"]) print(data["ParticlesNumber"]/prod(array(data["SystemSize"][1])-array(data["SystemSize"][0]))) f.write(json.dumps(data)) f.close() return "Saved to %s successfully." % filename #GenerateFCC([4]*3, 0.5, 1.47, "data/lattice/fcc-bb.json") GenerateFCC([1]*3, 0.5, 1.47*6, "data/lattice/fcc-ultralow.json") #GenerateFCC([20,10,1], 0.5, 2.3, "data/lattice/fcc-nvt.json")
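# Illustrative call for the simple-cubic generator too (editorial addition, kept
# commented out like the examples above); the box size, particle radius, lattice
# spacing and output path are arbitrary example values.
#GenerateSC(3, 0.5, [4, 4, 4], 1.5, "data/lattice/sc-example.json")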
mit
875,898,410,095,977,100
29.673469
133
0.617432
false
FedericoMuciaccia/SistemiComplessi
src/percolation.py
1
5670
# coding: utf-8

# # Percolation

# In[2]:

import numpy, networkx, pandas
# import graph_tool
# from graph_tool.all import *
# from matplotlib import pyplot
# %matplotlib inline


# In[3]:

# simple parallelization
# import multiprocessing
# cpus = multiprocessing.cpu_count()
# pool = multiprocessing.Pool(processes=cpus)
# pool.map(...)


# ## Random failure

# In[4]:

def randomFailure(graph, steps=101):
    initialGraph = graph
    initialGraphSize = networkx.number_of_nodes(initialGraph)
    numbersOfNodesToRemove = numpy.linspace(0, initialGraphSize, num=steps, dtype='int')

    initialNodes = initialGraph.nodes()
    randomizedNodes = numpy.random.permutation(initialNodes)

    def analyzeSingleGraph(index):
        # TODO check whether the list comprehensions can easily be parallelized;
        # they are much smoother to use than map()
        newGraph = initialGraph.copy()
        newGraph.remove_nodes_from(randomizedNodes[0:index])

        newGraphSize = networkx.number_of_nodes(newGraph)
        grado = newGraph.degree().items()

        # subgraphs = sorted(networkx.connected_component_subgraphs(newGraph), key = len, reverse=True)
        subgraphs = networkx.connected_component_subgraphs(newGraph)
        try:
            # giantCluster = subgraphs[0]
            giantCluster = max(subgraphs, key = networkx.number_of_nodes)
            giantClusterSize = networkx.number_of_nodes(giantCluster)
            relativeGiantClusterSize = numpy.true_divide(giantClusterSize, newGraphSize)
            # diameter = networkx.diameter(giantCluster, e=None)
            diameter = 0
        except:
            giantCluster = networkx.empty_graph()
            giantClusterSize = 0
            relativeGiantClusterSize = 0
            diameter = 0
        return relativeGiantClusterSize, diameter

    # TODO parallelize this map
    failureResults = map(analyzeSingleGraph, numbersOfNodesToRemove)

    failureDataframe = pandas.DataFrame(failureResults, columns=['relativeGiantClusterSize', 'diameter'])
    ascisse = numpy.linspace(0,100, num=steps, dtype='int')
    failureDataframe['percentuale'] = ascisse

    return failureDataframe


# ## Intentional attack

# In[5]:

def intentionalAttack(graph, steps=101):
    initialGraph = graph
    initialGraphSize = networkx.number_of_nodes(initialGraph)
    numbersOfNodesToRemove = numpy.linspace(0, initialGraphSize, num=steps, dtype='int')

    initialNodes = initialGraph.nodes()
    initialDegrees = initialGraph.degree()
    degreeDataframe = pandas.DataFrame(initialDegrees.items(), columns=['ID', 'degree'])
    degreeDataframe.sort(["degree"], ascending=[False], inplace=True)
    # TODO check whether one of the columns can be dropped
    # degreeDataframe = degreeDataframe.reset_index(drop=True)
    sortedNodes = degreeDataframe['ID'].values  # TODO degreeDataframe.ID

    def analyzeSingleGraph(number):
        # TODO check whether the list comprehensions can easily be parallelized;
        # they are much smoother to use than map()
        newGraph = initialGraph.copy()
        newGraph.remove_nodes_from(sortedNodes[0:number])  # TODO look for a faster ordering
        newGraphSize = networkx.number_of_nodes(newGraph)
        grado = newGraph.degree().items()

        # subgraphs = sorted(networkx.connected_component_subgraphs(newGraph), key = len, reverse=True)
        subgraphs = networkx.connected_component_subgraphs(newGraph)
        try:
            # giantCluster = subgraphs[0]
            giantCluster = max(subgraphs, key = networkx.number_of_nodes)
            giantClusterSize = networkx.number_of_nodes(giantCluster)
            relativeGiantClusterSize = numpy.true_divide(giantClusterSize, newGraphSize)
            # diameter = networkx.diameter(giantCluster, e=None)
            diameter = 0
        except:
            giantCluster = networkx.empty_graph()
            giantClusterSize = 0
            relativeGiantClusterSize = 0
            diameter = 0
        return relativeGiantClusterSize, diameter

    # TODO parallelize this map
    attackResults = map(analyzeSingleGraph, numbersOfNodesToRemove)

    attackDataframe = pandas.DataFrame(attackResults, columns=['relativeGiantClusterSize', 'diameter'])
    ascisse = numpy.linspace(0,100, num=steps, dtype='int')
    attackDataframe['percentuale'] = ascisse

    return attackDataframe


# In[7]:

#gestori = ["Tim", "Vodafone", "Wind", "Tre", "Roma"]
#colori = ['#004184','#ff3300','#ff8000','#018ECC', '#4d4d4d']

#gestori = ["Tim", "Vodafone", "Wind", "Tre"]
#colori = ['#004184','#ff3300','#ff8000','#018ECC']

gestori = ["Roma"]
colori = ['#004184']


# In[9]:

# data reading, calculations, data writing
# TODO parallelize
for provider in gestori:
    # read data
    adjacencyMatrix = numpy.genfromtxt(("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(provider)), delimiter=',', dtype='int')
    providerGraph = networkx.Graph(adjacencyMatrix)

    # calculate results
    print provider, "random failure:"
    get_ipython().magic(u'time failureResults = randomFailure(providerGraph, steps=10) # default: steps=101')
    # print provider, "intentional attack:"
    # %time attackResults = intentionalAttack(providerGraph, steps=101)

    # write on file
    failureResults.to_csv('../data/percolation/ComparazioneRandom{0}.csv'.format(provider), index=False)
    # attackResults.to_csv('../data/percolation/intentionalAttack_{0}.csv'.format(provider), index=False)


# In[ ]:


# In[ ]:
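
# In[ ]:

# Illustrative sketch (editorial addition, not part of the original notebook
# export): the same analysis can be exercised on a small synthetic graph, which
# avoids the hard-coded adjacency-matrix CSV paths above. The graph size and
# edge probability are arbitrary example values.
# demoGraph = networkx.erdos_renyi_graph(100, 0.05)
# demoFailure = randomFailure(demoGraph, steps=11)
# demoAttack = intentionalAttack(demoGraph, steps=11)
# print demoFailure.tail()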
mit
-4,597,681,802,934,022,000
32.526627
129
0.678433
false
pokornyv/SPEpy
parlib2.py
1
13136
########################################################### # SPEpy - simplified parquet equation solver for SIAM # # Copyright (C) 2019 Vladislav Pokorny; pokornyv@fzu.cz # # homepage: github.com/pokornyv/SPEpy # # parlib2.py - library of functions # ########################################################### import scipy as sp from scipy.integrate import simps from scipy.fftpack import fft,ifft from scipy.optimize import brentq,fixed_point,root from parlib import KramersKronigFFT,Filling,TwoParticleBubble,WriteFileX from time import time from config_siam import * absC = lambda z: sp.real(z)**2+sp.imag(z)**2 ########################################################### ## integrals over the Green function ###################### def DeterminantGD(Lambda,Gup_A,Gdn_A): ''' determinant ''' Det_A = 1.0+Lambda*(GG1_A+GG2_A) return Det_A def ReBDDFDD(Gup_A,Gdn_A,printint): ''' function to calculate the sum of real parts of FD and BD integrals ''' Int1_A = sp.imag(1.0/sp.flipud(sp.conj(Det_A)))*sp.real(Gup_A*sp.flipud(Gdn_A)) Int2_A = sp.imag(Gup_A*sp.flipud(sp.conj(Gdn_A))/sp.flipud(sp.conj(Det_A))) ## here we multiply big and small numbers for energies close to zero #RBF1_A = sp.exp(sp.log(FB_A)+sp.log(Int1_A)) RBF1_A = -FB_A*Int1_A RBF1_A[Nhalf] = (RBF1_A[Nhalf-1] + RBF1_A[Nhalf+1])/2.0 RBF2_A = -FD_A*Int2_A TailL2 = -0.5*RBF2_A[0]*En_A[0] ## leading-order, 1/x**3 tail correction to Int2_A RBF = (simps(RBF1_A+RBF2_A,En_A)+TailL2)/sp.pi if printint: WriteFileX([Int1_A,Int2_A,RBF1_A,RBF2_A],50.0,3,'','RBF.dat') print('{0: .5f}\t{1: .8f}\t{2: .8f}'\ .format(T,simps(RBF1_A,En_A)/sp.pi,(simps(RBF2_A,En_A)+TailL2)/sp.pi),flush=True) #exit() return RBF ########################################################### ## correlators of Green functions ######################### def CorrelatorGG(G1_A,G2_A,En_A,i1,i2): ''' <G1(x+i10)G2(x+w+i20)>, i1 and i2 are imaginary parts of arguments ''' ## zero-padding the arrays, G1 and G2 are complex functions FDex_A = sp.concatenate([FD_A[Nhalf:],sp.zeros(2*Nhalf+3),FD_A[:Nhalf]]) G1ex_A = sp.concatenate([G1_A[Nhalf:],sp.zeros(2*Nhalf+3),G1_A[:Nhalf]]) G2ex_A = sp.concatenate([G2_A[Nhalf:],sp.zeros(2*Nhalf+3),G2_A[:Nhalf]]) if i1*i2 > 0: G1ex_A = sp.conj(G1ex_A) ftF1_A = fft(FDex_A*G1ex_A) ftF2_A = fft(G2ex_A) if i2 > 0: ftF1_A = sp.conj(ftF1_A) else: ftF2_A = sp.conj(ftF2_A) GG_A = ifft(ftF1_A*ftF2_A*dE) ## undo the zero padding GG_A = sp.concatenate([GG_A[3*Nhalf+4:],GG_A[:Nhalf+1]]) TailL = -sp.real(G1_A)[0]*sp.real(G2_A)[0]*En_A[0] ## leading tail correction return -(GG_A+TailL)/sp.pi def CorrelatorImGGzero(G1_A,G2_A,i1,i2): ''' <G1(x+i10)G2(x+i20)>, w=0 element of the CorrelatorGG ''' if i1 < 0: G1_A = sp.conj(G1_A) if i2 < 0: G2_A = sp.conj(G2_A) Int_A = FD_A*sp.imag(G1_A*G2_A) Int = simps(Int_A,En_A) #print(Int_A[ 0],Int_A[-1]) #TailL = -sp.real(Int_A[ 0])*En_A[ 0] #TailR = sp.real(Int_A[-1])*En_A[-1] #return -(Int+TailL+TailR)/sp.pi return -Int/sp.pi def SusceptibilitySpecD(L,chiT,GFint_A): ''' susceptibility ''' Int = simps(FD_A*sp.imag(GFint_A**2),En_A)/sp.pi ## what about tail??? return (2.0+L*chiT)*Int ########################################################### ## two-particle vertex #################################### def KvertexD(Lambda,Gup_A,Gdn_A): ''' reducible K vertex Eq. 
(39ab) ''' K = -Lambda**2*CorrelatorImGGzero(Gdn_A,Gup_A,1,1) return K def LambdaVertexD(Gup_A,Gdn_A,Lambda): ''' calculates the Lambda vertex for given i ''' global GG1_A,GG2_A,Det_A Det_A = DeterminantGD(Lambda,Gup_A,Gdn_A) K = KvertexD(Lambda,Gup_A,Gdn_A) # GFn_A = 0.5*(Gup_A-sp.flipud(sp.conj(Gup_A))) # XD = ReBDDFDD(GFn_A,GFn_A,0) XD = ReBDDFDD(Gup_A,Gdn_A,0) Lambda = U/(1.0+K*XD) print('# Lambda: {0: .8f} {1:+8f}i'.format(float(sp.real(Lambda)),float(sp.imag(Lambda)))) print('# X: {0: .8f}'.format(XD)) print('# K: {0: .8f} {1:+8f}i'.format(float(sp.real(K)),float(sp.imag(K)))) return Lambda def CalculateLambdaD(Gup_A,Gdn_A,Lambda): ''' main solver for the Lambda vertex ''' global GG1_A,GG2_A,alpha Lold = 1e8 if SCsolver == 'iter': diffL = 1e8 ## correlators don't change with Lambda iterations t = time() if chat: print('# - - calculating correlators... ',end='',flush=True) GG1_A = CorrelatorGG(sp.imag(Gup_A),Gdn_A,En_A, 1, 1) GG2_A = CorrelatorGG(sp.imag(Gdn_A),Gup_A,En_A, 1,-1) if chat: print(' done in {0: .2f} seconds.'.format(time()-t)) #from parlib import WriteFileX #WriteFileX([GG1_A,GG2_A],100.0,3,'','GGcorr.dat') k = 1 while sp.fabs(sp.real(Lambda-Lold))>epsl: Lold = Lambda if SCsolver == 'fixed': Eqn = lambda x: LambdaVertexD(Gup_A,Gdn_A,x) try: Lambda = fixed_point(Eqn,Lambda,xtol=epsl) except RuntimeError: print("# - Error: CalculateLambdaD: No convergence in fixed-point algorithm.") print("# - Switch SCsolver to 'iter' or 'root' in siam.in and try again.") exit(1) elif SCsolver == 'brentq': Uc = -1.0/TwoParticleBubble(Gup_A,Gdn_A,'eh')[Nhalf] print(Uc) Eqn = lambda x: LambdaVertexD(Gup_A,Gdn_A,x)-x try: Lambda = brentq(Eqn,0.0,Uc-1e-12) if chat: print("# - - convergence check: {0: .5e}".format(Eqn(Lambda))) except RuntimeError: print("# - Error: CalculateLambdaD: No convergence in Brent algorithm.") print("# - Switch SCsolver to 'iter' or 'root' in siam.in and try again.") exit(1) break ## we don't need the outer loop here elif SCsolver == 'iter': print('# alpha: {0: .6f}'.format(alpha)) diffLold = diffL Eqn = lambda x: LambdaVertexD(Gup_A,Gdn_A,x) Lambda = alpha*Eqn(Lambda) + (1.0-alpha)*Lold diffL = sp.fabs(sp.real(Lambda-Lold)) if diffL<diffLold: alpha = sp.amin([1.05*alpha,1.0]) elif SCsolver == 'root': ## originally implemented for two complex Lambdas as 4-dimensional problem ## now just a check... sad story ErrConv = 0 eqn = lambda x: LambdaVertexD(Gup_A,Gdn_A,x)-x sol = root(eqn,[Lambda],method='hybr') if sol.success: Lambda = sol.x[0] if chat: print("# - - number of function calls: {0: 3d}".format(sol.nfev)) if chat: print("# - - convergence check: {0: .5e}".format(sol.fun[0])) for x in sol.fun: if sp.fabs(x)>epsl: print('# - - Warning: CalculateLambdaD: Convergence criteria for Lambda not satisfied!') ErrConv = 1 #print(sol.status) # 1 - gtol satisfied, 2 - ftol satisfied else: print("# - - Error: CalculateLambdaD: no solution by MINPACK root. Message from root:") print("# - - "+sol.message) exit(1) if ErrConv: print("# - - Error: CalculateLambdaD: no convergence in MINPACK root routine.") print("# - - Switch SCsolver to 'iter' or 'fixed' in siam.in and try again.") exit(1) break ## we don't need the outer loop here else: print('# - - Error: CalculateLambdaD: Unknown SCsolver') exit(1) if chat: print('# - - iter. {0: 3d}: Lambda: {1: .8f}'.format(k,sp.real(Lambda))) if k > 1000: print('# - - Error: CalculateLambdaD: No convergence after 1000 iterations. 
Exit.') exit(1) k += 1 return Lambda ########################################################### ## static self-energy ##################################### def VecSigmaT(Sigma0in,Sigma1in,Lambda,GFlambda,DLambda): ''' calculates normal and anomalous static self-energy, returns differences ''' #Sigma1in = RSigma1in+1.0j*ISigma1in Gup_A = GFlambda(En_A-ed-Sigma0in+(h-Sigma1in)) Gdn_A = GFlambda(En_A-ed-Sigma0in-(h-Sigma1in)) if T == 0.0: nTup = sp.real(DLambda(ed+Sigma0in-(h-Sigma1in))) nTdn = sp.real(DLambda(ed+Sigma0in+(h-Sigma1in))) else: nTup = Filling(Gup_A) nTdn = Filling(Gdn_A) Sigma0 = U*(nTup+nTdn-1.0)/2.0 Sigma1 = -Lambda*(nTup-nTdn)/2.0 return [Sigma0-Sigma0in,Sigma1-Sigma1in] def CalculateSigmaT(Lambda,S0,S1,GFlambda,DLambda): ''' solver for the static self-energy ''' eqn = lambda x: VecSigmaT(x[0],x[1],Lambda,GFlambda,DLambda) #sol = root(eqn,[S0,sp.real(S1),sp.imag(S1)],method='lm') sol = root(eqn,[S0,S1],method='hybr') if sol.success: [Sigma0,Sigma1] = [sol.x[0],sol.x[1]] if chat: print("# - - number of function calls: {0: 3d}".format(sol.nfev)) if chat: print("# - - convergence check: {0: .5e}, {1: .5e}".format(sol.fun[0],sol.fun[1])) else: print("# - Error: CalculateSigmaT: no solution by MINPACK root. Message from root:") print("# - - "+sol.message) exit(1) return [Sigma0,Sigma1] ########################################################### ## dynamic self-energy #################################### def CorrelatorsSE(Gup_A,Gdn_A,i1,i2): ''' correlators to Theta function, updated ''' ## zero-padding the arrays, G1 and G2 are complex functions FDex_A = sp.concatenate([FD_A[Nhalf:], sp.zeros(2*Nhalf+3), FD_A[:Nhalf]]) Fup_A = sp.concatenate([Gup_A[Nhalf:],sp.zeros(2*Nhalf+3),Gup_A[:Nhalf]]) Fdn_A = sp.concatenate([Gdn_A[Nhalf:],sp.zeros(2*Nhalf+3),Gdn_A[:Nhalf]]) ftIGG1_A = fft(FDex_A*sp.imag(Fdn_A))*sp.conj(fft(Fup_A))*dE ftGG2_A = sp.conj(fft(FDex_A*sp.conj(Fup_A)))*fft(Fdn_A)*dE ftGG3_A = sp.conj(fft(FDex_A*Fup_A))*fft(Fdn_A)*dE IGGs1_A = -ifft(ftIGG1_A)/sp.pi GGs2_A = -ifft(ftGG2_A)/(2.0j*sp.pi) GGs3_A = -ifft(ftGG3_A)/(2.0j*sp.pi) ## undo the zero padding IGGs1_A = sp.concatenate([IGGs1_A[3*Nhalf+4:],IGGs1_A[:Nhalf+1]]) GGs2_A = sp.concatenate([ GGs2_A[3*Nhalf+4:], GGs2_A[:Nhalf+1]]) GGs3_A = sp.concatenate([ GGs3_A[3*Nhalf+4:], GGs3_A[:Nhalf+1]]) return [IGGs1_A,GGs2_A,GGs3_A] def BubbleD(G2_A,G1_A,Lambda,spin): ''' auxiliary function (2P bubble) to calculate spectral self-energy ''' sGG1_A = CorrelatorGG(sp.imag(G2_A),G1_A,En_A, 1, 1) sGG2_A = CorrelatorGG(sp.imag(G1_A),G2_A,En_A, 1,-1) return Lambda*(sGG1_A+sGG2_A) def SelfEnergyD(Gup_A,Gdn_A,Lambda,spin): ''' dynamic self-energy, uses Kramers-Kronig relations to calculate the real part ''' if spin == 'up': BubbleD_A = BubbleD(Gup_A,Gdn_A,Lambda,spin) GF_A = sp.copy(Gdn_A) Det_A = DeterminantGD(Lambda,Gup_A,Gdn_A) else: ## spin='dn' BubbleD_A = BubbleD(Gdn_A,Gup_A,Lambda,spin) GF_A = sp.copy(Gup_A) Det_A = sp.flipud(sp.conj(DeterminantGD(Lambda,Gup_A,Gdn_A))) Kernel_A = U*BubbleD_A/Det_A ## zero-padding the arrays FDex_A = sp.concatenate([FD_A[Nhalf:],sp.zeros(2*N+3),FD_A[:Nhalf]]) BEex_A = sp.concatenate([BE_A[Nhalf:],sp.zeros(2*N+3),BE_A[:Nhalf]]) ImGF_A = sp.concatenate([sp.imag(GF_A[Nhalf:]),sp.zeros(2*Nhalf+3),sp.imag(GF_A[:Nhalf])]) ImKernel_A = sp.concatenate([sp.imag(Kernel_A[Nhalf:]),sp.zeros(2*Nhalf+3),sp.imag(Kernel_A[:Nhalf])]) ## performing the convolution ftImSE1_A = -sp.conj(fft(BEex_A*ImKernel_A))*fft(ImGF_A)*dE ftImSE2_A = -fft(FDex_A*ImGF_A)*sp.conj(fft(ImKernel_A))*dE ImSE_A = 
sp.real(ifft(ftImSE1_A+ftImSE2_A))/sp.pi ImSE_A = sp.concatenate([ImSE_A[3*Nhalf+4:],ImSE_A[:Nhalf+1]]) Sigma_A = KramersKronigFFT(ImSE_A) + 1.0j*ImSE_A return Sigma_A def SelfEnergyD2(Gup_A,Gdn_A,Lambda,spin): ''' dynamic self-energy, calculates the complex function from FFT ''' if spin == 'up': BubbleD_A = BubbleD(Gup_A,Gdn_A,Lambda,spin) GF_A = sp.copy(Gdn_A) Det_A = DeterminantGD(Lambda,Gup_A,Gdn_A) else: ## spin='dn' BubbleD_A = BubbleD(Gdn_A,Gup_A,Lambda,spin) GF_A = sp.copy(Gup_A) Det_A = sp.flipud(sp.conj(DeterminantGD(Lambda,Gup_A,Gdn_A))) Kernel_A = U*BubbleD_A/Det_A ## zero-padding the arrays FDex_A = sp.concatenate([FD_A[Nhalf:],sp.zeros(2*Nhalf+3),FD_A[:Nhalf]]) BEex_A = sp.concatenate([BE_A[Nhalf:],sp.zeros(2*Nhalf+3),BE_A[:Nhalf]]) GFex_A = sp.concatenate([GF_A[Nhalf:],sp.zeros(2*Nhalf+3),GF_A[:Nhalf]]) Kernelex_A = sp.concatenate([Kernel_A[Nhalf:],sp.zeros(2*Nhalf+3),Kernel_A[:Nhalf]]) ## performing the convolution ftSE1_A = -sp.conj(fft(BEex_A*sp.imag(Kernelex_A)))*fft(GFex_A)*dE ftSE2_A = +fft(FDex_A*sp.imag(GFex_A))*sp.conj(fft(Kernelex_A))*dE SE_A = ifft(ftSE1_A+ftSE2_A)/sp.pi SE_A = sp.concatenate([SE_A[3*Nhalf+4:],SE_A[:Nhalf+1]]) return SE_A def SelfEnergyD_sc(Gup_A,Gdn_A,GTup_A,GTdn_A,Lambda,spin): ''' dynamic self-energy, calculates the complex function from FFT ''' if spin == 'up': BubbleD_A = BubbleD(GTup_A,GTdn_A,Lambda,spin) GF_A = sp.copy(Gdn_A) Det_A = DeterminantGD(Lambda,GTup_A,GTdn_A) else: ## spin='dn' BubbleD_A = BubbleD(GTdn_A,GTup_A,Lambda,spin) GF_A = sp.copy(Gup_A) Det_A = sp.flipud(sp.conj(DeterminantGD(Lambda,GTup_A,GTdn_A))) Kernel_A = U*BubbleD_A/Det_A ## zero-padding the arrays FDex_A = sp.concatenate([FD_A[Nhalf:],sp.zeros(2*Nhalf+3),FD_A[:Nhalf]]) BEex_A = sp.concatenate([BE_A[Nhalf:],sp.zeros(2*Nhalf+3),BE_A[:Nhalf]]) GFex_A = sp.concatenate([GF_A[Nhalf:],sp.zeros(2*Nhalf+3),GF_A[:Nhalf]]) Kernelex_A = sp.concatenate([Kernel_A[Nhalf:],sp.zeros(2*Nhalf+3),Kernel_A[:Nhalf]]) ## performing the convolution ftSE1_A = -sp.conj(fft(BEex_A*sp.imag(Kernelex_A)))*fft(GFex_A)*dE ftSE2_A = +fft(FDex_A*sp.imag(GFex_A))*sp.conj(fft(Kernelex_A))*dE SE_A = ifft(ftSE1_A+ftSE2_A)/sp.pi SE_A = sp.concatenate([SE_A[3*Nhalf+4:],SE_A[:Nhalf+1]]) return SE_A ## parlib2.py end ###
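# Illustrative check, not part of the original SPEpy module: CorrelatorGG,
# CorrelatorsSE and the SelfEnergyD* routines above all rest on the same
# zero-padding trick, namely that padding the arrays before the FFT makes the
# circular convolution returned by ifft(fft(a)*fft(b)) coincide with the linear
# one. A minimal, self-contained numpy sketch of that identity:
def _zero_padded_fft_convolution_demo(n=8, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    a, b = rng.rand(n), rng.rand(n)
    A = np.fft.fft(np.concatenate([a, np.zeros(n)]))   # pad with >= n zeros
    B = np.fft.fft(np.concatenate([b, np.zeros(n)]))
    via_fft = np.real(np.fft.ifft(A * B))[:2*n - 1]
    direct = np.convolve(a, b)                          # linear convolution, length 2n-1
    assert np.allclose(via_fft, direct)
    return via_fft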
gpl-3.0
-2,502,053,832,222,385,000
39.294479
103
0.619062
false
uzgit/ardupilot
Tools/LogAnalyzer/tests/TestPitchRollCoupling.py
3
6116
from LogAnalyzer import Test,TestResult import DataflashLog from VehicleType import VehicleType import collections class TestPitchRollCoupling(Test): '''test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning''' # TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values def __init__(self): Test.__init__(self) self.name = "Pitch/Roll" self.enable = True # TEMP def run(self, logdata, verbose): self.result = TestResult() self.result.status = TestResult.StatusType.GOOD if logdata.vehicleType != VehicleType.Copter: self.result.status = TestResult.StatusType.NA return if not "ATT" in logdata.channels: self.result.status = TestResult.StatusType.UNKNOWN self.result.statusMessage = "No ATT log data" return # figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes autoModes = ["RTL", "AUTO", "LAND", "LOITER", "GUIDED", "CIRCLE", "OF_LOITER", "POSHOLD", "BRAKE", "AVOID_ADSB", "GUIDED_NOGPS", "SMARTRTL"] # use CTUN RollIn/DesRoll + PitchIn/DesPitch manualModes = ["STABILIZE", "DRIFT", "ALTHOLD", "ALT_HOLD", "POSHOLD"] # ignore data from these modes: ignoreModes = ["ACRO", "SPORT", "FLIP", "AUTOTUNE","", "THROW",] autoSegments = [] # list of (startLine,endLine) pairs manualSegments = [] # list of (startLine,endLine) pairs orderedModes = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0])) isAuto = False # we always start in a manual control mode prevLine = 0 mode = "" for line,modepair in orderedModes.iteritems(): mode = modepair[0].upper() if prevLine == 0: prevLine = line if mode in autoModes: if not isAuto: manualSegments.append((prevLine,line-1)) prevLine = line isAuto = True elif mode in manualModes: if isAuto: autoSegments.append((prevLine,line-1)) prevLine = line isAuto = False elif mode in ignoreModes: if isAuto: autoSegments.append((prevLine,line-1)) else: manualSegments.append((prevLine,line-1)) prevLine = 0 else: raise Exception("Unknown mode in TestPitchRollCoupling: %s" % mode) # and handle the last segment, which doesn't have an ending if mode in autoModes: autoSegments.append((prevLine,logdata.lineCount)) elif mode in manualModes: manualSegments.append((prevLine,logdata.lineCount)) # figure out max lean angle, the ANGLE_MAX param was added in AC3.1 maxLeanAngle = 45.0 if "ANGLE_MAX" in logdata.parameters: maxLeanAngle = logdata.parameters["ANGLE_MAX"] / 100.0 maxLeanAngleBuffer = 10 # allow a buffer margin # ignore anything below this altitude, to discard any data while not flying minAltThreshold = 2.0 # look through manual+auto flight segments # TODO: filter to ignore single points outside range? 
(maxRoll, maxRollLine) = (0.0, 0) (maxPitch, maxPitchLine) = (0.0, 0) for (startLine,endLine) in manualSegments+autoSegments: # quick up-front test, only fallover into more complex line-by-line check if max()>threshold rollSeg = logdata.channels["ATT"]["Roll"].getSegment(startLine,endLine) pitchSeg = logdata.channels["ATT"]["Pitch"].getSegment(startLine,endLine) if not rollSeg.dictData and not pitchSeg.dictData: continue # check max roll+pitch for any time where relative altitude is above minAltThreshold roll = max(abs(rollSeg.min()), abs(rollSeg.max())) pitch = max(abs(pitchSeg.min()), abs(pitchSeg.max())) if (roll>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll)) or (pitch>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch)): lit = DataflashLog.LogIterator(logdata, startLine) assert(lit.currentLine == startLine) while lit.currentLine <= endLine: relativeAlt = lit["CTUN"]["BarAlt"] if relativeAlt > minAltThreshold: roll = lit["ATT"]["Roll"] pitch = lit["ATT"]["Pitch"] if abs(roll)>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll): maxRoll = roll maxRollLine = lit.currentLine if abs(pitch)>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch): maxPitch = pitch maxPitchLine = lit.currentLine next(lit) # check for breaking max lean angles if maxRoll and abs(maxRoll)>abs(maxPitch): self.result.status = TestResult.StatusType.FAIL self.result.statusMessage = "Roll (%.2f, line %d) > maximum lean angle (%.2f)" % (maxRoll, maxRollLine, maxLeanAngle) return if maxPitch: self.result.status = TestResult.StatusType.FAIL self.result.statusMessage = "Pitch (%.2f, line %d) > maximum lean angle (%.2f)" % (maxPitch, maxPitchLine, maxLeanAngle) return # TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne) # ...
gpl-3.0
-7,586,790,118,497,596,000
45.687023
157
0.567528
false
relh/keras
examples/lstm_benchmark.py
9
3008
'''Compare LSTM implementations on the IMDB sentiment classification task. consume_less='cpu' preprocesses input to the LSTM which typically results in faster computations at the expense of increased peak memory usage as the preprocessed input must be kept in memory. consume_less='mem' does away with the preprocessing, meaning that it might take a little longer, but should require less peak memory. consume_less='gpu' concatenates the input, output and forget gate's weights into one, large matrix, resulting in faster computation time as the GPU can utilize more cores, at the expense of reduced regularization because the same dropout is shared across the gates. Note that the relative performance of the different `consume_less` modes can vary depending on your device, your model and the size of your data. ''' import time import numpy as np import matplotlib.pyplot as plt from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Embedding, Dense, LSTM from keras.datasets import imdb max_features = 20000 max_length = 80 embedding_dim = 256 batch_size = 128 epochs = 10 modes = ['cpu', 'mem', 'gpu'] print('Loading data...') (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features) X_train = sequence.pad_sequences(X_train, max_length) X_test = sequence.pad_sequences(X_test, max_length) # Compile and train different models while measuring performance. results = [] for mode in modes: print('Testing mode: consume_less="{}"'.format(mode)) model = Sequential() model.add(Embedding(max_features, embedding_dim, input_length=max_length, dropout=0.2)) model.add(LSTM(embedding_dim, dropout_W=0.2, dropout_U=0.2, consume_less=mode)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) start_time = time.time() history = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=epochs, validation_data=(X_test, y_test)) average_time_per_epoch = (time.time() - start_time) / epochs results.append((history, average_time_per_epoch)) # Compare models' accuracy, loss and elapsed time per epoch. plt.style.use('ggplot') ax1 = plt.subplot2grid((2, 2), (0, 0)) ax1.set_title('Accuracy') ax1.set_ylabel('Validation Accuracy') ax1.set_xlabel('Epochs') ax2 = plt.subplot2grid((2, 2), (1, 0)) ax2.set_title('Loss') ax2.set_ylabel('Validation Loss') ax2.set_xlabel('Epochs') ax3 = plt.subplot2grid((2, 2), (0, 1), rowspan=2) ax3.set_title('Time') ax3.set_ylabel('Seconds') for mode, result in zip(modes, results): ax1.plot(result[0].epoch, result[0].history['val_acc'], label=mode) ax2.plot(result[0].epoch, result[0].history['val_loss'], label=mode) ax1.legend() ax2.legend() ax3.bar(np.arange(len(results)), [x[1] for x in results], tick_label=modes, align='center') plt.tight_layout() plt.show()
mit
3,519,658,746,247,531,500
35.240964
91
0.706782
false
cseed/hail
hail/python/test/hail/experimental/test_dnd_array.py
1
11934
import numpy as np import hail as hl from hail.utils import new_temp_file from ..helpers import startTestHailContext, stopTestHailContext, fails_local_backend setUpModule = startTestHailContext tearDownModule = stopTestHailContext def test_range_collect(): n_variants = 10 n_samples = 10 block_size = 3 mt = hl.utils.range_matrix_table(n_variants, n_samples) mt = mt.select_entries(x=mt.row_idx * mt.col_idx) da = hl.experimental.dnd.array(mt, 'x', block_size=block_size) a = np.array(mt.x.collect()).reshape(n_variants, n_samples) assert np.array_equal(da.collect(), a) @fails_local_backend() def test_range_matmul(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt = hl.utils.range_matrix_table(n_variants, n_samples) mt = mt.select_entries(x=mt.row_idx * mt.col_idx) da = hl.experimental.dnd.array(mt, 'x', block_size=block_size) da = (da @ da.T).checkpoint(new_temp_file()) assert da._force_count_blocks() == n_blocks da_result = da.collect() a = np.array(mt.x.collect()).reshape(n_variants, n_samples) a_result = a @ a.T assert np.array_equal(da_result, a_result) @fails_local_backend() def test_small_collect(): n_variants = 10 n_samples = 10 block_size = 3 mt = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt = mt.select_entries(dosage=hl.float(mt.GT.n_alt_alleles())) da = hl.experimental.dnd.array(mt, 'dosage', block_size=block_size) a = np.array(mt.dosage.collect()).reshape(n_variants, n_samples) assert np.array_equal(da.collect(), a) @fails_local_backend() def test_medium_collect(): n_variants = 100 n_samples = 100 block_size = 32 mt = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt = mt.select_entries(dosage=hl.float(mt.GT.n_alt_alleles())) da = hl.experimental.dnd.array(mt, 'dosage', block_size=block_size) a = np.array(mt.dosage.collect()).reshape(n_variants, n_samples) assert np.array_equal(da.collect(), a) @fails_local_backend() def test_small_matmul(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt = mt.select_entries(dosage=hl.float(mt.GT.n_alt_alleles())) da = hl.experimental.dnd.array(mt, 'dosage', block_size=block_size) da = (da @ da.T).checkpoint(new_temp_file()) assert da._force_count_blocks() == n_blocks da_result = da.collect() a = np.array(mt.dosage.collect()).reshape(n_variants, n_samples) a_result = a @ a.T assert np.array_equal(da_result, a_result) @fails_local_backend() def test_medium_matmul(): n_variants = 100 n_samples = 100 block_size = 32 n_blocks = 16 mt = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt = mt.select_entries(dosage=hl.float(mt.GT.n_alt_alleles())) da = hl.experimental.dnd.array(mt, 'dosage', block_size=block_size) da = (da @ da.T).checkpoint(new_temp_file()) assert da._force_count_blocks() == n_blocks da_result = da.collect() a = np.array(mt.dosage.collect()).reshape(n_variants, n_samples) a_result = a @ a.T assert np.array_equal(da_result, a_result) @fails_local_backend() def test_matmul_via_inner_product(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt = hl.utils.range_matrix_table(n_variants, n_samples) mt = mt.select_entries(x=mt.row_idx * mt.col_idx) da = hl.experimental.dnd.array(mt, 'x', block_size=block_size) prod = (da @ da.T).checkpoint(new_temp_file()) assert prod._force_count_blocks() == n_blocks prod_result = prod.collect() ip_result = da.inner_product(da.T, lambda l, r: l * r, lambda l, r: 
l + r, hl.float(0.0), lambda prod: hl.agg.sum(prod) ).collect() assert np.array_equal(prod_result, ip_result) @fails_local_backend() def test_king_homo_estimator(): hl.set_global_seed(1) mt = hl.balding_nichols_model(2, 5, 5) mt = mt.select_entries(genotype_score=hl.float(mt.GT.n_alt_alleles())) da = hl.experimental.dnd.array(mt, 'genotype_score', block_size=3) def sqr(x): return x * x score_difference = da.T.inner_product( da, lambda l, r: sqr(l - r), lambda l, r: l + r, hl.float(0), hl.agg.sum ).checkpoint(new_temp_file()) assert np.array_equal( score_difference.collect(), np.array([[0., 6., 4., 2., 4.], [6., 0., 6., 4., 6.], [4., 6., 0., 6., 0.], [2., 4., 6., 0., 6.], [4., 6., 0., 6., 0.]])) @fails_local_backend() def test_dndarray_sum(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) mt2 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt2 = mt2.select_entries(dosage=hl.float(mt2.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da2 = hl.experimental.dnd.array(mt2, 'dosage', block_size=block_size) da_sum = (da1 + da2).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a2 = np.array(mt2.dosage.collect()).reshape(n_variants, n_samples) a_result = a1 + a2 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_sum_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (da1 + 10).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = a1 + 10 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_rsum_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (10 + da1).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = 10 + a1 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_mul_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (da1 * 10).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = a1 * 10 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_rmul_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, 
n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (10 * da1).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = 10 * a1 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_sub_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (da1 - 10).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = a1 - 10 assert np.array_equal(da_result, a_result) @fails_local_backend() def test_dndarray_rsub_scalar(): n_variants = 10 n_samples = 10 block_size = 3 n_blocks = 16 mt1 = hl.balding_nichols_model(n_populations=2, n_variants=n_variants, n_samples=n_samples) mt1 = mt1.select_entries(dosage=hl.float(mt1.GT.n_alt_alleles())) da1 = hl.experimental.dnd.array(mt1, 'dosage', block_size=block_size) da_sum = (10 - da1).checkpoint(new_temp_file()) assert da_sum._force_count_blocks() == n_blocks da_result = da_sum.collect() a1 = np.array(mt1.dosage.collect()).reshape(n_variants, n_samples) a_result = 10 - a1 assert np.array_equal(da_result, a_result) def test_dndarray_errors_on_unsorted_columns(): n_variants = 10 n_samples = 10 block_size = 3 mt = hl.utils.range_matrix_table(n_variants, n_samples) mt = mt.key_cols_by(sampleid=hl.str('zyxwvutsrq')[mt.col_idx]) mt = mt.select_entries(x=mt.row_idx * mt.col_idx) try: hl.experimental.dnd.array(mt, 'x', block_size=block_size) except ValueError as err: assert 'columns are not in sorted order', err.args[0] else: assert False @fails_local_backend() def test_dndarray_sort_columns(): n_variants = 10 n_samples = 10 block_size = 3 disorder = [0, 9, 8, 7, 1, 2, 3, 4, 6, 5] order = [x[0] for x in sorted(enumerate(disorder), key=lambda x: x[1])] mt = hl.utils.range_matrix_table(n_variants, n_samples) mt = mt.key_cols_by(sampleid=hl.literal(disorder)[mt.col_idx]) mt = mt.select_entries(x=mt.row_idx * mt.col_idx) da = hl.experimental.dnd.array(mt, 'x', block_size=block_size, sort_columns=True) a = np.array( [r * order[c] for r in range(n_variants) for c in range(n_samples)] ).reshape((n_variants, n_samples)) assert np.array_equal(da.collect(), a) result = (da.T @ da).collect() expected = a.T @ a assert np.array_equal(result, expected)
mit
8,407,039,667,649,245,000
31.606557
85
0.588906
false
bjornsturmberg/EMUstack
backend/materials.py
2
16446
""" materials.py is a subroutine of EMUstack that defines Material objects, these represent dispersive lossy refractive indices and possess methods to interpolate n from tabulated data. Copyright (C) 2015 Bjorn Sturmberg, Kokou Dossou, Felix Lawrence EMUstack is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import numpy as np from scipy.interpolate import interp1d import matplotlib import paths matplotlib.use('pdf') import matplotlib.pyplot as plt data_location = paths.data_path class Material(object): """ Represents a material with a refractive index n. If the material is dispersive, the refractive index at a given wavelength is calculated by linear interpolation from the initially given data `n`. Materials may also have `n` calculated from a Drude model with input parameters. Args: n : Either a scalar refractive index, \ an array of values `(wavelength, n)`, or \ `(wavelength, real(n), imag(n))`, \ or omega_p, omega_g, eps_inf for Drude model. Currently included materials are; .. tabularcolumns:: |c|c|c| +--------------------+------------+------------------------+ | **Semiconductors** | **Metals** | **Transparent oxides** | +--------------------+------------+------------------------+ | Si_c | Au | TiO2 | +--------------------+------------+------------------------+ | Si_a | Au_Palik | TiO2_anatase | +--------------------+------------+------------------------+ | SiO2 | Ag | ITO | +--------------------+------------+------------------------+ | CuO | Ag_Palik | ZnO | +--------------------+------------+------------------------+ | CdTe | Cu | SnO2 | +--------------------+------------+------------------------+ | FeS2 | Cu_Palik | FTO_Wenger | +--------------------+------------+------------------------+ | Zn3P2 | Al | FTO_Wengerk5 | +--------------------+------------+------------------------+ | AlGaAs | | | +--------------------+------------+------------------------+ | Al2O3 | | | +--------------------+------------+------------------------+ | Al2O3_PV | | | +--------------------+------------+------------------------+ | GaAs | | | +--------------------+------------+------------------------+ | InGaAs | **Drude** | **Other** | +--------------------+------------+------------------------+ | Si3N4 | Au_drude | Air | +--------------------+------------+------------------------+ | MgF2 | | H2O | +--------------------+------------+------------------------+ | InP | | Glass | +--------------------+------------+------------------------+ | InAs | | Spiro | +--------------------+------------+------------------------+ | GaP | | Spiro_nk | +--------------------+------------+------------------------+ | Ge | | | +--------------------+------------+------------------------+ | AlN | | | +--------------------+------------+------------------------+ | GaN | | | +--------------------+------------+------------------------+ | MoO3 | | | +--------------------+------------+------------------------+ | ZnS | | | +--------------------+------------+------------------------+ | AlN_PV | | | 
+--------------------+------------+------------------------+ | | | **Experimental** incl. | +--------------------+------------+------------------------+ | | | CH3NH3PbI3 | +--------------------+------------+------------------------+ | | | Sb2S3 | +--------------------+------------+------------------------+ | | | Sb2S3_ANU2014 | +--------------------+------------+------------------------+ | | | Sb2S3_ANU2015 | +--------------------+------------+------------------------+ | | | GO_2014 | +--------------------+------------+------------------------+ | | | GO_2015 | +--------------------+------------+------------------------+ | | | rGO_2015 | +--------------------+------------+------------------------+ | | | SiON_Low | +--------------------+------------+------------------------+ | | | SiON_High | +--------------------+------------+------------------------+ | | | Low_Fe_Glass | +--------------------+------------+------------------------+ | | | Perovskite_00 | +--------------------+------------+------------------------+ | | | Perovskite | +--------------------+------------+------------------------+ | | | Perovskite_b2b | +--------------------+------------+------------------------+ | | | Ge_Doped | +--------------------+------------+------------------------+ """ def __init__(self, n): if () == np.shape(n): # n is a scalar, the medium is non-dispersive. self._n = lambda x: n self.data_wls = None self.data_ns = n elif np.shape(n) == (3,): # we will calculate n from the Drude model with input omega_p, omega_g, eps_inf values c = 299792458 omega_plasma = n[0] omega_gamma = n[1] eps_inf = n[2] self.data_wls = 'Drude' self.data_ns = [omega_plasma, omega_gamma, eps_inf, c] self._n = lambda x: np.sqrt(self.data_ns[2]-self.data_ns[0]**2/(((2*np.pi*self.data_ns[3])/(x*1e-9))**2 + 1j*self.data_ns[1]*(2*np.pi*self.data_ns[3])/(x*1e-9))) if np.imag(self._n) < 0: self._n *= -1 elif np.shape(n) >= (2, 1): self.data_wls = n[:, 0] if len(n[0]) == 2: # n is an array of wavelengths and (possibly-complex) # refractive indices. 
self.data_ns = n[:, 1] elif len(n[0]) == 3: self.data_ns = n[:, 1] + 1j * n[:, 2] else: raise ValueError # Do cubic interpolation if we get the chance # if len(self.data_wls) > 3: # self._n = interp1d(self.data_wls, self.data_ns, 'cubic') # else: self._n = interp1d(self.data_wls, self.data_ns) # else: # raise ValueError, "You must either set a constant refractive \ # index, provide tabulated data, or Drude parameters" def n(self, wl_nm): """ Return n for the specified wavelength.""" return self._n(wl_nm) def __getstate__(self): """ Can't pickle self._n, so remove it from what is pickled.""" d = self.__dict__.copy() d.pop('_n') return d def __setstate__(self, d): """ Recreate self._n when unpickling.""" self.__dict__ = d if None is self.data_wls: self._n = lambda x: self.data_ns elif self.data_wls is 'Drude': self._n = lambda x: np.sqrt(self.data_ns[2]-self.data_ns[0]**2/(((2*np.pi*self.data_ns[3])/(x*1e-9))**2 + 1j*self.data_ns[1]*(2*np.pi*self.data_ns[3])/(x*1e-9))) if np.imag(self._n) < 0: self._n *= -1 else: self._n = interp1d(self.data_wls, self.data_ns) def plot_n_data(data_name): data = np.loadtxt(data_location+'%s.txt' % data_name) wls = data[:, 0] Re_n = data[:, 1] Im_n = data[:, 2] fig = plt.figure(figsize=(5, 2)) ax1 = fig.add_subplot(1, 1, 1) ax1.plot(wls, Re_n, 'k', linewidth=2) ax1.set_ylabel(r"Re(n)") ax2 = ax1.twinx() ax2.plot(wls, Im_n, 'r--', linewidth=2) ax2.set_ylabel(r"Im(n)") ax2.spines['right'].set_color('red') ax2.yaxis.label.set_color('red') ax2.tick_params(axis='y', colors='red') ax1.set_xlim((wls[0], wls[-1])) plt.savefig('%s_n' % data_name) Air = Material(1.00 + 0.0j) H2O = Material(np.loadtxt('%sH2O.txt' % data_location)) # G. M. Hale and M. R. Querry. doi:10.1364/AO.12.000555 # Transparent oxides TiO2 = Material(np.loadtxt('%sTiO2.txt' % data_location)) # Filmetrics.com TiO2_anatase = Material(np.loadtxt('%sTiO2_anatase.txt' % data_location)) # 500C anneal PV Lighthouse doi:/10.1016/S0927-0248(02)00473-7 ITO = Material(np.loadtxt('%sITO.txt' % data_location)) # Filmetrics.com ZnO = Material(np.loadtxt('%sZnO.txt' % data_location)) # Z. Holman 2012 unpublished http://www.pvlighthouse.com.au/resources/photovoltaic%20materials/refractive%20index/refractive%20index.aspx # Semiconductors Si_c = Material(np.loadtxt('%sSi_c.txt' % data_location)) # M. Green Prog. 
PV 1995 doi:10.1002/pip.4670030303 Si_a = Material(np.loadtxt('%sSi_a.txt' % data_location)) SiO2 = Material(np.loadtxt('%sSiO2.txt' % data_location)) CuO = Material(np.loadtxt('%sCuO.txt' % data_location)) CdTe = Material(np.loadtxt('%sCdTe.txt' % data_location)) FeS2 = Material(np.loadtxt('%sFeS2.txt' % data_location)) Zn3P2 = Material(np.loadtxt('%sZn3P2.txt' % data_location)) Sb2S3 = Material(np.loadtxt('%sSb2S3.txt' % data_location)) AlGaAs = Material(np.loadtxt('%sAlGaAs.txt' % data_location)) ZnS = Material(np.loadtxt('%sZnS.txt' % data_location)) SnO2 = Material(np.loadtxt('%sSnO2.txt' % data_location)) Glass = Material(np.loadtxt('%sSoda_lime_glass_nk_Pil.txt' % data_location)) # PV lighthouse, unpublished Al2O3 = Material(np.loadtxt('%sAl2O3.txt' % data_location)) # http://refractiveindex.info/?shelf=main&book=Al2O3&page=Malitson-o Al2O3_PV = Material(np.loadtxt('%sAl2O3_PV.txt' % data_location)) # PV lighthouse GaAs = Material(np.loadtxt('%sGaAs.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/GaAs/Gallium-Arsenide InGaAs = Material(np.loadtxt('%sInGaAs.txt' % data_location)) # http://refractiveindex.info/?group=CRYSTALS&material=InGaAs Si3N4 = Material(np.loadtxt('%sSi3N4.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/Si3N4/Silicon-Nitride-SiN MgF2 = Material(np.loadtxt('%sMgF2.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/MgF2/Magnesium-Fluoride InP = Material(np.loadtxt('%sInP.txt' % data_location)) InAs = Material(np.loadtxt('%sInAs.txt' % data_location)) # Filmetrics.com GaP = Material(np.loadtxt('%sGaP.txt' % data_location)) # Filmetrics.com GaN = Material(np.loadtxt('%sGaN.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/GaN/Gallium-Nitride AlN = Material(np.loadtxt('%sAlN.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/AlN/Aluminium-Nitride Ge = Material(np.loadtxt('%sGe.txt' % data_location)) # http://www.filmetrics.com/refractive-index-database/Ge/Germanium MoO3 = Material(np.loadtxt('%sMoO3.txt' % data_location)) # doi:10.1103/PhysRevB.88.115141 Spiro = Material(np.loadtxt('%sSpiro.txt' % data_location)) # doi:10.1364/OE.23.00A263 Spiro_nk = Material(np.loadtxt('%sSpiro_nk_Filipic.txt' % data_location)) # Extended Filipic data FTO_Wenger = Material(np.loadtxt('%sFTO_Wenger.txt' % data_location)) # doi:10.1021/jp111565q FTO_Wengerk5 = Material(np.loadtxt('%sFTO_Wengerk5.txt' % data_location)) # doi:10.1021/jp111565q AlN_PV = Material(np.loadtxt('%sAlN_PV.txt' % data_location)) # PV lighthouse doi:10.1002/pssr.201307153 # Metals Au = Material(np.loadtxt('%sAu_JC.txt' % data_location)) # Johnson Christy Au_Palik = Material(np.loadtxt('%sAu_Palik.txt' % data_location)) # Palik Ag = Material(np.loadtxt('%sAg_JC.txt' % data_location)) # Johnson Christy Ag_Palik = Material(np.loadtxt('%sAg_Palik.txt' % data_location)) # Palik Cu = Material(np.loadtxt('%sCu_JC.txt' % data_location)) # Johnson Christy Cu_Palik = Material(np.loadtxt('%sCu_Palik.txt' % data_location)) # Palik Al = Material(np.loadtxt('%sAl.txt' % data_location)) # McPeak ACS Photonics 2015 http://dx.doi.org/10.1021/ph5004237 # Drude model # Need to provide [omega_plasma, omega_gamma, eplison_infinity] Au_drude = Material([1.36e16, 1.05e14, 9.5]) # Johnson Christy # Less Validated CH3NH3PbI3 = Material(np.loadtxt('%sCH3NH3PbI3.txt' % data_location)) # doi:10.1021/jz502471h - EPFL Sb2S3_ANU2014 = Material(np.loadtxt('%sSb2S3_ANU2014.txt' % data_location)) # measured at 
Australian National Uni. Sb2S3_ANU2015 = Material(np.loadtxt('%sSb2S3_ANU2015.txt' % data_location)) # measured at Australian National Uni. GO_2014 = Material(np.loadtxt('%sGO_2014.txt' % data_location)) # Graphene Oxide measured at Swinbourne Uni. GO_2015 = Material(np.loadtxt('%sGO_2015.txt' % data_location)) # Graphene Oxide measured at Swinbourne Uni. rGO_2015 = Material(np.loadtxt('%srGO_2015.txt' % data_location)) # reduced Graphene Oxide measured at Swinbourne Uni. SiON_Low = Material(np.loadtxt('%sSiON_Low.txt' % data_location)) # measured at Australian National Uni. SiON_High = Material(np.loadtxt('%sSiON_High.txt' % data_location)) # measured at Australian National Uni. Low_Fe_Glass = Material(np.loadtxt('%sLow_Fe_Glass_Pil.txt' % data_location)) # PV lighthouse, unpublished Pilkington data Perovskite_00 = Material(np.loadtxt('%sPerovskite_E_u_00.txt' % data_location)) # doi:10.1021/jz502471h Perovskite = Material(np.loadtxt('%sPerovskite_Loper_E_u_080.txt' % data_location)) # doi:10.1021/jz502471h, with extended urbach tail for parasitic absorption Perovskite_b2b = Material(np.loadtxt('%sPerovskite_b2b_nk.txt' % data_location)) # The above data for n, k data just for band to band transitions # http://pubs.acs.org/doi/suppl/10.1021/acs.jpclett.5b00044/suppl_file/jz5b00044_si_001.pdf Ge_Doped = Material(np.loadtxt('%sGe_Doped.txt'% data_location)) # doi:10.1109/IRMMW-THz.2014.6956438, heavily doped Germanium for mid-infrared plasmonics ITO_annealed = Material(np.loadtxt('%sITO_anneal_Gen_Osc.txt'% data_location)) # ANU measurement
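# Illustrative usage sketch, not part of the original EMUstack source; the numbers
# are made up. It exercises the three construction modes described in the Material
# docstring; n(wl_nm) evaluates the refractive index at a wavelength in nanometres.
if __name__ == "__main__":
    demo_const = Material(1.50 + 0.0j)                        # non-dispersive scalar n
    demo_drude = Material([1.36e16, 1.05e14, 9.5])            # [omega_p, omega_gamma, eps_inf]
    demo_table = Material(np.array([[400.0, 2.00, 0.10],
                                    [800.0, 1.80, 0.05]]))    # wavelength (nm), Re(n), Im(n)
    for demo_mat in (demo_const, demo_drude, demo_table):
        print(demo_mat.n(550.0))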
gpl-3.0
5,210,394,253,854,883,000
48.140244
173
0.452937
false
habi/GlobalDiagnostiX
FocusPlotLine.py
1
5139
# -*- coding: utf-8 -*- """ Script to plot line on TIFF, useful for 'calculating' the best focus of the EssentialMed Setup. Since the objective doesn't really have the focal distance Edmund optics stated... Reads TIFF-Stack with images from focus shifting on the LINOS-rail. Plots a line (coordinates read from a txt-File, see around line 45) on the 'original' image and generates a lineplot from this line side by side with the slice. Opens fiji with all images in the end so we can easily browse through all those slices and find the best focus... """ import libtiff import matplotlib.pylab as plt import linecache import os # Setup # All Apertures, "mixed media" on the intensifying screen # Series = 2 # Only extreme Apertures, only Siemens-Star Series = 3 # Leave '0' for 'natural' length of plot axis. Set to a value if scaling is # desired ScaleAxis = 600 # Just plot a horizontal line in the middle of the images (or shifted by YSHIFT # below). If set to zero, Coordinates are read from file. SimpleHorizontalLine = 0 # Shift this many pixels up from 1024, the middle of the image (Only used if # plotting SimpleHorizontalLine) YSHIFT = -50 if Series == 2: Apertures = [1.8, 4, 8, 16] elif Series == 3: Apertures = [1.8, 16] # The trick with the [-X:] loads only the X last entries of the Aperture-List. for F in Apertures[-4:]: # Open tiff file tif = libtiff.TIFFfile('/afs/psi.ch/project/EssentialMed/Images/' + str(Series) + '-FocusTest/Series_F' + str('%04.1f' % F) + '.tif') # plt.ion() for i in range(0, len(tif.pages)): plt.figure(figsize=(16, 8)) plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None) # Add first subplot with Original Image and plot the line where we take # the lineprofile ax1 = plt.subplot(121) ax1.imshow(tif.asarray(key=i), cmap=plt.cm.gray) ax1.set_xlim([0, 2048]) ax1.set_ylim([2048, 0]) plt.axis('off') if SimpleHorizontalLine == 1: ax1.plot([0 + 400, 2048 - 400], [1024 - YSHIFT, 1024 - YSHIFT], 'r') # Cheat with 'line', so we have something to write at the end... line = 'asdf ' + str(0 + 400) + ' ' + str(1024 - YSHIFT) + ' ' + \ str(2048 - 400) + ' ' + str(1024 - YSHIFT) else: # Reads coordinates from a text file which will then be used to # plot stuff on images. # '+2' since we have two headerlines '+1' since python starts at # line 0 :) line = linecache.getline('/afs/psi.ch/project/EssentialMed/Images/Coordinates_UpInStar.txt', i + 3) line = linecache.getline('/afs/psi.ch/project/EssentialMed/Images/Coordinates_Arbitrary.txt', i + 3) line = linecache.getline('/afs/psi.ch/project/EssentialMed/Images/Coordinates_LowerLine.txt', i + 3) ax1.plot([line.split()[1], line.split()[3]], [line.split()[2], line.split()[4]], 'r') plt.title('Image ' + str(i)) # Plot Lineprofile ax2 = plt.subplot(122) if SimpleHorizontalLine == 1: ax2.plot(tif.asarray(key=i)[1024 - YSHIFT, 0 + 400:2048 - 400]) else: ax2.plot(tif.asarray(key=i)[str(line.split()[2]), int(line.split()[1]):int(line.split()[3])]) if ScaleAxis == 0: # scale x-axis of lineplot to real length ax2.set_xlim([0, (int(line.split()[3]) - int(line.split()[1]))]) else: # scale x-axis of lineplot to the same length for all plots ax2.set_xlim([0, ScaleAxis]) # Adapt to Brightness if F == 1.8: if Series == 2: ax2.set_ylim([0, 256]) else: ax2.set_ylim([0, 256]) # Only for Series 2... elif F == 4: ax2.set_ylim([0, 180]) # Only for Series 2... 
elif F == 8: ax2.set_ylim([0, 250]) elif F == 16: if Series == 2: ax2.set_ylim([0, 75]) else: ax2.set_ylim([0, 256]) plt.title('Line length: ' + str(int(line.split()[3]) - int(line.split()[1]))) plt.draw() SaveName = '/afs/psi.ch/project/EssentialMed/Images/' + \ str(Series) + '-FocusTest/F' + str('%04.1f' % F) + '/F' + \ str('%04.1f' % F) + '_Image_' + str('%02d' % i) + \ '_LineProfile_from_' + str(line.split()[1]) + '_to_' + \ str(line.split()[3]) + '_on_height_' + str(line.split()[2]) + \ '.png' plt.savefig(SaveName) print 'Figure ' + str(i) + ' saved to ' + SaveName plt.close() # View the figures we just saved as stack in Fiji viewcommand = '/scratch/Apps/Fiji.app/fiji-linux ' + \ '/afs/psi.ch/project/EssentialMed/Images/' + str(Series) +\ '-FocusTest/F' + str('%04.1f' % F) + '/F* &' os.system(viewcommand)
unlicense
8,246,217,816,463,031,000
40.780488
112
0.557696
false
dmytroKarataiev/MachineLearning
smartcab/smartcab/simulator.py
1
9740
import os import time import random import importlib import numpy as np class Simulator(object): """Simulates agents in a dynamic smartcab environment. Uses PyGame to display GUI, if available. """ colors = { 'black' : ( 0, 0, 0), 'white' : (255, 255, 255), 'red' : (255, 0, 0), 'green' : ( 0, 255, 0), 'blue' : ( 0, 0, 255), 'cyan' : ( 0, 200, 200), 'magenta' : (200, 0, 200), 'yellow' : (255, 255, 0), 'orange' : (255, 128, 0) } def __init__(self, env, size=None, update_delay=1.0, display=True): self.env = env self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 1) * self.env.block_size) self.width, self.height = self.size self.bg_color = self.colors['white'] self.road_width = 5 self.road_color = self.colors['black'] self.quit = False self.start_time = None self.current_time = 0.0 self.last_updated = 0.0 self.update_delay = update_delay # duration between each step (in secs) ## Stat variables self.trials = 0 self.success = 0 self.ranOutTime = 0 self.hardDeadline = 0 self.display = display if self.display: try: self.pygame = importlib.import_module('pygame') self.pygame.init() self.screen = self.pygame.display.set_mode(self.size) self.frame_delay = max(1, int(self.update_delay * 1000)) # delay between GUI frames in ms (min: 1) self.agent_sprite_size = (32, 32) self.agent_circle_radius = 10 # radius of circle, when using simple representation for agent in self.env.agent_states: agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("../images", "car-{}.png".format(agent.color))), self.agent_sprite_size) agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height()) self.font = self.pygame.font.Font(None, 28) self.paused = False except ImportError as e: self.display = False print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e) except Exception as e: self.display = False print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e) def run(self, n_trials=1): self.quit = False self.trials = 0 self.success = 0 self.ranOutTime = 0 self.hardDeadline = 0 for trial in xrange(n_trials): # print "Simulator.run(): Trial {}".format(trial) # [debug] self.env.reset() self.current_time = 0.0 self.last_updated = 0.0 self.start_time = time.time() while True: try: # Update current time self.current_time = time.time() - self.start_time #print "Simulator.run(): current_time = {:.3f}".format(self.current_time) # Handle GUI events if self.display: for event in self.pygame.event.get(): if event.type == self.pygame.QUIT: self.quit = True elif event.type == self.pygame.KEYDOWN: if event.key == 27: # Esc self.quit = True elif event.unicode == u' ': self.paused = True if self.paused: self.pause() # Update environment if self.current_time - self.last_updated >= self.update_delay: self.env.step() self.last_updated = self.current_time # Render GUI and sleep if self.display: self.render() self.pygame.time.wait(self.frame_delay) except KeyboardInterrupt: self.quit = True finally: if self.quit or self.env.done: self.trials += 1 if self.env.reached == 1: self.success += 1 if self.env.status == 1: self.ranOutTime += 1 elif self.env.status == 2: self.hardDeadline += 1 break if self.quit: break print "Success rate:", (len(self.env.primary_agent.totalStepsSuccess) / 100.0), "Steps to reach:", np.mean(self.env.primary_agent.totalStepsSuccess), \ "Rewards:", 
np.mean(self.env.primary_agent.totalRewardsSuccess), "Penalties:", np.mean(self.env.primary_agent.totalPenaltiesSuccess) print "Trials {}, successfull {}, ran out of time {}, hard deadline {}".format(self.trials, self.success, self.ranOutTime, self.hardDeadline) def render(self): # Clear screen self.screen.fill(self.bg_color) # Draw elements # * Static elements for road in self.env.roads: self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width) for intersection, traffic_light in self.env.intersections.iteritems(): self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), 10) if traffic_light.state: # North-South is open self.pygame.draw.line(self.screen, self.colors['green'], (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size - 15), (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size + 15), self.road_width) else: # East-West is open self.pygame.draw.line(self.screen, self.colors['green'], (intersection[0] * self.env.block_size - 15, intersection[1] * self.env.block_size), (intersection[0] * self.env.block_size + 15, intersection[1] * self.env.block_size), self.road_width) # * Dynamic elements for agent, state in self.env.agent_states.iteritems(): # Compute precise agent location here (back from the intersection some) agent_offset = (2 * state['heading'][0] * self.agent_circle_radius, 2 * state['heading'][1] * self.agent_circle_radius) agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1]) agent_color = self.colors[agent.color] if hasattr(agent, '_sprite') and agent._sprite is not None: # Draw agent sprite (image), properly rotated rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90) self.screen.blit(rotated_sprite, self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2, agent._sprite_size[0], agent._sprite_size[1])) else: # Draw simple agent (circle with a short line segment poking out to indicate heading) self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius) self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width) if agent.get_next_waypoint() is not None: self.screen.blit(self.font.render(agent.get_next_waypoint(), True, agent_color, self.bg_color), (agent_pos[0] + 10, agent_pos[1] + 10)) if state['destination'] is not None: self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 6) self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 15, 2) # * Overlays text_y = 10 for text in self.env.status_text.split('\n'): self.screen.blit(self.font.render(text, True, self.colors['red'], self.bg_color), (100, text_y)) text_y += 20 # Flip buffers self.pygame.display.flip() def pause(self): abs_pause_time = time.time() pause_text = "[PAUSED] Press any key to continue..." 
self.screen.blit(self.font.render(pause_text, True, self.colors['cyan'], self.bg_color), (100, self.height - 40)) self.pygame.display.flip() print pause_text # [debug] while self.paused: for event in self.pygame.event.get(): if event.type == self.pygame.KEYDOWN: self.paused = False self.pygame.time.wait(self.frame_delay) self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (100, self.height - 40)) self.start_time += (time.time() - abs_pause_time)
mit
4,681,323,323,775,302,000
47.944724
220
0.54117
false
strets123/pyms
Gapfill/Function.py
7
9648
''' @summary: # Functions to fill missing peak objects @author: Jairus Bowne @author: Sean O'Callaghan ''' import csv import string import sys, os, errno, string, numpy sys.path.append("/x/PyMS") from pyms.GCMS.IO.ANDI.Function import ANDI_reader from pyms.GCMS.IO.MZML.Function import mzML_reader from pyms.GCMS.Function import build_intensity_matrix_i from pyms.Noise.SavitzkyGolay import savitzky_golay from pyms.Baseline.TopHat import tophat from pyms.Deconvolution.BillerBiemann.Function import get_maxima_list_reduced from pyms.Peak.Function import ion_area from Class import MissingPeak, Sample # .csv reader (cloned from gcqc project) def file2matrix(filename): ''' @summary: Convert a .csv file to a matrix (list of lists) @param filename: Filename (.csv) to convert (area.csv, area_ci.csv) @type filename: StringType @return: Data matrix @rtype: ListType (List of lists) ''' # open(filename, 'rb')? Or unnecessary? with open(filename) as fp: reader = csv.reader(fp, delimiter=",",quotechar="\"") matrix = [] for row in reader: newrow = [] for each in row: try: each = float(each) except: pass newrow.append(each) matrix.append(newrow) return matrix def mp_finder(inputmatrix): """ @summary: setup sample objects with missing peak objects Finds the 'NA's in the transformed area_ci.csv file and makes Sample objects with them @param inputmatrix: Data matrix derived from the area_ci.csv file @type inputmatrix: listType @return: list of Sample objects @rtype: list of pyms.MissingPeak.Class.Sample """ sample_list = [] try: ci_pos = inputmatrix[0].index(' "Quant Ion"') except ValueError: ci_pos = inputmatrix[0].index('"Quant Ion"') uid_pos = inputmatrix[0].index('UID') # Set up the sample objects # All entries on line 1 beyond the Qual Ion position are sample names for i, sample_name in enumerate(inputmatrix[0][ci_pos:]): sample = Sample(sample_name, i+3) #add 4 to allow for UID, RT,QualIon sample_list.append(sample) for line in inputmatrix[1:]: uid = line[uid_pos] common_ion = line[ci_pos] qual_ion_1 = uid.split("-")[0] qual_ion_2 = uid.split("-")[1] rt = uid.split("-")[-1] #print rt for i, area in enumerate(line[ci_pos:]): if area == 'NA': missing_peak = MissingPeak(common_ion, qual_ion_1, \ qual_ion_2, rt) sample_list[i].add_missing_peak(missing_peak) return sample_list def missing_peak_finder(sample, filename, points=13, null_ions=[73, 147],\ crop_ions=[50,540], threshold=1000, rt_window=1, filetype='cdf'): """ @summary: Integrates raw data around missing peak locations to fill in NAs in the data matrix @param sample: The sample object containing missing peaks @type sample: pyms.MissingPeak.Class.Sample @param andi_file: Name of the raw data file @type andi_file: stringType @param points: Peak finding - Peak if maxima over 'points' \ number of scans (Default 3) @type points: intType @param null_ions: Ions to be deleted in the matrix @type null_ions: listType @param crop_ions: Range of Ions to be considered @type crop_ions: listType @param threshold: Minimum intensity of IonChromatogram allowable to fill\ missing peak @type threshold: intType @param rt_window: Window in seconds around average RT to look for \ missing peak @type rt_window: floatType @author: Sean O'Callaghan """ ### some error checks on null and crop ions ### a for root,files,dirs in os.path.walk(): loop print "Sample:", sample.get_name(), "File:", filename if filetype == 'cdf': data = ANDI_reader(filename) elif filetype == 'mzml': data = mzML_reader(filename) else: print "file type not valid" # build integer intensity matrix im = 
build_intensity_matrix_i(data) for null_ion in null_ions: im.null_mass(null_ion) im.crop_mass(crop_ions[0], crop_ions[1]) # get the size of the intensity matrix n_scan, n_mz = im.get_size() # smooth data for ii in range(n_mz): ic = im.get_ic_at_index(ii) ic1 = savitzky_golay(ic, points) ic_smooth = savitzky_golay(ic1, points) ic_base = tophat(ic_smooth, struct="1.5m") im.set_ic_at_index(ii, ic_base) for mp in sample.get_missing_peaks(): mp_rt = mp.get_rt() common_ion = mp.get_ci() qual_ion_1 = float(mp.get_qual_ion1()) qual_ion_2 = float(mp.get_qual_ion2()) ci_ion_chrom = im.get_ic_at_mass(common_ion) print "ci = ",common_ion qi1_ion_chrom = im.get_ic_at_mass(qual_ion_1) print "qi1 = ", qual_ion_1 qi2_ion_chrom = im.get_ic_at_mass(qual_ion_2) print "qi2 = ", qual_ion_2 ###### # Integrate the CI around that particular RT ####### #Convert time to points # How long between scans? points_1 = ci_ion_chrom.get_index_at_time(float(mp_rt)) points_2 = ci_ion_chrom.get_index_at_time(float(mp_rt)-rt_window) print "rt_window = ", points_1 - points_2 rt_window_points = points_1 - points_2 maxima_list = get_maxima_list_reduced(ci_ion_chrom, mp_rt, \ rt_window_points) large_peaks = [] for rt, intens in maxima_list: if intens > threshold: q1_index = qi1_ion_chrom.get_index_at_time(rt) q2_index = qi2_ion_chrom.get_index_at_time(rt) q1_intensity = qi1_ion_chrom.get_intensity_at_index(q1_index) q2_intensity = qi2_ion_chrom.get_intensity_at_index(q2_index) if q1_intensity > threshold/2 and q2_intensity > threshold/2: large_peaks.append([rt, intens]) print('found %d peaks above threshold'%len(large_peaks)) areas = [] for peak in large_peaks: apex = ci_ion_chrom.get_index_at_time(peak[0]) ia = ci_ion_chrom.get_intensity_array().tolist() area, left, fight, l_share, r_share = ion_area(ia, apex, 0) areas.append(area) ######################## areas.sort() if len(areas)>0: biggest_area = areas[-1] mp.set_ci_area(biggest_area) print "found area:", biggest_area, "at rt:", mp_rt else: print "Missing peak at rt = ", mp_rt mp.set_ci_area('na') def transposed(lists): """ @summary: transposes a list of lists @param lists: the list of lists to be transposed @type lists: listType """ if not lists: return [] return map(lambda *row: list(row), *lists) def write_filled_csv(sample_list, area_file, filled_area_file): """ @summary: creates a new area_ci.csv file, replacing NAs with values from the sample_list objects where possible @param sample_list: A list of sample objects @type sample_list: list of Class.Sample @param area_file: the file 'area_ci.csv' from PyMS output @type area_file: stringType @param filled_area_file: the new output file which has NA values replaced @type filled_area_file: stringType """ old_matrix = file2matrix(area_file) #Invert it to be a little more efficent invert_old_matrix = zip(*old_matrix) #print invert_old_matrix[0:5] uid_list = invert_old_matrix[0][1:] rt_list = [] for uid in uid_list: rt = uid.split('-')[-1] rt_list.append(rt) print rt_list #start setting up the output file invert_new_matrix = [] for line in invert_old_matrix[0:2]: invert_new_matrix.append(line) for line in invert_old_matrix[3:]: sample_name = line[0] new_line = [] new_line.append(sample_name) for sample in sample_list: if sample_name in sample.get_name(): rt_area_dict = sample.get_mp_rt_area_dict() print rt_area_dict for i, part in enumerate(line[1:]): #print part if part == 'NA': try: area = rt_area_dict[str(rt_list[i])] new_line.append(area) except(KeyError): pass #print 'missing peak not found for rt =', rt_list[i], \ # "in 
sample:", sample_name else: new_line.append(part) invert_new_matrix.append(new_line) print invert_new_matrix print len(invert_new_matrix[0]), len(invert_new_matrix) fp_new = open(filled_area_file, 'w') # new_matrix = numpy.empty(matrix_size) new_matrix = transposed(invert_new_matrix) for i, line in enumerate(new_matrix): for j, part in enumerate(line): fp_new.write(str(part) +',') fp_new.write("\n") fp_new.close()
gpl-2.0
8,064,595,078,807,377,000
28.686154
93
0.565713
false
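
The PyMS gap-filling module above is driven in four steps: read area_ci.csv into a matrix with file2matrix, build Sample objects carrying the NA entries with mp_finder, integrate the raw data per sample with missing_peak_finder, and write the completed table with write_filled_csv. A minimal driver sketch under those assumptions follows; the module name gapfill, the file names and the raw-data directory are placeholders, and PyMS itself must be importable for the module to load.

# Illustrative driver sketch (module name, paths and file layout are assumptions).
import os

import gapfill  # hypothetical name for the module shown above

area_ci_file = "output/area_ci.csv"   # written by an earlier PyMS alignment run (assumption)
raw_data_dir = "raw"                  # one .cdf file per sample (assumption)

matrix = gapfill.file2matrix(area_ci_file)
# Depending on how area_ci.csv is laid out, the matrix may need transposing
# (e.g. list(zip(*matrix))) so that row 0 holds 'UID', 'Quant Ion' and the sample names.
samples = gapfill.mp_finder(matrix)

for sample in samples:
    # Mapping a sample name to its raw-data file is an assumption about the naming scheme.
    cdf_file = os.path.join(raw_data_dir, sample.get_name().strip('"') + ".cdf")
    gapfill.missing_peak_finder(sample, cdf_file, filetype='cdf')

gapfill.write_filled_csv(samples, area_ci_file, "output/area_ci_filled.csv")
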
stephanie-wang/ray
python/ray/tune/suggest/hyperopt.py
1
9289
import numpy as np import copy import logging from functools import partial import pickle try: hyperopt_logger = logging.getLogger("hyperopt") hyperopt_logger.setLevel(logging.WARNING) import hyperopt as hpo except ImportError: hpo = None from ray.tune.error import TuneError from ray.tune.suggest.suggestion import SuggestionAlgorithm logger = logging.getLogger(__name__) class HyperOptSearch(SuggestionAlgorithm): """A wrapper around HyperOpt to provide trial suggestions. Requires HyperOpt to be installed from source. Uses the Tree-structured Parzen Estimators algorithm, although can be trivially extended to support any algorithm HyperOpt uses. Externally added trials will not be tracked by HyperOpt. Trials of the current run can be saved using save method, trials of a previous run can be loaded using restore method, thus enabling a warm start feature. Parameters: space (dict): HyperOpt configuration. Parameters will be sampled from this configuration and will be used to override parameters generated in the variant generation process. max_concurrent (int): Number of maximum concurrent trials. Defaults to 10. metric (str): The training result objective value attribute. mode (str): One of {min, max}. Determines whether objective is minimizing or maximizing the metric attribute. points_to_evaluate (list): Initial parameter suggestions to be run first. This is for when you already have some good parameters you want hyperopt to run first to help the TPE algorithm make better suggestions for future parameters. Needs to be a list of dict of hyperopt-named variables. Choice variables should be indicated by their index in the list (see example) n_initial_points (int): number of random evaluations of the objective function before starting to aproximate it with tree parzen estimators. Defaults to 20. random_state_seed (int, array_like, None): seed for reproducible results. Defaults to None. gamma (float in range (0,1)): parameter governing the tree parzen estimators suggestion algorithm. Defaults to 0.25. use_early_stopped_trials (bool): Whether to use early terminated trial results in the optimization process. Example: >>> space = { >>> 'width': hp.uniform('width', 0, 20), >>> 'height': hp.uniform('height', -100, 100), >>> 'activation': hp.choice("activation", ["relu", "tanh"]) >>> } >>> current_best_params = [{ >>> 'width': 10, >>> 'height': 0, >>> 'activation': 0, # The index of "relu" >>> }] >>> algo = HyperOptSearch( >>> space, max_concurrent=4, metric="mean_loss", mode="min", >>> points_to_evaluate=current_best_params) """ def __init__(self, space, max_concurrent=10, reward_attr=None, metric="episode_reward_mean", mode="max", points_to_evaluate=None, n_initial_points=20, random_state_seed=None, gamma=0.25, **kwargs): assert hpo is not None, "HyperOpt must be installed!" from hyperopt.fmin import generate_trials_to_calculate assert type(max_concurrent) is int and max_concurrent > 0 assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!" if reward_attr is not None: mode = "max" metric = reward_attr logger.warning( "`reward_attr` is deprecated and will be removed in a future " "version of Tune. " "Setting `metric={}` and `mode=max`.".format(reward_attr)) self._max_concurrent = max_concurrent self._metric = metric # hyperopt internally minimizes, so "max" => -1 if mode == "max": self._metric_op = -1. elif mode == "min": self._metric_op = 1. 
if n_initial_points is None: self.algo = hpo.tpe.suggest else: self.algo = partial( hpo.tpe.suggest, n_startup_jobs=n_initial_points) if gamma is not None: self.algo = partial(self.algo, gamma=gamma) self.domain = hpo.Domain(lambda spc: spc, space) if points_to_evaluate is None: self._hpopt_trials = hpo.Trials() self._points_to_evaluate = 0 else: assert type(points_to_evaluate) == list self._hpopt_trials = generate_trials_to_calculate( points_to_evaluate) self._hpopt_trials.refresh() self._points_to_evaluate = len(points_to_evaluate) self._live_trial_mapping = {} if random_state_seed is None: self.rstate = np.random.RandomState() else: self.rstate = np.random.RandomState(random_state_seed) super(HyperOptSearch, self).__init__(**kwargs) def _suggest(self, trial_id): if self._num_live_trials() >= self._max_concurrent: return None if self._points_to_evaluate > 0: new_trial = self._hpopt_trials.trials[self._points_to_evaluate - 1] self._points_to_evaluate -= 1 else: new_ids = self._hpopt_trials.new_trial_ids(1) self._hpopt_trials.refresh() # Get new suggestion from Hyperopt new_trials = self.algo(new_ids, self.domain, self._hpopt_trials, self.rstate.randint(2**31 - 1)) self._hpopt_trials.insert_trial_docs(new_trials) self._hpopt_trials.refresh() new_trial = new_trials[0] self._live_trial_mapping[trial_id] = (new_trial["tid"], new_trial) # Taken from HyperOpt.base.evaluate config = hpo.base.spec_from_misc(new_trial["misc"]) ctrl = hpo.base.Ctrl(self._hpopt_trials, current_trial=new_trial) memo = self.domain.memo_from_config(config) hpo.utils.use_obj_for_literal_in_memo(self.domain.expr, ctrl, hpo.base.Ctrl, memo) suggested_config = hpo.pyll.rec_eval( self.domain.expr, memo=memo, print_node_on_error=self.domain.rec_eval_print_node_on_error) return copy.deepcopy(suggested_config) def on_trial_result(self, trial_id, result): ho_trial = self._get_hyperopt_trial(trial_id) if ho_trial is None: return now = hpo.utils.coarse_utcnow() ho_trial["book_time"] = now ho_trial["refresh_time"] = now def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Notification for the completion of trial. The result is internally negated when interacting with HyperOpt so that HyperOpt can "maximize" this value, as it minimizes on default. 
""" ho_trial = self._get_hyperopt_trial(trial_id) if ho_trial is None: return ho_trial["refresh_time"] = hpo.utils.coarse_utcnow() if error: ho_trial["state"] = hpo.base.JOB_STATE_ERROR ho_trial["misc"]["error"] = (str(TuneError), "Tune Error") self._hpopt_trials.refresh() else: self._process_result(trial_id, result, early_terminated) del self._live_trial_mapping[trial_id] def _process_result(self, trial_id, result, early_terminated=False): ho_trial = self._get_hyperopt_trial(trial_id) ho_trial["refresh_time"] = hpo.utils.coarse_utcnow() if early_terminated and self._use_early_stopped is False: ho_trial["state"] = hpo.base.JOB_STATE_ERROR ho_trial["misc"]["error"] = (str(TuneError), "Tune Removed") return ho_trial["state"] = hpo.base.JOB_STATE_DONE hp_result = self._to_hyperopt_result(result) ho_trial["result"] = hp_result self._hpopt_trials.refresh() def _to_hyperopt_result(self, result): return {"loss": self._metric_op * result[self._metric], "status": "ok"} def _get_hyperopt_trial(self, trial_id): if trial_id not in self._live_trial_mapping: return hyperopt_tid = self._live_trial_mapping[trial_id][0] return [ t for t in self._hpopt_trials.trials if t["tid"] == hyperopt_tid ][0] def _num_live_trials(self): return len(self._live_trial_mapping) def save(self, checkpoint_dir): trials_object = (self._hpopt_trials, self.rstate.get_state()) with open(checkpoint_dir, "wb") as outputFile: pickle.dump(trials_object, outputFile) def restore(self, checkpoint_dir): with open(checkpoint_dir, "rb") as inputFile: trials_object = pickle.load(inputFile) self._hpopt_trials = trials_object[0] self.rstate.set_state(trials_object[1])
apache-2.0
1,423,453,493,068,253,700
40.10177
79
0.590268
false
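
HyperOptSearch above is meant to be handed to Ray Tune as a search algorithm. The sketch below wires it into tune.run with the reporter-based function API and exercises the save/restore warm start mentioned in the docstring; the objective is a toy placeholder and the exact tune.run signature varies across Ray versions.

# Sketch only: plugging HyperOptSearch into Ray Tune (objective and paths are placeholders).
from hyperopt import hp
from ray import tune
from ray.tune.suggest.hyperopt import HyperOptSearch

space = {
    "width": hp.uniform("width", 0, 20),
    "height": hp.uniform("height", -100, 100),
}

def trainable(config, reporter):
    # Toy objective; a real trainable would report a meaningful training metric.
    reporter(mean_loss=(config["height"] - 1) ** 2 + abs(config["width"] - 5))

algo = HyperOptSearch(space, max_concurrent=4, metric="mean_loss", mode="min")
tune.run(trainable, search_alg=algo, num_samples=20)

# Warm start: persist the TPE state and reload it into a fresh searcher.
algo.save("/tmp/hyperopt_state.pkl")
algo2 = HyperOptSearch(space, max_concurrent=4, metric="mean_loss", mode="min")
algo2.restore("/tmp/hyperopt_state.pkl")
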
albertoferna/compmech
doc/source/conf_scipy.py
3
6677
# -*- coding: utf-8 -*- import sys, os, re # Check Sphinx version import sphinx if sphinx.__version__ < "1.1": raise RuntimeError("Sphinx 1.1 or newer required") needs_sphinx = '1.1' # ----------------------------------------------------------------------------- # General configuration # ----------------------------------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sys.path.insert(0, os.path.abspath('../sphinxext')) extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', 'sphinx.ext.autosummary', 'numpydoc', 'sphinx.ext.graphviz'] # Determine if the matplotlib has a recent enough version of the # plot_directive. try: from matplotlib.sphinxext import plot_directive except ImportError: use_matplotlib_plot_directive = False else: try: use_matplotlib_plot_directive = (plot_directive.__version__ >= 2) except AttributeError: use_matplotlib_plot_directive = False if use_matplotlib_plot_directive: extensions.append('matplotlib.sphinxext.plot_directive') else: raise RuntimeError("You need a recent enough version of matplotlib") # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'CompMech' copyright = '2012-2014 Saullo G. P. Castro' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. import compmech version = compmech.__version__ release = version # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # The reST default role (used for this markup: `text`) to use for all documents. default_role = "math" # List of directories, relative to source directories, that shouldn't be searched # for source files. exclude_dirs = [] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # ----------------------------------------------------------------------------- # HTML output # ----------------------------------------------------------------------------- themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme') # Build without scipy.org sphinx theme present html_style = 'scipy_fallback.css' html_logo = './logo/logo.png' html_sidebars = {'index': 'indexsidebar.html'} html_title = "%s v%s Reference Guide" % (project, version) html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y' html_additional_pages = {} html_use_modindex = True html_copy_source = False html_file_suffix = '.html' htmlhelp_basename = 'CompMechdoc' pngmath_use_preview = True pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent'] # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = 'Written by the Saullo G. P. Castro' latex_documents = [ ('index', 'scipy-ref.tex', 'CompMech Reference Guide', _stdauthor, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. latex_preamble = r''' \usepackage{amsmath} \DeclareUnicodeCharacter{00A0}{\nobreakspace} % In the parameters etc. sections, align uniformly, and adjust label emphasis \usepackage{expdlist} \let\latexdescription=\description \let\endlatexdescription=\enddescription \renewenvironment{description}% {\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}% {\end{latexdescription}} % Make Examples/etc section headers smaller and more compact \makeatletter \titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}% {\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor} \titlespacing*{\paragraph}{0pt}{1ex}{0pt} \makeatother % Save vertical space in parameter lists and elsewhere \makeatletter \renewenvironment{quote}% {\list{}{\topsep=0pt% \parsep \z@ \@plus\p@}% \item\relax}% {\endlist} \makeatother % Fix footer/header \renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}} \renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}} ''' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
latex_use_modindex = False # ----------------------------------------------------------------------------- # Intersphinx configuration # ----------------------------------------------------------------------------- intersphinx_mapping = { 'http://docs.python.org/dev': None, 'http://docs.scipy.org/doc/numpy': None, } # ----------------------------------------------------------------------------- # Numpy extensions # ----------------------------------------------------------------------------- # If we want to do a phantom import from an XML file for all autodocs phantom_import_file = 'dump.xml' # Generate plots for example sections numpydoc_use_plots = True # ----------------------------------------------------------------------------- # Autosummary # ----------------------------------------------------------------------------- if sphinx.__version__ >= "0.7": import glob autosummary_generate = glob.glob("*.rst") # Use svg for graphviz graphviz_output_format = 'svg'
bsd-3-clause
5,327,442,863,046,868,000
30.347418
93
0.606859
false
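
conf_scipy.py above is an ordinary Sphinx configuration; Sphinx only picks it up under the name conf.py, so it would typically be copied or symlinked to that name inside the source directory. A hedged sketch of driving a build programmatically against it is below; all directory names are assumptions.

# Sketch: programmatic HTML build using the configuration above (paths are assumptions,
# and conf_scipy.py is assumed to have been copied to doc/source/conf.py first).
from sphinx.application import Sphinx

app = Sphinx(
    srcdir="doc/source",
    confdir="doc/source",
    outdir="doc/build/html",
    doctreedir="doc/build/doctrees",
    buildername="html",
)
app.build()
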
radioxoma/immunopy
immunopy/stain/cdeconvcl.py
1
6396
#!/usr/bin/env python2 # -*- coding: utf-8 -*- from __future__ import division """ Created on Wed Aug 06 19:37:30 2014 @author: radioxoma """ import os import numpy as np from skimage import color import pyopencl as cl import pyopencl.array as cla from scipy import misc import matplotlib.pyplot as plt VERBOSE = False class ColorDeconvolution(object): """Provide color deconvolution facilities with OpenCL. """ def __init__(self): super(ColorDeconvolution, self).__init__() self.__basetype = np.float32 curdir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(curdir, 'kernels.cl')) as f: kernels = f.read() # ctx = cl.create_some_context() self.ctx = cl.Context( cl.get_platforms()[0].get_devices(device_type=cl.device_type.GPU)) if VERBOSE: print(self.ctx.get_info(cl.context_info.DEVICES)) queue = cl.CommandQueue(self.ctx) self.prg = cl.Program(self.ctx, kernels).build() # print(self.prg.get_info(cl.program_info.KERNEL_NAMES)) # Not in 1:2013.2 # self.stain = color.hed_from_rgb.astype(self.__basetype) # self.stain_g = cla.to_device(queue, self.f_order(self.stain), self.mem_pool) # stain = np.arange(9, dtype=self.__basetype).reshape((3, 3)) self.mem_pool = cl.tools.MemoryPool(cl.tools.ImmediateAllocator(queue)) def check_contiguous(self, arr): """Change memory layout to C (row-major) order, cast to float32. It's *not* oposite of f_order. """ if not arr.flags.c_contiguous: arr = np.ascontiguousarray(arr, dtype=np.float32) if VERBOSE: print('check_arr: ascontiguous %d elements - performance may suffer') % arr.size if arr.dtype is not np.float32: arr = arr.astype(np.float32) if VERBOSE: print('check_arr: casting to float32 %d elements - performance may suffer') % arr.size return arr def check_fortran(self, arr): """Change memory layout to FORTRAN (column-major) order, cast to float32. """ if not arr.flags.f_contiguous: arr = np.asfortranarray(arr, dtype=np.float32) if VERBOSE: print('check_arr: as fortran %d elements - performance may suffer') % arr.size if arr.dtype is not np.float32: arr = arr.astype(np.float32) if VERBOSE: print('check_arr: casting to float32 %d elements - performance may suffer') % arr.size return arr def optical_density(self, rgb): queue = cl.CommandQueue(self.ctx) if rgb.dtype is not np.float32: rgb = rgb.astype(np.float32) img_g = cla.to_device(queue, rgb, self.mem_pool) self.prg.opticalDense(queue, (img_g.size, 1), None, img_g.data) return img_g.get() def dot(self, A, B): """Output must have same shape as A. Incoming RGB matrix "A" should be aligned """ A = self.check_contiguous(A) B = self.check_contiguous(B) assert(A.flags.c_contiguous == B.flags.c_contiguous) queue = cl.CommandQueue(self.ctx) if A.dtype is not np.float32: A = A.astype(np.float32) if B.dtype is not np.float32: B = B.astype(np.float32) A_g = cla.to_device(queue, A, self.mem_pool) B_g = cla.to_device(queue, B, self.mem_pool) C_g = cla.empty(queue, (A.shape[0], B.shape[1]), dtype=A_g.dtype, order="C", allocator=self.mem_pool) self.prg.gemm_slow(queue, C_g.shape, None, C_g.data, A_g.data, B_g.data, np.int32(A.shape[1]), np.int32(B.shape[1])) return C_g.get() def unmix_stains(self, rgb, stain): """Take RGB IHC image and split it to stains like skimage version. 
""" rgb = self.check_contiguous(rgb) stain = self.check_contiguous(stain) assert(rgb.flags.c_contiguous == stain.flags.c_contiguous) queue = cl.CommandQueue(self.ctx) rgb2d = rgb.reshape(-1, 3) # 2D array with R,G,B columns from 3D rgb2d_g = cla.to_device(queue, rgb2d, allocator=self.mem_pool) stain_g = cla.to_device(queue, stain, allocator=self.mem_pool) out_g = cla.empty(queue, (rgb2d.shape[0], stain.shape[1]), dtype=rgb2d_g.dtype, order="C", allocator=self.mem_pool) # Process as flat array self.prg.opticalDense(queue, (rgb2d.size, 1), None, rgb2d_g.data) # In PyOpenCL arrays rgb2d_g.shape[0] is column count (usually 3 columns here). self.prg.gemm_slow(queue, out_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d.shape[1]), np.int32(stain.shape[1])) ### self.prg.gemm(queue, rgb2d_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d_g.shape[0]), np.int32(stain_g.shape[1])) # event = # event.wait() return out_g.get().reshape(rgb.shape) # Again 3D array def color_deconvolution(self, rgb, stain): """Return stains in normal (non-logarithmic) color space. """ rgb = self.check_contiguous(rgb) stain = self.check_contiguous(stain) assert(rgb.flags.c_contiguous == stain.flags.c_contiguous) queue = cl.CommandQueue(self.ctx) rgb2d = rgb.reshape(-1, 3) # 2D array with R,G,B columns from 3D rgb2d_g = cla.to_device(queue, rgb2d, allocator=self.mem_pool) stain_g = cla.to_device(queue, stain, allocator=self.mem_pool) out_g = cla.empty(queue, (rgb2d.shape[0], stain.shape[1]), dtype=rgb2d_g.dtype, order="C", allocator=self.mem_pool) # Process as flat array self.prg.opticalDense(queue, (rgb2d.size, 1), None, rgb2d_g.data) # In PyOpenCL arrays rgb2d_g.shape[0] is column count (usually 3 columns here). self.prg.gemm_slow(queue, out_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d.shape[1]), np.int32(stain.shape[1])) self.prg.toColorDense(queue, (out_g.size, 1), None, out_g.data) return out_g.get().reshape(rgb.shape) # Again 3D array def f_order(arr): """Convert to FORTRAN (column-major) order, if still not.""" if arr.flags.c_contiguous: print("Transposing array") return np.array(arr.T, copy=False, order='F') else: return np.array(arr, copy=False, order='F')
mit
6,577,647,387,119,431,000
42.510204
149
0.61601
false
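
ColorDeconvolution above mirrors scikit-image's colour deconvolution but runs the unmixing on a GPU through PyOpenCL. The sketch below shows the intended call pattern with skimage's hed_from_rgb stain matrix (the same matrix the commented-out line in __init__ refers to); it assumes an OpenCL-capable GPU and that the module is importable as cdeconvcl.

# Sketch: GPU colour deconvolution of an RGB IHC image (import path is an assumption).
import numpy as np
from skimage import color, data

from cdeconvcl import ColorDeconvolution  # the module defined above

rgb = data.immunohistochemistry().astype(np.float32)     # sample IHC image shipped with skimage
stain_matrix = color.hed_from_rgb.astype(np.float32)     # Haematoxylin / Eosin / DAB unmixing matrix

deconv = ColorDeconvolution()
stains_od = deconv.unmix_stains(rgb, stain_matrix)       # stains in optical-density (log) space
stains = deconv.color_deconvolution(rgb, stain_matrix)   # stains back in linear intensity space

print(stains_od.shape, stains.shape)  # both keep the input's (rows, cols, 3) shape
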
shunsukeaihara/pysas
pysas/pyexcite.py
1
1172
# -*- coding: utf-8 -*-
import numpy as np

try:
    from numba import jit
except ImportError:
    from pysas.decorators import do_nothing as jit


@jit
def gen_frame(cur_f0, prev_f0, frame, samplingrate, gen_samples, gauss):
    # Unvoiced frame (F0 == 0): return noise instead of a pulse train.
    if cur_f0 == 0.0 or prev_f0 == 0.0:
        if gauss:
            return np.random.normal(0.0, 1.0, frame), gen_samples
        else:
            return np.random.random(frame), gen_samples
    ret = np.zeros(frame)
    # Pitch periods (in samples) at the frame edges, interpolated linearly across the frame.
    ncur = float(samplingrate) / cur_f0
    nprev = float(samplingrate) / prev_f0
    slope = (nprev - ncur) / float(frame)
    for i in range(frame):
        f0 = ncur + slope * i
        if gen_samples > int(f0):
            # One pitch period has elapsed: mark a pulse here
            # (its amplitude becomes sqrt(period) after the sqrt below).
            ret[i] = f0
            gen_samples -= int(f0)
        gen_samples += 1
    return np.sqrt(ret), gen_samples


@jit
def gen_pulse(f0, frame, samplingrate, gauss):
    # Build the excitation signal frame by frame from the F0 contour.
    ret = np.zeros(f0.shape[0] * frame)
    prev_f0 = f0[0]
    gen_samples = 0
    for i in range(f0.size):
        cur_f0 = f0[i]
        pulses, gen_samples = gen_frame(cur_f0, prev_f0, frame, samplingrate,
                                        gen_samples, gauss)
        prev_f0 = cur_f0
        start = frame * i
        ret[start:start + frame] = pulses
    return ret
mit
3,515,681,140,772,425,000
27.585366
97
0.585324
false
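
gen_pulse above converts a frame-wise F0 contour into an excitation signal: pulse trains in voiced frames and noise in unvoiced ones. The sketch below feeds it a synthetic contour; the frame length and sampling rate are arbitrary choices.

# Sketch: excitation from a synthetic F0 contour (frame size and rate are arbitrary).
import numpy as np
from pysas.pyexcite import gen_pulse  # import path inferred from the 'path' field above

samplingrate = 16000
frame = 80                        # samples per frame (5 ms at 16 kHz)
f0 = np.full(200, 120.0)          # 200 voiced frames at 120 Hz
f0[50:60] = 0.0                   # a short unvoiced stretch, filled with noise by gen_frame

excitation = gen_pulse(f0, frame, samplingrate, gauss=False)
print(excitation.shape)           # (200 * 80,) == (16000,)
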
QISKit/qiskit-sdk-py
qiskit/visualization/pulse/matplotlib.py
1
22579
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=invalid-name """Matplotlib classes for pulse visualization.""" import collections import numpy as np try: from matplotlib import pyplot as plt, gridspec HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle from qiskit.visualization.pulse import interpolation from qiskit.pulse.channels import (DriveChannel, ControlChannel, MeasureChannel, AcquireChannel, SnapshotChannel) from qiskit.pulse import (SamplePulse, FrameChange, PersistentValue, Snapshot, Acquire, PulseError) class EventsOutputChannels: """Pulse dataset for channel.""" def __init__(self, t0, tf): """Create new channel dataset. Args: t0 (int): starting time of plot tf (int): ending time of plot """ self.pulses = {} self.t0 = t0 self.tf = tf self._waveform = None self._framechanges = None self._conditionals = None self._snapshots = None self._labels = None self.enable = False def add_instruction(self, start_time, pulse): """Add new pulse instruction to channel. Args: start_time (int): Starting time of instruction pulse (Instruction): Instruction object to be added """ if start_time in self.pulses.keys(): self.pulses[start_time].append(pulse.command) else: self.pulses[start_time] = [pulse.command] @property def waveform(self): """Get waveform.""" if self._waveform is None: self._build_waveform() return self._waveform[self.t0:self.tf] @property def framechanges(self): """Get frame changes.""" if self._framechanges is None: self._build_waveform() return self._trim(self._framechanges) @property def conditionals(self): """Get conditionals.""" if self._conditionals is None: self._build_waveform() return self._trim(self._conditionals) @property def snapshots(self): """Get snapshots.""" if self._snapshots is None: self._build_waveform() return self._trim(self._snapshots) @property def labels(self): """Get labels.""" if self._labels is None: self._build_waveform() return self._trim(self._labels) def is_empty(self): """Return if pulse is empty. Returns: bool: if the channel has nothing to plot """ if any(self.waveform) or self.framechanges or self.conditionals or self.snapshots: return False return True def to_table(self, name): """Get table contains. Args: name (str): name of channel Returns: dict: dictionary of events in the channel """ time_event = [] framechanges = self.framechanges conditionals = self.conditionals snapshots = self.snapshots for key, val in framechanges.items(): data_str = 'framechange: %.2f' % val time_event.append((key, name, data_str)) for key, val in conditionals.items(): data_str = 'conditional, %s' % val time_event.append((key, name, data_str)) for key, val in snapshots.items(): data_str = 'snapshot: %s' % val time_event.append((key, name, data_str)) return time_event def _build_waveform(self): """Create waveform from stored pulses. 
""" self._framechanges = {} self._conditionals = {} self._snapshots = {} self._labels = {} fc = 0 pv = np.zeros(self.tf + 1, dtype=np.complex128) wf = np.zeros(self.tf + 1, dtype=np.complex128) last_pv = None for time, commands in sorted(self.pulses.items()): if time > self.tf: break tmp_fc = 0 for command in commands: if isinstance(command, FrameChange): tmp_fc += command.phase pv[time:] = 0 elif isinstance(command, Snapshot): self._snapshots[time] = command.name if tmp_fc != 0: self._framechanges[time] = tmp_fc fc += tmp_fc for command in commands: if isinstance(command, PersistentValue): pv[time:] = np.exp(1j*fc) * command.value last_pv = (time, command) break for command in commands: duration = command.duration tf = min(time + duration, self.tf) if isinstance(command, SamplePulse): wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time] pv[time:] = 0 self._labels[time] = (tf, command) if last_pv is not None: pv_cmd = last_pv[1] self._labels[last_pv[0]] = (time, pv_cmd) last_pv = None elif isinstance(command, Acquire): wf[time:tf] = np.ones(tf - time) self._labels[time] = (tf, command) self._waveform = wf + pv def _trim(self, events): """Return events during given `time_range`. Args: events (dict): time and operation of events Returns: dict: dictionary of events within the time """ events_in_time_range = {} for k, v in events.items(): if self.t0 <= k <= self.tf: events_in_time_range[k] = v return events_in_time_range class SamplePulseDrawer: """A class to create figure for sample pulse.""" def __init__(self, style): """Create new figure. Args: style (PulseStyle): style sheet """ self.style = style or PulseStyle() def draw(self, pulse, dt, interp_method, scaling=1): """Draw figure. Args: pulse (SamplePulse): SamplePulse to draw dt (float): time interval interp_method (Callable): interpolation function See `qiskit.visualization.interpolation` for more information scaling (float): Relative visual scaling of waveform amplitudes Returns: matplotlib.figure: A matplotlib figure object of the pulse envelope """ figure = plt.figure() interp_method = interp_method or interpolation.step_wise figure.set_size_inches(self.style.figsize[0], self.style.figsize[1]) ax = figure.add_subplot(111) ax.set_facecolor(self.style.bg_color) samples = pulse.samples time = np.arange(0, len(samples) + 1, dtype=float) * dt time, re, im = interp_method(time, samples, self.style.num_points) # plot ax.fill_between(x=time, y1=re, y2=np.zeros_like(time), facecolor=self.style.wave_color[0], alpha=0.3, edgecolor=self.style.wave_color[0], linewidth=1.5, label='real part') ax.fill_between(x=time, y1=im, y2=np.zeros_like(time), facecolor=self.style.wave_color[1], alpha=0.3, edgecolor=self.style.wave_color[1], linewidth=1.5, label='imaginary part') ax.set_xlim(0, pulse.duration * dt) if scaling: ax.set_ylim(-scaling, scaling) else: v_max = max(max(np.abs(re)), max(np.abs(im))) ax.set_ylim(-1.2 * v_max, 1.2 * v_max) return figure class ScheduleDrawer: """A class to create figure for schedule and channel.""" def __init__(self, style): """Create new figure. 
Args: style (SchedStyle): style sheet """ self.style = style or SchedStyle() def _build_channels(self, schedule, channels_to_plot, t0, tf): # prepare waveform channels drive_channels = collections.OrderedDict() measure_channels = collections.OrderedDict() control_channels = collections.OrderedDict() acquire_channels = collections.OrderedDict() snapshot_channels = collections.OrderedDict() _channels = list(schedule.channels) + channels_to_plot _channels = list(set(_channels)) for chan in _channels: if isinstance(chan, DriveChannel): try: drive_channels[chan] = EventsOutputChannels(t0, tf) except PulseError: pass elif isinstance(chan, MeasureChannel): try: measure_channels[chan] = EventsOutputChannels(t0, tf) except PulseError: pass elif isinstance(chan, ControlChannel): try: control_channels[chan] = EventsOutputChannels(t0, tf) except PulseError: pass elif isinstance(chan, AcquireChannel): try: acquire_channels[chan] = EventsOutputChannels(t0, tf) except PulseError: pass elif isinstance(chan, SnapshotChannel): try: snapshot_channels[chan] = EventsOutputChannels(t0, tf) except PulseError: pass output_channels = {**drive_channels, **measure_channels, **control_channels, **acquire_channels} channels = {**output_channels, **acquire_channels, **snapshot_channels} # sort by index then name to group qubits together. output_channels = collections.OrderedDict(sorted(output_channels.items(), key=lambda x: (x[0].index, x[0].name))) channels = collections.OrderedDict(sorted(channels.items(), key=lambda x: (x[0].index, x[0].name))) for start_time, instruction in schedule.instructions: for channel in instruction.channels: if channel in output_channels: output_channels[channel].add_instruction(start_time, instruction) elif channel in snapshot_channels: snapshot_channels[channel].add_instruction(start_time, instruction) return channels, output_channels, snapshot_channels def _count_valid_waveforms(self, channels, scaling=1, channels_to_plot=None, plot_all=False): # count numbers of valid waveform n_valid_waveform = 0 v_max = 0 for channel, events in channels.items(): if channels_to_plot: if channel in channels_to_plot: waveform = events.waveform v_max = max(v_max, max(np.abs(np.real(waveform))), max(np.abs(np.imag(waveform)))) n_valid_waveform += 1 events.enable = True else: if not events.is_empty() or plot_all: waveform = events.waveform v_max = max(v_max, max(np.abs(np.real(waveform))), max(np.abs(np.imag(waveform)))) n_valid_waveform += 1 events.enable = True # when input schedule is empty or comprises only frame changes, # we need to overwrite maximum amplitude by a value greater than zero, # otherwise auto axis scaling will fail with zero division. 
v_max = v_max or 1 if scaling: v_max = 0.5 * scaling else: v_max = 0.5 / (1.2 * v_max) return n_valid_waveform, v_max # pylint: disable=unused-argument def _draw_table(self, figure, channels, dt, n_valid_waveform): # create table table_data = [] if self.style.use_table: for channel, events in channels.items(): if events.enable: table_data.extend(events.to_table(channel.name)) table_data = sorted(table_data, key=lambda x: x[0]) # plot table if table_data: # table area size ncols = self.style.table_columns nrows = int(np.ceil(len(table_data)/ncols)) # fig size h_table = nrows * self.style.fig_unit_h_table h_waves = (self.style.figsize[1] - h_table) # create subplots gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0) tb = plt.subplot(gs[0]) ax = plt.subplot(gs[1]) # configure each cell tb.axis('off') cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)] cell_color = [self.style.table_color * ncols for _jj in range(nrows)] cell_width = [*([0.2, 0.2, 0.5] * ncols)] for ii, data in enumerate(table_data): # pylint: disable=unbalanced-tuple-unpacking r, c = np.unravel_index(ii, (nrows, ncols), order='f') # pylint: enable=unbalanced-tuple-unpacking time, ch_name, data_str = data # item cell_value[r][3 * c + 0] = 't = %s' % time * dt cell_value[r][3 * c + 1] = 'ch %s' % ch_name cell_value[r][3 * c + 2] = data_str table = tb.table(cellText=cell_value, cellLoc='left', rowLoc='center', colWidths=cell_width, bbox=[0, 0, 1, 1], cellColours=cell_color) table.auto_set_font_size(False) table.set_fontsize = self.style.table_font_size else: ax = figure.add_subplot(111) figure.set_size_inches(self.style.figsize[0], self.style.figsize[1]) return ax def _draw_snapshots(self, ax, snapshot_channels, dt, y0): for events in snapshot_channels.values(): snapshots = events.snapshots if snapshots: for time in snapshots: ax.annotate(s=u"\u25D8", xy=(time*dt, y0), xytext=(time*dt, y0+0.08), arrowprops={'arrowstyle': 'wedge'}, ha='center') def _draw_framechanges(self, ax, fcs, dt, y0): framechanges_present = True for time in fcs.keys(): ax.text(x=time*dt, y=y0, s=r'$\circlearrowleft$', fontsize=self.style.icon_font_size, ha='center', va='center') return framechanges_present def _get_channel_color(self, channel): # choose color if isinstance(channel, DriveChannel): color = self.style.d_ch_color elif isinstance(channel, ControlChannel): color = self.style.u_ch_color elif isinstance(channel, MeasureChannel): color = self.style.m_ch_color elif isinstance(channel, AcquireChannel): color = self.style.a_ch_color else: color = 'black' return color def _prev_label_at_time(self, prev_labels, time): for _, labels in enumerate(prev_labels): for t0, (tf, _) in labels.items(): if time in (t0, tf): return True return False def _draw_labels(self, ax, labels, prev_labels, dt, y0): for t0, (tf, cmd) in labels.items(): if isinstance(cmd, PersistentValue): name = cmd.name if cmd.name else 'pv' elif isinstance(cmd, Acquire): name = cmd.name if cmd.name else 'acquire' else: name = cmd.name ax.annotate(r'%s' % name, xy=((t0+tf)//2*dt, y0), xytext=((t0+tf)//2*dt, y0-0.07), fontsize=self.style.label_font_size, ha='center', va='center') linestyle = self.style.label_ch_linestyle alpha = self.style.label_ch_alpha color = self.style.label_ch_color if not self._prev_label_at_time(prev_labels, t0): ax.axvline(t0*dt, -1, 1, color=color, linestyle=linestyle, alpha=alpha) if not (self._prev_label_at_time(prev_labels, tf) or tf in labels): ax.axvline(tf*dt, -1, 1, color=color, linestyle=linestyle, alpha=alpha) def 
_draw_channels(self, ax, output_channels, interp_method, t0, tf, dt, v_max, label=False, framechange=True): y0 = 0 prev_labels = [] for channel, events in output_channels.items(): if events.enable: # plot waveform waveform = events.waveform time = np.arange(t0, tf + 1, dtype=float) * dt if waveform.any(): time, re, im = interp_method(time, waveform, self.style.num_points) else: # when input schedule is empty or comprises only frame changes, # we should avoid interpolation due to lack of data points. # instead, it just returns vector of zero. re, im = np.zeros_like(time), np.zeros_like(time) color = self._get_channel_color(channel) # scaling and offset re = v_max * re + y0 im = v_max * im + y0 offset = np.zeros_like(time) + y0 # plot ax.fill_between(x=time, y1=re, y2=offset, facecolor=color[0], alpha=0.3, edgecolor=color[0], linewidth=1.5, label='real part') ax.fill_between(x=time, y1=im, y2=offset, facecolor=color[1], alpha=0.3, edgecolor=color[1], linewidth=1.5, label='imaginary part') ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0) # plot frame changes fcs = events.framechanges if fcs and framechange: self._draw_framechanges(ax, fcs, dt, y0) # plot labels labels = events.labels if labels and label: self._draw_labels(ax, labels, prev_labels, dt, y0) prev_labels.append(labels) else: continue # plot label ax.text(x=0, y=y0, s=channel.name, fontsize=self.style.axis_font_size, ha='right', va='center') y0 -= 1 return y0 def draw(self, schedule, dt, interp_method, plot_range, scaling=1, channels_to_plot=None, plot_all=True, table=True, label=False, framechange=True): """Draw figure. Args: schedule (ScheduleComponent): Schedule to draw dt (float): time interval interp_method (Callable): interpolation function See `qiskit.visualization.interpolation` for more information plot_range (tuple[float]): plot range scaling (float): Relative visual scaling of waveform amplitudes channels_to_plot (list[OutputChannel]): channels to draw plot_all (bool): if plot all channels even it is empty table (bool): Draw event table label (bool): Label individual instructions framechange (bool): Add framechange indicators Returns: matplotlib.figure: A matplotlib figure object for the pulse schedule Raises: VisualizationError: when schedule cannot be drawn """ figure = plt.figure() if not channels_to_plot: channels_to_plot = [] interp_method = interp_method or interpolation.step_wise # setup plot range if plot_range: t0 = int(np.floor(plot_range[0]/dt)) tf = int(np.floor(plot_range[1]/dt)) else: t0 = 0 # when input schedule is empty or comprises only frame changes, # we need to overwrite pulse duration by an integer greater than zero, # otherwise waveform returns empty array and matplotlib will be crashed. tf = schedule.stop_time or 1 # prepare waveform channels (channels, output_channels, snapshot_channels) = self._build_channels(schedule, channels_to_plot, t0, tf) # count numbers of valid waveform n_valid_waveform, v_max = self._count_valid_waveforms(output_channels, scaling=scaling, channels_to_plot=channels_to_plot, plot_all=plot_all) if table: ax = self._draw_table(figure, channels, dt, n_valid_waveform) else: ax = figure.add_subplot(111) figure.set_size_inches(self.style.figsize[0], self.style.figsize[1]) ax.set_facecolor(self.style.bg_color) y0 = self._draw_channels(ax, output_channels, interp_method, t0, tf, dt, v_max, label=label, framechange=framechange) self._draw_snapshots(ax, snapshot_channels, dt, y0) ax.set_xlim(t0 * dt, tf * dt) ax.set_ylim(y0, 1) ax.set_yticklabels([]) return figure
apache-2.0
9,021,072,622,400,497,000
36.197694
96
0.529829
false
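
SamplePulseDrawer above is the backend that qiskit's pulse visualization calls into for single pulses. A direct usage sketch follows, targeting the same pre-1.0 Terra API as the file itself: it builds a Gaussian-shaped SamplePulse and passes it to draw() with the step-wise interpolation the module already imports.

# Sketch: driving SamplePulseDrawer directly (same draw() signature as defined above).
import numpy as np
from qiskit.pulse import SamplePulse
from qiskit.visualization.pulse import interpolation
from qiskit.visualization.pulse.matplotlib import SamplePulseDrawer
from qiskit.visualization.pulse.qcstyle import PulseStyle

duration = 160
t = np.arange(duration)
samples = 0.5 * np.exp(-0.5 * ((t - duration / 2) / 20.0) ** 2)  # Gaussian envelope, |amp| <= 1

drawer = SamplePulseDrawer(style=PulseStyle())
figure = drawer.draw(SamplePulse(samples), dt=1, interp_method=interpolation.step_wise)
figure.savefig("pulse.png")
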
jungla/ICOM-fluidity-toolbox
2D/U/plot_W_t.py
1
3442
import os, sys import vtktools import fluidity_tools import numpy as np import matplotlib as mpl mpl.use('ps') import matplotlib.pyplot as plt #label = sys.argv[1] #basename = sys.argv[2] path0 = path1 = path2 = '../RST/stat_files/' path2b = '/scratch/jmensa/m_10_1/' # file0 = 'm_50_6f.stat' filepath0 = path0+file0 stat0 = fluidity_tools.stat_parser(filepath0) file1 = 'm_25_1.stat' filepath1 = path1+file1 stat1 = fluidity_tools.stat_parser(filepath1) file2 = 'm_10_1.stat' filepath2 = path2+file2 stat2 = fluidity_tools.stat_parser(filepath2) file2b = 'mli_checkpoint.stat' filepath2b = path2b+file2b stat2b = fluidity_tools.stat_parser(filepath2b) #file1 = 'ring.stat' #filepath1 = path1+file1 #stat1 = fluidity_tools.stat_parser(filepath1) time0 = stat0["ElapsedTime"]["value"]/86400.0 time1 = stat1["ElapsedTime"]["value"]/86400.0 time2 = stat2["ElapsedTime"]["value"]/86400.0 time2b = stat2b["ElapsedTime"]["value"]/86400.0 #time1 = stat1["ElapsedTime"]["value"]/86400.0 Temp0 = stat0["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"] Temp1 = stat1["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"] Temp2 = stat2["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"] Temp2b = stat2b["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"] #KE1 = 0.5*np.sqrt(stat1["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]) Temp2 = Temp2[np.where(time2<time2b[0])] time2 = time2[np.where(time2<time2b[0])] Temp2 = np.hstack((Temp2,Temp2b)) time2 = np.hstack((time2,time2b)) dayf = np.min((len(time0),len(time1),len(time2))) Temp0a = 0 Temp1a = Temp1[:dayf]-Temp0[:dayf] Temp2a = Temp2[:dayf]-Temp0[:dayf] T0 = len(time0) T1 = len(time1) T2 = len(time2) # volume V = 1 #2000.0*2000.0*50 # plot KE fig = plt.figure(figsize=(6,3)) #T10, = plt.plot(time0[np.where(time0<=4)], Temp0a[np.where(time0<=4)]/V, 'r-',linewidth=1.5) T50, = plt.plot(time1[np.where(time1<=5)], Temp1a[np.where(time1<=5)]/V, 'r',linewidth=1.5) T25, = plt.plot(time2[np.where(time2<=5)], Temp2a[np.where(time2<=5)]/V, 'b',linewidth=1.5) #plt.ylim([0.0014, 0.00142]) plt.xlim([0, 7]) plt.xticks(np.linspace(0,5,6),np.linspace(0,5,6).astype(int)) #plt.xlim([0.920, 0.980]) plt.legend([T50,T25],['$B50_m$ - $B10_m$','$B25_m$ - $B10_m$'],loc=4) #plt.plot(time1, KE1/V, 'k',linewidth=1.5) plt.xlabel("Time $[days]$",fontsize=18) plt.ylabel("$l^2-norm$ w' $[m s^{-1}]$",fontsize=18) plt.tight_layout() plt.savefig('./plot/V_t_50_25_10.eps') plt.close() print 'saved '+'./plot/V_t_50_25_10.eps\n' # plot KE #Temp1 = Temp1-np.mean(Temp1) fig = plt.figure(figsize=(6,3)) #T50, = plt.plot(time0[np.where(time0<=4)], Temp0[np.where(time0<=4)]/V, 'r-',linewidth=1.5) T10, = plt.plot(time0[np.where(time0<=5)], Temp0[np.where(time0<=5)]/V, 'g',linewidth=1.5) #T10, = plt.plot(time2[np.where(time2<=4)], Temp2[np.where(time2<=4)]/V, 'b-',linewidth=1.5) #T10, = plt.plot(time2b[np.where(time2b<=4)], Temp2b[np.where(time2b<=4)]/V, 'b-',linewidth=1.5) #plt.ylim([0.0014, 0.00142]) plt.xlim([0, 7]) plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) plt.xticks(np.linspace(0,5,6),np.linspace(0,5,6).astype(int)) plt.legend([T10],['$B10_m$'],loc=4) #plt.xlim([0.920, 0.980]) #plt.legend([T50,T25,T10],['B50_m','B25_m','B10_m']) #plt.plot(time1, KE1/V, 'k',linewidth=1.5) plt.xlabel("Time $[days]$",fontsize=18) plt.ylabel("$l^2-norm$ w $[m s^{-1}]$",fontsize=18) #plt.ylim([1.48, 1.52]) plt.tight_layout() plt.savefig('./plot/V_t_25.eps') plt.close() print 'saved '+'./plot/V_t_25.eps\n' #
gpl-2.0
3,574,806,178,951,072,000
30.009009
96
0.665892
false
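
The script above extracts l2-norms of vertical velocity from Fluidity .stat files and plots differences between resolutions. A stripped-down sketch of the same pattern is below; the .stat file name is a placeholder and only fields the script itself already reads are accessed.

# Sketch: minimal version of the .stat plotting pattern above (file name is a placeholder).
import fluidity_tools
import matplotlib
matplotlib.use('ps')
import matplotlib.pyplot as plt

stat = fluidity_tools.stat_parser("m_25_1.stat")
time_days = stat["ElapsedTime"]["value"] / 86400.0
w_norm = stat["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]

plt.plot(time_days, w_norm)
plt.xlabel("Time [days]")
plt.ylabel("l2-norm w [m/s]")
plt.savefig("w_norm.eps")
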
m4rx9/rna-pdb-tools
rna_tools/tools/rna_filter/rna_filter.py
2
11906
#!/usr/bin/env python """rna_filter.py - calculate distances based on given restrants on PDB files or SimRNA trajectories. Changes: weight is always 1 (at least for now). ,>,=,>=,<= . [PREVIOUS DOCUMENTATION - TO BE REMOVED] rna_filter.py -s 4gxy_rpr.pdb -r rp06_MohPairs.rfrestrs d:A5-A42 100.0 measured: 26.7465763417 [x] d:A11-A26 100.0 measured: 19.2863696104 [x] [mm] rp06$ git:(master) $ rna_filter.py -s 4gxy_rpr.pdb -r rp06_MohPairs.rfrestrs d:A5-A42 100.0 measured: 26.7465763417 [x] d:A11-A26 100.0 measured: 19.2863696104 [x] Traceback (most recent call last): File "/home/magnus/work-src/rna-pdb-tools/bin/rna_filter.py", line 270, in <module> calc_scores_for_pdbs(args.structures, restraints, args.verbose) File "/home/magnus/work-src/rna-pdb-tools/bin/rna_filter.py", line 221, in calc_scores_for_pdbs dist = get_distance(residues[h[0]]['mb'], residues[h[1]]['mb']) KeyError: 'A24' correct, there is no A24 in this structure: The format of restraints:: (d:A1-A2 < 10.0 1) = if distance between A1 and A2 lower than 10.0, score it with 1 Usage:: $ python rna_filter.py -r test_data/restraints.txt -s test_data/CG.pdb d:A1-A2 10.0 measured: 6.58677550096 [x] test_data/CG.pdb 1.0 1 out of 1 # $ python rna_filter.py -r test_data/restraints.txt -t test_data/CG.trafl (d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1) restraints [('A1', 'A2', '<', '10.0', '1'), ('A2', 'A1', '<=', '10', '1')] Frame #1 e:1252.26 mb for A1 [ 54.729 28.9375 41.421 ] mb for A2 [ 55.3425 35.3605 42.7455] d:A1-A2 6.58677550096 mb for A2 [ 55.3425 35.3605 42.7455] mb for A1 [ 54.729 28.9375 41.421 ] d:A2-A1 6.58677550096 # this ^ is off right now """ from __future__ import print_function from rna_tools.tools.rna_calc_rmsd.lib.rmsd.calculate_rmsd import get_coordinates from rna_tools.tools.extra_functions.select_fragment import select_pdb_fragment_pymol_style, select_pdb_fragment from rna_tools.tools.simrna_trajectory.simrna_trajectory import SimRNATrajectory import argparse import re import numpy as np import os import logging logger = logging.getLogger() handler = logging.StreamHandler() logger.addHandler(handler) formatter = logging.Formatter( '%(asctime)-15s %(filename)s::%(funcName)s::%(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) class RNAFilterErrorInRestraints(Exception): pass def parse_logic(restraints_fn, verbose): """Parse logic of restraints. Args: restraints_nf(string): path to a file with restraints in the rigth format (see below) verbose (bool) : be verbose? Format:: Returns: list: parse restraints into a list of lists, e.g. [('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')] """ txt = '' with open(restraints_fn) as f: for l in f: if not l.startswith('#'): txt += l.strip() if verbose: logger.info(txt) restraints = re.findall( '\(d:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)\)', txt) return restraints def parse_logic_newlines(restraints_fn, offset=0, verbose=False): """Parse logic of restraints. Args: restraints_nf(string): path to a file with restraints in the rigth format (see below) verbose (bool) : be verbose? Format:: # ignore comments d:Y23-Y69 < 25.0 d:Y22-Y69 < 25.0 # d:<chain><resi_A>-<resi_B> <operator> <distance> <weight>; each restraints in a new line Raises: __main__.RNAFilterErrorInRestraints: Please check the format of your restraints! Returns: list: parse restraints into a list of lists, e.g. 
[('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')] """ restraints = [] with open(restraints_fn) as f: for l in f: if l.strip(): if not l.startswith('#'): if verbose: logger.info(l) restraint = re.findall( 'd:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\>|\<|\<\=)\s*(?P<distance>[\d\.]+)', l) # (?P<weight>.+?)', l) if restraint: # without [0] it is restraints [[('Y23', 'Y69', '<', '25.0', '1')], [('Y22', 'Y69', '<', '25.0', '1')]] # why? to convert 'Y23', 'Y69', '<', '25.0', '1' -> 'Y23', 'Y69', '<', 25.0, 1 start = restraint[0][0][0] + str(int(restraint[0][0][1:]) + offset) end = restraint[0][1][0] + str(int(restraint[0][1][1:]) + offset) operator = restraint[0][2] distance = float(restraint[0][3]) weight = 1 # fix for now #float(restraint[0][4])]) restraints.append([start, end, operator, distance, weight]) else: raise RNAFilterErrorInRestraints('Please check the format of your restraints!') if len(restraints) == 0: raise RNAFilterErrorInRestraints('Please check the format of your restraints!') return restraints # [('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')] def get_distance(a, b): diff = a - b return np.sqrt(np.dot(diff, diff)) def parse_pdb(pdb_fn, selection): """ {'A9': {'OP1': array([ 53.031, 21.908, 40.226]), 'C6': array([ 54.594, 27.595, 41.069]), 'OP2': array([ 52.811, 24.217, 39.125]), 'N4': array([ 53.925, 30.861, 39.743]), "C1'": array([ 55.611, 26.965, 43.258]), "C3'": array([ 53.904, 25.437, 43.809]), "O5'": array([ 53.796, 24.036, 41.353]), 'C5': array([ 54.171, 28.532, 40.195]), "O4'": array([ 55.841, 25.746, 42.605]), "C5'": array([ 54.814, 23.605, 42.274]), 'P': array( [ 53.57 , 23.268, 39.971]), "C4'": array([ 55.119, 24.697, 43.283]), "C2'": array([ 54.563, 26.706, 44.341]), 'N1': array([ 55.145, 27.966, 42.27 ]), "O2'": array([ 55.208, 26.577, 45.588]), 'N3': array([ 54.831, 30.285, 41.747]), 'O2': array([ 55.76 , 29.587, 43.719]), 'C2': array([ 55.258, 29.321, 42.618]), "O3'": array([ 53.272, 24.698, 44.789]), 'C4': array([ 54.313, 29.909, 40.572])}} """ V = {} with open(pdb_fn) as f: for line in f: if line.startswith("ATOM"): curr_chain_id = line[21] curr_resi = int(line[22: 26]) curr_atom_name = line[12: 16].strip() if selection: if curr_chain_id in selection: if curr_resi in selection[curr_chain_id]: x = line[30: 38] y = line[38: 46] z = line[46: 54] # V.append(np.asarray([x,y,z],dtype=float)) if curr_chain_id + str(curr_resi) in V: V[curr_chain_id + str(curr_resi)][curr_atom_name] = np.asarray([x, y, z], dtype=float) else: V[curr_chain_id + str(curr_resi)] = {} V[curr_chain_id + str(curr_resi)][curr_atom_name] = np.asarray([x, y, z], dtype=float) return V def check_condition(condition, wight): """return True/False, score""" pass def get_residues(pdb_fn, restraints, verbose): """ """ residues = set() for h in restraints: a = h[0] b = h[1] a = a[0] + ':' + a[1:] residues.add(a) # A19 b = b[0] + ':' + b[1:] residues.add(b) # set(['A:41', 'A:9', 'A:10', 'A:16']) selection = ','.join(residues) selection_parsed = select_pdb_fragment(selection, separator=",", splitting="[,:;]") residues = parse_pdb(pdb_fn, selection_parsed) # get mb for r in residues: if 'N9' in residues[r]: # A,G residues[r]['mb'] = residues[r]['N9'] - ((residues[r]['N9'] - residues[r]['C6']) / 2) else: # A,G residues[r]['mb'] = residues[r]['N1'] - ((residues[r]['N1'] - residues[r]['C4']) / 2) for r in residues: if verbose: logger.info(' '.join(['mb for ', str(r), str(residues[r]['mb'])])) return residues def get_parser(): parser = argparse.ArgumentParser( description=__doc__, 
formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-r', "--restraints_fn", dest="restraints_fn", required=True, help="""restraints_fn: Format: (d:A9-A41 < 10.0 1)|(d:A41-A9 <= 10 1) """) parser.add_argument("-v", "--verbose", action="store_true", help="be verbose") parser.add_argument('-s', dest="structures", help='structures', nargs='+') # , type=string) parser.add_argument( '--offset', help='use offset to adjust your restraints to numbering in PDB files, ade (1y26)' 'pdb starts with 13, so offset is -12)', default=0, type=int) parser.add_argument('-t', dest="trajectory", help="SimRNA trajectory") return parser def calc_scores_for_pdbs(pdb_files, restraints, verbose): """ """ # h = ('A1', 'A2', '<', '10.0', '1') print('fn, rst_score') for pdb_fn in pdb_files: # logger.info(pdb_fn) if verbose: print(pdb_fn, end=",") score = 0 residues = get_residues(pdb_fn, restraints, verbose) good_dists = 0 for h in restraints: dist = get_distance(residues[h[0]]['mb'], residues[h[1]]['mb']) # change distance ok = '[ ]' is_fulfiled = eval('dist ' + h[2] + ' h[3]') if is_fulfiled: # dist is calculated above score += h[4] ok = '[x]' good_dists += 1 if verbose: print(' '.join([' d:' + h[0] + '-' + h[1] + ' ' + str(h[4]), 'measured:', str(dist), ok])) if verbose: print(pdb_fn, score / float(len(restraints)), good_dists, 'out of', len(restraints)) # print(pdb_fn, score / float(len(restraints)), good_dists, 'out of', len(restraints)) print('%s,%f' % (os.path.basename(pdb_fn), score / float(len(restraints)))) # , good_dists, 'out of', len(restraints)) def __filter_simrna_trajectory(): f = (line for line in open(args.trajectory)) c = 0 while 1: try: header = f.next().strip() except StopIteration: # not nice break c += 1 coords = f.next().strip() traj = SimRNATrajectory() traj.load_from_string(c, header + '\n' + coords) frame = traj.frames[0] print(c) for h in restraints: a = int(h[0].replace('A', '')) - 1 # A1 -> 0 (indexing Python-like) b = int(h[1].replace('A', '')) - 1 a_mb = frame.residues[a].get_center() b_mb = frame.residues[b].get_center() # print ' mb for A' + str(a+1), a_mb # print ' mb for A' + str(b+1), b_mb dist = get_distance(a_mb, b_mb) logger.info(' '.join(' d:A' + str(a + 1) + "-A" + str(b + 1), dist)) # main if __name__ == '__main__': parser = get_parser() args = parser.parse_args() # score = 1 # print ((True|True)|(False|False)), score restraints = parse_logic_newlines(args.restraints_fn, args.offset, args.verbose) if args.verbose: logger.info('restraints' + str(restraints)) if args.structures: calc_scores_for_pdbs(args.structures, restraints, args.verbose) # if args.trajectory: # __filter_simrna_trajectory()
mit
4,392,061,652,920,878,600
37.160256
446
0.529733
false
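
rna_filter.py above scores PDB models against distance restraints written one per line. The sketch below writes a two-restraint file in the format parse_logic_newlines expects and scores a model with the same entry points the __main__ block uses; the PDB file name is a placeholder and the restrained residues must exist in the structure (the KeyError quoted in the module docstring is what happens when they do not).

# Sketch: restraint file in the expected format, then scoring (PDB name is a placeholder).
from rna_tools.tools.rna_filter.rna_filter import (parse_logic_newlines,
                                                   calc_scores_for_pdbs)

with open("restraints.txt", "w") as f:
    f.write("# d:<chain><resi_A>-<resi_B> <operator> <distance>\n")
    f.write("d:A9-A41 < 10.0\n")
    f.write("d:A10-A16 <= 12.0\n")

restraints = parse_logic_newlines("restraints.txt", offset=0)
calc_scores_for_pdbs(["model_01.pdb"], restraints, verbose=True)
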
stefanseefeld/numba
numba/tests/test_array_iterators.py
1
14037
from __future__ import division import itertools import numpy as np from numba import unittest_support as unittest from numba import jit, typeof, types from numba.compiler import compile_isolated from .support import TestCase, CompilationCache, MemoryLeakMixin, tag def array_iter(arr): total = 0 for i, v in enumerate(arr): total += i * v return total def array_view_iter(arr, idx): total = 0 for i, v in enumerate(arr[idx]): total += i * v return total def array_flat(arr, out): for i, v in enumerate(arr.flat): out[i] = v def array_flat_getitem(arr, ind): return arr.flat[ind] def array_flat_setitem(arr, ind, val): arr.flat[ind] = val def array_flat_sum(arr): s = 0 for i, v in enumerate(arr.flat): s = s + (i + 1) * v return s def array_flat_len(arr): return len(arr.flat) def array_ndenumerate_sum(arr): s = 0 for (i, j), v in np.ndenumerate(arr): s = s + (i + 1) * (j + 1) * v return s def np_ndindex_empty(): s = 0 for ind in np.ndindex(()): s += s + len(ind) + 1 return s def np_ndindex(x, y): s = 0 n = 0 for i, j in np.ndindex(x, y): s = s + (i + 1) * (j + 1) return s def np_ndindex_array(arr): s = 0 n = 0 for indices in np.ndindex(arr.shape): for i, j in enumerate(indices): s = s + (i + 1) * (j + 1) return s def np_nditer1(a): res = [] for u in np.nditer(a): res.append(u.item()) return res def np_nditer2(a, b): res = [] for u, v in np.nditer((a, b)): res.append((u.item(), v.item())) return res def np_nditer3(a, b, c): res = [] for u, v, w in np.nditer((a, b, c)): res.append((u.item(), v.item(), w.item())) return res def iter_next(arr): it = iter(arr) it2 = iter(arr) return next(it), next(it), next(it2) class TestArrayIterators(MemoryLeakMixin, TestCase): """ Test array.flat, np.ndenumerate(), etc. """ def setUp(self): super(TestArrayIterators, self).setUp() self.ccache = CompilationCache() def check_array_iter(self, arr): pyfunc = array_iter cres = compile_isolated(pyfunc, [typeof(arr)]) cfunc = cres.entry_point expected = pyfunc(arr) self.assertPreciseEqual(cfunc(arr), expected) def check_array_view_iter(self, arr, index): pyfunc = array_view_iter cres = compile_isolated(pyfunc, [typeof(arr), typeof(index)]) cfunc = cres.entry_point expected = pyfunc(arr, index) self.assertPreciseEqual(cfunc(arr, index), expected) def check_array_flat(self, arr, arrty=None): out = np.zeros(arr.size, dtype=arr.dtype) nb_out = out.copy() if arrty is None: arrty = typeof(arr) cres = compile_isolated(array_flat, [arrty, typeof(out)]) cfunc = cres.entry_point array_flat(arr, out) cfunc(arr, nb_out) self.assertTrue(np.all(out == nb_out), (out, nb_out)) def check_array_unary(self, arr, arrty, func): cres = compile_isolated(func, [arrty]) cfunc = cres.entry_point self.assertPreciseEqual(cfunc(arr), func(arr)) def check_array_flat_sum(self, arr, arrty): self.check_array_unary(arr, arrty, array_flat_sum) def check_array_ndenumerate_sum(self, arr, arrty): self.check_array_unary(arr, arrty, array_ndenumerate_sum) @tag('important') def test_array_iter(self): # Test iterating over a 1d array arr = np.arange(6) self.check_array_iter(arr) arr = arr[::2] self.assertFalse(arr.flags.c_contiguous) self.assertFalse(arr.flags.f_contiguous) self.check_array_iter(arr) arr = np.bool_([1, 0, 0, 1]) self.check_array_iter(arr) def test_array_view_iter(self): # Test iterating over a 1d view over a 2d array arr = np.arange(12).reshape((3, 4)) self.check_array_view_iter(arr, 1) self.check_array_view_iter(arr.T, 1) arr = arr[::2] self.check_array_view_iter(arr, 1) arr = np.bool_([1, 0, 0, 1]).reshape((2, 2)) self.check_array_view_iter(arr, 
1) @tag('important') def test_array_flat_3d(self): arr = np.arange(24).reshape(4, 2, 3) arrty = typeof(arr) self.assertEqual(arrty.ndim, 3) self.assertEqual(arrty.layout, 'C') self.assertTrue(arr.flags.c_contiguous) # Test with C-contiguous array self.check_array_flat(arr) # Test with Fortran-contiguous array arr = arr.transpose() self.assertFalse(arr.flags.c_contiguous) self.assertTrue(arr.flags.f_contiguous) self.assertEqual(typeof(arr).layout, 'F') self.check_array_flat(arr) # Test with non-contiguous array arr = arr[::2] self.assertFalse(arr.flags.c_contiguous) self.assertFalse(arr.flags.f_contiguous) self.assertEqual(typeof(arr).layout, 'A') self.check_array_flat(arr) # Boolean array arr = np.bool_([1, 0, 0, 1] * 2).reshape((2, 2, 2)) self.check_array_flat(arr) def test_array_flat_empty(self): # Test .flat with various shapes of empty arrays, contiguous # and non-contiguous (see issue #846). arr = np.zeros(0, dtype=np.int32) arr = arr.reshape(0, 2) arrty = types.Array(types.int32, 2, layout='C') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='F') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='A') self.check_array_flat_sum(arr, arrty) arr = arr.reshape(2, 0) arrty = types.Array(types.int32, 2, layout='C') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='F') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='A') self.check_array_flat_sum(arr, arrty) def test_array_flat_getitem(self): # Test indexing of array.flat object pyfunc = array_flat_getitem def check(arr, ind): cr = self.ccache.compile(pyfunc, (typeof(arr), typeof(ind))) expected = pyfunc(arr, ind) self.assertEqual(cr.entry_point(arr, ind), expected) arr = np.arange(24).reshape(4, 2, 3) for i in range(arr.size): check(arr, i) arr = arr.T for i in range(arr.size): check(arr, i) arr = arr[::2] for i in range(arr.size): check(arr, i) arr = np.array([42]).reshape(()) for i in range(arr.size): check(arr, i) # Boolean array arr = np.bool_([1, 0, 0, 1]) for i in range(arr.size): check(arr, i) arr = arr[::2] for i in range(arr.size): check(arr, i) def test_array_flat_setitem(self): # Test indexing of array.flat object pyfunc = array_flat_setitem def check(arr, ind): arrty = typeof(arr) cr = self.ccache.compile(pyfunc, (arrty, typeof(ind), arrty.dtype)) # Use np.copy() to keep the layout expected = np.copy(arr) got = np.copy(arr) pyfunc(expected, ind, 123) cr.entry_point(got, ind, 123) self.assertPreciseEqual(got, expected) arr = np.arange(24).reshape(4, 2, 3) for i in range(arr.size): check(arr, i) arr = arr.T for i in range(arr.size): check(arr, i) arr = arr[::2] for i in range(arr.size): check(arr, i) arr = np.array([42]).reshape(()) for i in range(arr.size): check(arr, i) # Boolean array arr = np.bool_([1, 0, 0, 1]) for i in range(arr.size): check(arr, i) arr = arr[::2] for i in range(arr.size): check(arr, i) def test_array_flat_len(self): # Test len(array.flat) pyfunc = array_flat_len def check(arr): cr = self.ccache.compile(pyfunc, (typeof(arr),)) expected = pyfunc(arr) self.assertPreciseEqual(cr.entry_point(arr), expected) arr = np.arange(24).reshape(4, 2, 3) check(arr) arr = arr.T check(arr) arr = arr[::2] check(arr) arr = np.array([42]).reshape(()) check(arr) @tag('important') def test_array_ndenumerate_2d(self): arr = np.arange(12).reshape(4, 3) arrty = typeof(arr) self.assertEqual(arrty.ndim, 2) self.assertEqual(arrty.layout, 'C') self.assertTrue(arr.flags.c_contiguous) # Test with C-contiguous 
array self.check_array_ndenumerate_sum(arr, arrty) # Test with Fortran-contiguous array arr = arr.transpose() self.assertFalse(arr.flags.c_contiguous) self.assertTrue(arr.flags.f_contiguous) arrty = typeof(arr) self.assertEqual(arrty.layout, 'F') self.check_array_ndenumerate_sum(arr, arrty) # Test with non-contiguous array arr = arr[::2] self.assertFalse(arr.flags.c_contiguous) self.assertFalse(arr.flags.f_contiguous) arrty = typeof(arr) self.assertEqual(arrty.layout, 'A') self.check_array_ndenumerate_sum(arr, arrty) # Boolean array arr = np.bool_([1, 0, 0, 1]).reshape((2, 2)) self.check_array_ndenumerate_sum(arr, typeof(arr)) def test_array_ndenumerate_empty(self): arr = np.zeros(0, dtype=np.int32) arr = arr.reshape(0, 2) arrty = types.Array(types.int32, 2, layout='C') self.check_array_ndenumerate_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='F') self.check_array_ndenumerate_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='A') self.check_array_ndenumerate_sum(arr, arrty) arr = arr.reshape(2, 0) arrty = types.Array(types.int32, 2, layout='C') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='F') self.check_array_flat_sum(arr, arrty) arrty = types.Array(types.int32, 2, layout='A') self.check_array_flat_sum(arr, arrty) def test_np_ndindex(self): func = np_ndindex cres = compile_isolated(func, [types.int32, types.int32]) cfunc = cres.entry_point self.assertPreciseEqual(cfunc(3, 4), func(3, 4)) self.assertPreciseEqual(cfunc(3, 0), func(3, 0)) self.assertPreciseEqual(cfunc(0, 3), func(0, 3)) self.assertPreciseEqual(cfunc(0, 0), func(0, 0)) @tag('important') def test_np_ndindex_array(self): func = np_ndindex_array arr = np.arange(12, dtype=np.int32) + 10 self.check_array_unary(arr, typeof(arr), func) arr = arr.reshape((4, 3)) self.check_array_unary(arr, typeof(arr), func) arr = arr.reshape((2, 2, 3)) self.check_array_unary(arr, typeof(arr), func) def test_np_ndindex_empty(self): func = np_ndindex_empty cres = compile_isolated(func, []) cfunc = cres.entry_point self.assertPreciseEqual(cfunc(), func()) @tag('important') def test_iter_next(self): # This also checks memory management with iter() and next() func = iter_next arr = np.arange(12, dtype=np.int32) + 10 self.check_array_unary(arr, typeof(arr), func) class TestNdIter(MemoryLeakMixin, TestCase): """ Test np.nditer() """ def inputs(self): # All those inputs are compatible with a (3, 4) main shape # scalars yield np.float32(100) # 0-d arrays yield np.array(102, dtype=np.int16) # 1-d arrays yield np.arange(4).astype(np.complex64) yield np.arange(8)[::2] # 2-d arrays a = np.arange(12).reshape((3, 4)) yield a yield a.copy(order='F') a = np.arange(24).reshape((6, 4))[::2] yield a def basic_inputs(self): yield np.arange(4).astype(np.complex64) yield np.arange(8)[::2] a = np.arange(12).reshape((3, 4)) yield a yield a.copy(order='F') def check_result(self, got, expected): self.assertEqual(set(got), set(expected), (got, expected)) def test_nditer1(self): pyfunc = np_nditer1 cfunc = jit(nopython=True)(pyfunc) for a in self.inputs(): expected = pyfunc(a) got = cfunc(a) self.check_result(got, expected) @tag('important') def test_nditer2(self): pyfunc = np_nditer2 cfunc = jit(nopython=True)(pyfunc) for a, b in itertools.product(self.inputs(), self.inputs()): expected = pyfunc(a, b) got = cfunc(a, b) self.check_result(got, expected) def test_nditer3(self): pyfunc = np_nditer3 cfunc = jit(nopython=True)(pyfunc) # Use a restricted set of inputs, to shorten test time inputs = self.basic_inputs for a, b, 
c in itertools.product(inputs(), inputs(), inputs()): expected = pyfunc(a, b, c) got = cfunc(a, b, c) self.check_result(got, expected) def test_errors(self): # Incompatible shapes pyfunc = np_nditer2 cfunc = jit(nopython=True)(pyfunc) self.disable_leak_check() def check_incompatible(a, b): with self.assertRaises(ValueError) as raises: cfunc(a, b) self.assertIn("operands could not be broadcast together", str(raises.exception)) check_incompatible(np.arange(2), np.arange(3)) a = np.arange(12).reshape((3, 4)) b = np.arange(3) check_incompatible(a, b) if __name__ == '__main__': unittest.main()
bsd-2-clause
-1,506,032,949,859,428,900
30.402685
79
0.568355
false
karllessard/tensorflow
tensorflow/python/autograph/impl/api_test.py
4
35611
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for api module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import contextlib import functools import gc import imp import os import re import sys import textwrap import types import numpy as np import six from tensorflow.python.autograph.core import ag_ctx from tensorflow.python.autograph.core import converter from tensorflow.python.autograph.core import converter_testing from tensorflow.python.autograph.impl import api from tensorflow.python.autograph.impl import conversion from tensorflow.python.autograph.pyct import errors from tensorflow.python.autograph.pyct import inspect_utils from tensorflow.python.autograph.pyct import parser from tensorflow.python.autograph.utils import ag_logging from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.util import function_utils from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect global_n = 2 DEFAULT_RECURSIVE = converter.ConversionOptions(recursive=True) class TestResource(object): def __init__(self): self.x = 3 class ApiTest(test.TestCase): @contextlib.contextmanager def assertPrints(self, expected, not_expected): try: out_capturer = six.StringIO() sys.stdout = out_capturer yield self.assertIn(expected, out_capturer.getvalue()) self.assertNotIn(not_expected, out_capturer.getvalue()) finally: sys.stdout = sys.__stdout__ def assertNoMemoryLeaks(self, f): object_ids_before = {id(o) for o in gc.get_objects()} f() gc.collect() objects_after = tuple( o for o in gc.get_objects() if id(o) not in object_ids_before) self.assertEmpty( tuple(o for o in objects_after if isinstance(o, TestResource))) def test_converted_call_kwonly_args(self): def test_fn(*, a): return a x = api.converted_call( test_fn, (), {'a': constant_op.constant(-1)}, options=DEFAULT_RECURSIVE) self.assertEqual(-1, self.evaluate(x)) def test_super_with_no_arg(self): test_case_self = self class TestBase: def plus_three(self, x): return x + 3 class TestSubclass(TestBase): def plus_three(self, x): test_case_self.fail('This should never be called.') def no_arg(self, x): return super().plus_three(x) tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(5, tc.no_arg(2)) def test_converted_call_avoids_triggering_operators(self): test_self = self class Pair(collections.namedtuple('Pair', ['a', 'b'])): def __call__(self): return self.a + self.b def 
__eq__(self, other): test_self.fail('Triggered operator') p = Pair(constant_op.constant(1), constant_op.constant(2)) x = api.converted_call(p, (), {}, options=DEFAULT_RECURSIVE) self.assertIsNotNone(self.evaluate(x), 3) @test_util.run_deprecated_v1 def test_decorator_recursive(self): class TestClass(object): def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while math_ops.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], self.evaluate(x).tolist()) @test_util.run_deprecated_v1 def test_decorator_not_recursive(self): class TestClass(object): def called_member(self, a): return math_ops.negative(a) @api.convert(recursive=False) def test_method(self, x, s, a): while math_ops.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], self.evaluate(x).tolist()) @test_util.run_deprecated_v1 def test_convert_then_do_not_convert(self): class TestClass(object): @api.do_not_convert def called_member(self, a): return math_ops.negative(a) @api.convert(recursive=True) def test_method(self, x, s, a): while math_ops.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() x = tc.test_method( constant_op.constant((2, 4)), constant_op.constant(1), constant_op.constant(-2)) self.assertAllEqual((0, 1), self.evaluate(x)) @test_util.run_deprecated_v1 def test_decorator_calls_decorated(self): class TestClass(object): @api.convert() def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while math_ops.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], self.evaluate(x).tolist()) def test_decorator_preserves_argspec(self): class TestClass(object): def test_method(self, a): if a < 0: a = -a return a test_method_converted = api.convert()(test_method) tc = TestClass() self.assertListEqual( list(tf_inspect.getfullargspec(tc.test_method)), list(tf_inspect.getfullargspec(tc.test_method_converted))) def test_do_not_convert_argspec(self): class TestClass(object): def test_method(self, x, y): z = x + y return z test_method_allowlisted = api.do_not_convert(test_method) tc = TestClass() self.assertTrue(tf_inspect.ismethod(tc.test_method_allowlisted)) # Because the wrapped function is not generated, we can't preserve its # arg spec. 
self.assertEqual((), tuple(function_utils.fn_args(tc.test_method_allowlisted))) def test_do_not_convert_callable_object(self): class TestClass(object): def __call__(self): return 1 tc = TestClass() self.assertEqual(1, api.do_not_convert(tc)()) @test_util.run_deprecated_v1 def test_convert_call_site_decorator(self): class TestClass(object): def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while math_ops.reduce_sum(x) > s: x //= api.converted_call( self.called_member, (a,), None, options=DEFAULT_RECURSIVE) return x tc = TestClass() x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], self.evaluate(x).tolist()) def test_converted_call_builtin(self): x = api.converted_call(range, (3,), None, options=DEFAULT_RECURSIVE) self.assertEqual((0, 1, 2), tuple(x)) x = api.converted_call( re.compile, ('mnas_v4_a.*\\/.*(weights|kernel):0$',), None, options=DEFAULT_RECURSIVE) self.assertIsNotNone(x.match('mnas_v4_a/weights:0')) def test_converted_call_function(self): def test_fn(x): if x < 0: return -x return x x = api.converted_call( test_fn, (constant_op.constant(-1),), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) @test_util.run_v1_only('b/120545219') def test_converted_call_functools_partial(self): def test_fn(x, y, z): if x < 0: return -x, -y, -z return x, y, z x = api.converted_call( functools.partial(test_fn, constant_op.constant(-1), z=-3), (constant_op.constant(-2),), None, options=DEFAULT_RECURSIVE) self.assertEqual((1, 2, 3), self.evaluate(x)) x = api.converted_call( functools.partial( functools.partial(test_fn, constant_op.constant(-1)), z=-3), (constant_op.constant(-2),), None, options=DEFAULT_RECURSIVE) self.assertEqual((1, 2, 3), self.evaluate(x)) @test_util.run_v1_only('b/120545219') def test_converted_call_functools_partial_kwarg_mutation(self): def test_fn(x, y, z): if x < 0: return -x, -y, -z return x, y, z partial_fn = functools.partial(test_fn, constant_op.constant(-1), z=-3) # Call using kwargs to assign y first to ensure that partial_fn.keywords is # not mutated for subsequent calls (where y is assign through args). x = api.converted_call( partial_fn, args=(), kwargs={ 'y': constant_op.constant(-2), }, options=DEFAULT_RECURSIVE) self.assertEqual((1, 2, 3), self.evaluate(x)) x = api.converted_call( partial_fn, args=(constant_op.constant(-4),), kwargs=None, options=DEFAULT_RECURSIVE) self.assertEqual((1, 4, 3), self.evaluate(x)) def test_converted_call_method(self): class TestClass(object): def __init__(self, x): self.x = x def test_method(self): if self.x < 0: return -self.x return self.x tc = TestClass(constant_op.constant(-1)) x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) def test_converted_call_synthetic_method(self): class TestClass(object): def __init__(self, x): self.x = x def test_function(self): if self.x < 0: return -self.x return self.x tc = TestClass(constant_op.constant(-1)) test_method = types.MethodType(test_function, tc) x = api.converted_call(test_method, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) def test_converted_call_method_wrapper(self): class TestClass(object): def foo(self): pass tc = TestClass() # `method.__get__()` returns a so-called method-wrapper. 
wrapper = api.converted_call( tc.foo.__get__, (tc,), None, options=DEFAULT_RECURSIVE) self.assertEqual(wrapper, tc.foo) def test_converted_call_method_as_object_attribute(self): class AnotherClass(object): def __init__(self): self.another_class_attr = constant_op.constant(1) def method(self): if self.another_class_attr > 0: return self.another_class_attr + 1 return self.another_class_attr + 10 class TestClass(object): def __init__(self, another_obj_method): self.another_obj_method = another_obj_method obj = AnotherClass() tc = TestClass(obj.method) x = api.converted_call( tc.another_obj_method, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(self.evaluate(x), 2) def test_converted_call_method_converts_recursively(self): class TestClass(object): def __init__(self, x): self.x = x def other_method(self): if self.x < 0: return -self.x return self.x def test_method(self): return self.other_method() tc = TestClass(constant_op.constant(-1)) x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) def test_converted_call_method_by_class(self): class TestClass(object): def __init__(self, x): self.x = x def test_method(self): if self.x < 0: return -self.x return self.x tc = TestClass(constant_op.constant(-1)) x = api.converted_call( TestClass.test_method, (tc,), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) def test_converted_call_callable_object(self): class TestClass(object): def __init__(self, x): self.x = x def __call__(self): if self.x < 0: return -self.x return self.x tc = TestClass(constant_op.constant(-1)) x = api.converted_call(tc, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(1, self.evaluate(x)) def test_converted_call_callable_metaclass(self): test_self = self class TestMetaclass(type): def __call__(cls): self.assertTrue(converter_testing.is_inside_generated_code()) inst = object.__new__(cls) inst.__init__() def instance_call(unused_self): test_self.fail( 'The class-bound __call__ should be called, not the instance' ' bound one.') inst.__call__ = instance_call return inst tmc = TestMetaclass('TestClass', (), {}) tc = api.converted_call(tmc, (), None, options=DEFAULT_RECURSIVE) self.assertIsInstance(tc, tmc) def test_converted_call_callable_abc(self): test_self = self @six.add_metaclass(abc.ABCMeta) class TestBase(object): @abc.abstractmethod def __call__(self): test_self.fail('This should not be called') class TestSubclass(TestBase): def __init__(self): test_self.assertFalse(converter_testing.is_inside_generated_code()) def __call__(self, expected): test_self.assertTrue(expected) test_self.assertTrue(converter_testing.is_inside_generated_code()) tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE) api.converted_call(tc, (True,), None, options=DEFAULT_RECURSIVE) @test_util.run_deprecated_v1 def test_converted_call_constructor(self): test_self = self class TestClass(object): def __init__(self): test_self.assertFalse(converter_testing.is_inside_generated_code()) tc = api.converted_call(TestClass, (), None, options=DEFAULT_RECURSIVE) self.assertIsInstance(tc, TestClass) def test_converted_call_mangled_properties(self): class TestClass(object): def __init__(self): self.__private = constant_op.constant(-1) def test_method(self): return self.__private tc = TestClass() with self.assertRaisesRegex( errors.UnsupportedLanguageElementError, 'mangled names'): api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) # TODO(mdan): Refactor to avoid this use of global 
state. ag_logging.set_verbosity(0, True) os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0' with self.assertPrints('could not transform', 'bug'): api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) ag_logging.set_verbosity(0, False) os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1' def test_converted_call_partial_of_allowlisted_function(self): def test_fn(_): self.assertFalse(converter_testing.is_inside_generated_code()) converter_testing.allowlist(test_fn) api.converted_call( functools.partial(test_fn, None), (), None, options=DEFAULT_RECURSIVE) def test_converted_call_already_converted(self): def f(x): return x == 0 x = api.converted_call( f, (constant_op.constant(0),), None, options=DEFAULT_RECURSIVE) self.assertTrue(self.evaluate(x)) converted_f = api.to_graph( f, experimental_optional_features=converter.Feature.ALL) x = api.converted_call( converted_f, (constant_op.constant(0),), None, options=DEFAULT_RECURSIVE) self.assertTrue(self.evaluate(x)) def test_converted_call_then_already_converted_dynamic(self): @api.convert() def g(x): if x > 0: return x else: return -x def f(g, x): return g(x) x = api.converted_call( f, (g, constant_op.constant(1)), None, options=DEFAULT_RECURSIVE) self.assertEqual(self.evaluate(x), 1) def test_converted_call_forced_when_explicitly_allowlisted(self): @api.do_not_convert() def f(x): return x + 1 opts = converter.ConversionOptions(recursive=True, user_requested=True) x = api.converted_call(f, (constant_op.constant(0),), None, options=opts) self.assertTrue(self.evaluate(x)) converted_f = api.to_graph( f, experimental_optional_features=converter.Feature.ALL) x = api.converted_call(converted_f, (0,), None, options=DEFAULT_RECURSIVE) self.assertEqual(x, 1) @test_util.run_deprecated_v1 def test_converted_call_no_user_code(self): def f(x): return len(x) opts = converter.ConversionOptions(internal_convert_user_code=False) # f should not be converted, causing len to error out. with self.assertRaisesRegex(Exception, 'len is not well defined'): api.converted_call(f, (constant_op.constant([0]),), None, options=opts) # len on the other hand should work fine. x = api.converted_call( len, (constant_op.constant([0]),), None, options=opts) # The constant has static shape so the result is a primitive not a Tensor. self.assertEqual(x, 1) def test_converted_call_no_kwargs_allowed(self): def f(*args): # Note: np.broadcast rejects any **kwargs, even *{} return np.broadcast(args[:1]) opts = converter.ConversionOptions(internal_convert_user_code=False) self.assertIsNotNone( api.converted_call(f, (1, 2, 3, 4), None, options=opts)) def test_converted_call_allowlisted_method(self): class TestClass(object): def method(self): return converter_testing.is_inside_generated_code() obj = TestClass() converter_testing.allowlist(obj.method.__func__) self.assertFalse( api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE)) def test_converted_call_allowlisted_method_via_owner(self): class TestClass(object): def method(self): return converter_testing.is_inside_generated_code() converter_testing.allowlist(TestClass) obj = TestClass() self.assertFalse( api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE)) def test_converted_call_numpy(self): x = api.converted_call(np.arange, (5,), None, options=DEFAULT_RECURSIVE) self.assertAllEqual(x, list(range(5))) def test_converted_call_tf_op_forced(self): # TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS. 
opts = converter.ConversionOptions( user_requested=True, optional_features=None) x = api.converted_call(math_ops.add, (1, 1), None, options=opts) self.assertAllEqual(self.evaluate(x), 2) def test_converted_call_exec_generated_code(self): temp_mod = imp.new_module('test_module') dynamic_code = """ def foo(x): return x + 1 """ exec(textwrap.dedent(dynamic_code), temp_mod.__dict__) # pylint:disable=exec-used opts = converter.ConversionOptions(optional_features=None) x = api.converted_call(temp_mod.foo, (1,), None, options=opts) self.assertAllEqual(x, 2) def test_converted_call_namedtuple(self): x = api.converted_call( collections.namedtuple, ('TestNamedtuple', ('a', 'b')), None, options=DEFAULT_RECURSIVE) self.assertTrue(inspect_utils.isnamedtuple(x)) def test_converted_call_namedtuple_via_collections(self): x = api.converted_call( collections.namedtuple, ('TestNamedtuple', ('a', 'b')), None, options=DEFAULT_RECURSIVE) self.assertTrue(inspect_utils.isnamedtuple(x)) def test_converted_call_namedtuple_subclass_bound_method(self): class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))): def test_method(self, x): while math_ops.reduce_sum(x) > self.a: x //= self.b return x obj = TestClass(5, 2) x = api.converted_call( obj.test_method, (constant_op.constant([2, 4]),), None, options=DEFAULT_RECURSIVE) self.assertAllEqual(self.evaluate(x), [1, 2]) def test_converted_call_namedtuple_method(self): class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))): pass obj = TestClass(5, 2) # _asdict is a documented method of namedtuple. x = api.converted_call(obj._asdict, (), None, options=DEFAULT_RECURSIVE) self.assertDictEqual(x, {'a': 5, 'b': 2}) def test_converted_call_namedtuple_subclass_unbound_method(self): class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))): def test_method(self, x): while math_ops.reduce_sum(x) > self.a: x //= self.b return x obj = TestClass(5, 2) x = api.converted_call( TestClass.test_method, (obj, constant_op.constant([2, 4])), None, options=DEFAULT_RECURSIVE) self.assertAllEqual(self.evaluate(x), [1, 2]) def test_converted_call_lambda(self): l = lambda x: x == 0 x = api.converted_call( l, (constant_op.constant(0),), None, options=DEFAULT_RECURSIVE) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(True, self.evaluate(x)) def test_converted_call_defun_object_method(self): # pylint:disable=method-hidden class TestClass(object): def method(self): return 1 def prepare(self): self.method = function.defun(self.method) # pylint:enable=method-hidden tc = TestClass() tc.prepare() x = api.converted_call(tc.method, (), None, options=DEFAULT_RECURSIVE) self.assertAllEqual(1, self.evaluate(x)) def test_converted_call_native_binding(self): x = api.converted_call(np.power, (2, 2), None, options=DEFAULT_RECURSIVE) self.assertAllEqual(x, 4) def test_converted_call_native_binding_errorneous(self): class FaultyBinding(object): def __array__(self): raise ValueError('fault') bad_obj = FaultyBinding() def fail_if_warning(*_): self.fail('No warning should be issued') with test.mock.patch.object(ag_logging, 'warn', fail_if_warning): with self.assertRaisesRegex(ValueError, 'fault'): api.converted_call( np.power, (bad_obj, 2), None, options=DEFAULT_RECURSIVE) def test_converted_call_through_tf_dataset(self): def other_fn(x): if x > 0: return x return -x def f(): return dataset_ops.Dataset.range(-3, 3).map(other_fn) # Dataset iteration only works inside math_ops. 
@def_function.function def graph_fn(): ds = api.converted_call(f, (), None, options=DEFAULT_RECURSIVE) itr = iter(ds) return next(itr), next(itr), next(itr) self.assertAllEqual(self.evaluate(graph_fn()), (3, 2, 1)) def test_converted_call_no_leaks_via_closure(self): def test_fn(): res = TestResource() def f(y): return res.x + y api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE) self.assertNoMemoryLeaks(test_fn) def test_converted_call_no_leaks_via_inner_function_closure(self): def test_fn(): res = TestResource() def f(y): def inner_f(): return res.x + y return inner_f api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE)() self.assertNoMemoryLeaks(test_fn) def test_converted_call_no_caching_on_abort(self): def test_fn(needs_autograph): if needs_autograph: if constant_op.constant(True): x = constant_op.constant(1) else: x = constant_op.constant(2) else: x = 3 return x def call_in_disabled_context(): with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED): return api.converted_call( test_fn, (False,), None, options=DEFAULT_RECURSIVE) def call_in_default_context(): with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED): return api.converted_call( test_fn, (True,), None, options=DEFAULT_RECURSIVE) # Note: this is an invariant, not a test (see above). assert call_in_disabled_context() == 3 # If api.convert placed test_fn in the unconverted cache, this second # invocation would fail. self.assertEqual(self.evaluate(call_in_default_context()), 1) def test_converted_call_caching_of_allowlisted_bound_methods(self): class TestClass(object): def __init__(self): self.__private = constant_op.constant(-1) def test_method(self): return self.__private # TODO(mdan): Refactor to avoid this use of global state. cache_size_before = len(conversion._ALLOWLIST_CACHE) # First invocation with fallback on, to allow recording it into cache. os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0' tc = TestClass() api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1' # Entry should be added to the allowlist cache. self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1) # A second invocation should go through even with fallback off. tc = TestClass() api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE) # No new entries should appear in the allowlist cache. 
self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1) def test_context_tracking_direct_calls(self): @api.do_not_convert() def unconverted_fn(): self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.DISABLED) @api.convert() def converted_fn(): self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED) unconverted_fn() self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED) self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED) converted_fn() self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED) @api.call_with_unspecified_conversion_status def unspecified_fn(): self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED) unspecified_fn() def test_to_graph_basic(self): def test_fn(x, s): while math_ops.reduce_sum(x) > s: x //= 2 return x compiled_fn = api.to_graph(test_fn) with ops.Graph().as_default(): x = compiled_fn(constant_op.constant((4, 8)), 4) self.assertAllEqual(self.evaluate(x), (1, 2)) @test_util.run_deprecated_v1 def test_to_graph_with_defaults(self): foo = 4 def test_fn(x, s=foo): while math_ops.reduce_sum(x) > s: x //= 2 return x compiled_fn = api.to_graph(test_fn) x = compiled_fn(constant_op.constant([4, 8])) self.assertListEqual([1, 2], self.evaluate(x).tolist()) def test_to_graph_with_globals(self): def test_fn(x): global global_n global_n = x + global_n return global_n converted_fn = api.to_graph(test_fn) prev_val = global_n converted_fn(10) self.assertGreater(global_n, prev_val) def test_to_graph_with_kwargs_clashing_converted_call(self): def called_fn(**kwargs): return kwargs['f'] + kwargs['owner'] def test_fn(): # These arg names intentionally match converted_call's return called_fn(f=1, owner=2) compiled_fn = api.to_graph(test_fn) self.assertEqual(compiled_fn(), 3) def test_to_graph_with_kwargs_clashing_unconverted_call(self): @api.do_not_convert def called_fn(**kwargs): return kwargs['f'] + kwargs['owner'] def test_fn(): # These arg names intentionally match _call_unconverted's return called_fn(f=1, owner=2) compiled_fn = api.to_graph(test_fn) self.assertEqual(compiled_fn(), 3) def test_to_graph_caching(self): def test_fn(x): if x > 0: return x else: return -x converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1)) # All outputs are from the same module. We can't use __module__ because # that's reset when we instantiate the function (see conversion.py). # TODO(mdan): Can and should we overwrite __module__ instead? 
module_names = frozenset(f.ag_module for f in converted_functions) self.assertEqual(len(module_names), 1) self.assertNotIn('__main__', module_names) self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3) def test_to_graph_caching_different_options(self): def called_fn(): pass def test_fn(): return called_fn() converted_recursive = api.to_graph(test_fn, recursive=True) converted_non_recursive = api.to_graph(test_fn, recursive=False) self.assertNotEqual(converted_recursive.ag_module, converted_non_recursive.ag_module) self.assertRegex( tf_inspect.getsource(converted_recursive), 'FunctionScope(.*recursive=True.*)') self.assertRegex( tf_inspect.getsource(converted_non_recursive), 'FunctionScope(.*recursive=False.*)') def test_to_graph_preserves_bindings(self): y = 3 def test_fn(): return y converted = api.to_graph(test_fn) self.assertEqual(converted(), 3) y = 7 self.assertEqual(converted(), 7) def test_to_graph_source_map(self): def test_fn(y): return y**2 self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map')) def test_to_graph_sets_conversion_context(self): def g(): self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED) return 0 # Note: the autograph=False sets the connect to Status.DISABLED. The test # verifies that to_graph overrides that. @def_function.function(autograph=False) def f(): converted_g = api.to_graph(g) converted_g() f() def test_to_code_basic(self): def test_fn(x, s): while math_ops.reduce_sum(x) > s: x /= 2 return x # Just check that the output is parseable Python code. self.assertIsNotNone(parser.parse(api.to_code(test_fn))) def test_to_code_with_wrapped_function(self): @def_function.function def test_fn(x, s): while math_ops.reduce_sum(x) > s: x /= 2 return x with self.assertRaisesRegex(Exception, 'try passing.*python_function'): api.to_code(test_fn) def test_tf_convert_overrides_current_context(self): def f(expect_converted): self.assertEqual( converter_testing.is_inside_generated_code(), expect_converted) @api.do_not_convert def test_fn(ctx, expect_converted): return api.tf_convert(f, ctx)(expect_converted) test_fn( ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED), True) test_fn( ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED), False) def test_tf_convert_unspecified_not_converted_by_default(self): def f(): self.assertEqual(ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED) self.assertFalse(converter_testing.is_inside_generated_code()) @def_function.function def test_fn(ctx): return api.tf_convert(f, ctx, convert_by_default=False)() test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED)) def test_tf_convert_allowlisted_method(self): if six.PY2: self.skipTest('Test bank not comptible with Python 2.') class TestClass(object): def method(self): return converter_testing.is_inside_generated_code() converter_testing.allowlist(TestClass.method) obj = TestClass() converted_call = api.tf_convert( obj.method, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED)) _, converted_target = tf_decorator.unwrap(converted_call) self.assertIs(converted_target.__func__, obj.method.__func__) def test_tf_convert_tf_decorator_unwrapping_context_enabled(self): def f(): self.assertTrue(converter_testing.is_inside_generated_code()) @functools.wraps(f) def wrapper(*args, **kwargs): return wrapper.__wrapped__(*args, **kwargs) decorated_f = tf_decorator.make_decorator(f, wrapper) def test_fn(ctx): return api.tf_convert(decorated_f, ctx)() test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED)) def 
test_tf_convert_tf_decorator_unwrapping_context_disabled(self): def f(): self.assertFalse(converter_testing.is_inside_generated_code()) @functools.wraps(f) def wrapper(*args, **kwargs): return wrapper.__wrapped__(*args, **kwargs) decorated_f = tf_decorator.make_decorator(f, wrapper) def test_fn(ctx): return api.tf_convert(decorated_f, ctx)() test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED)) def test_tf_convert_tf_decorator_allowlist_method(self): def wrap(f): def wrapper(*args, **kwargs): return wrapper.__wrapped__(*args, **kwargs) return tf_decorator.make_decorator(f, wrapper) class TestClass(object): @wrap def method(self): return converter_testing.is_inside_generated_code() converter_testing.allowlist(TestClass.method) obj = TestClass() # It's intended that tf_convert modifies the original method in this case. # This is not desirable, but options are limited. converted = api.tf_convert( obj.method, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED)) self.assertTrue(converted()) self.assertTrue(obj.method()) def test_super_with_one_arg(self): test_case_self = self class TestBase(object): def plus_three(self, x): return x + 3 class TestSubclass(TestBase): def plus_three(self, x): test_case_self.fail('This should never be called.') def one_arg(self, x): test_base_unbound = super(TestSubclass) test_base = test_base_unbound.__get__(self, TestSubclass) return test_base.plus_three(x) tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(5, tc.one_arg(2)) def test_super_with_two_args(self): test_case_self = self class TestBase(object): def plus_three(self, x): return x + 3 class TestSubclass(TestBase): def plus_three(self, x): test_case_self.fail('This should never be called.') def two_args(self, x): return super(TestSubclass, self).plus_three(x) tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE) self.assertEqual(5, tc.two_args(2)) if __name__ == '__main__': os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1' test.main()
apache-2.0
-160,551,979,138,421,540
27.128752
86
0.642526
false
djhshih/genomic
utils/genompy/genompy/plot/cn.py
1
6009
#!/usr/bin/env python3

import numpy as np
import matplotlib
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker

from .. import cn


def plot_sample_profile(x, y, seg_x=None, seg_y=None, subplot_spec=None, ax=None,
        hide_xaxis=False, yref=0, downsample=1):
    ax = plt.subplot(subplot_spec, sharex=ax)

    # plot reference line
    ax.axhline(yref, color='#cccccc', lw=8)

    if downsample > 1:
        marker = '.'
    else:
        marker = 'o'

    # plot data points
    ax.plot(x[::downsample], y[::downsample], 'k', marker=marker, ls='')

    # plot segments
    if seg_x is not None and seg_y is not None:
        for i in range(len(seg_y)):
            # FIXME expose colour thresholds as parameters
            if seg_y[i] > 0.2:
                color = '#bd0026'
            elif seg_y[i] < -0.2:
                color = '#0868ac'
            else:
                color = '#666666'
            line = lines.Line2D(seg_x[i, :], [seg_y[i], seg_y[i]], color=color, lw=2)
            ax.add_line(line)

    ax.yaxis.tick_left()

    if hide_xaxis:
        for side in ('right', 'top', 'bottom'):
            ax.spines[side].set_color('none')
        ax.xaxis.set(visible=False)

    return ax


def coordinate_kbp(x, pos):
    return '{} kbp'.format(x/1e3)


def coordinate_mbp(x, pos):
    return '{} Mbp'.format(x/1e6)


def draw_xaxis(subplot_spec, ax):
    ax2 = plt.subplot(subplot_spec, sharex=ax)
    ax2.spines['right'].set_color('none')
    ax2.spines['left'].set_color('none')
    ax2.spines['top'].set_color('none')
    ax2.set(yticks=[])
    ax2.xaxis.tick_bottom()


def plot_mrna(gene_region, ax, top):
    h = 0.05

    # draw intron
    line = lines.Line2D([gene_region.start, gene_region.end], [top - h/2, top - h/2],
            color='#fd8d3c', lw=2, zorder=0)
    ax.add_line(line)

    # draw exons
    for exon in gene_region.exons:
        s = exon.start
        e = exon.end
        x = [s, e, e, s]
        y = [top-h, top-h, top, top]
        ax.fill(x, y, '#fd8d3c', lw=2, ec='#fd8d3c')

    # draw coding regions
    for cds in gene_region.coding_exons:
        s = cds.start
        e = cds.end
        x = [s, e, e, s]
        y = [top-h, top-h, top, top]
        ax.fill(x, y, '#f03b20', lw=2, ec='#f03b20')

    # annotate gene
    size = gene_region.end - gene_region.start + 1
    mid = gene_region.start + size/2
    ax.text(mid, -0.3, gene_region.name, horizontalalignment='right', clip_on=True, rotation=30)


def plot_mrnas(genes, subplot_spec, ax):
    ax = plt.subplot(subplot_spec, sharex=ax)

    #top = -1
    for g in genes:
        if g.strand == '+':
            #if top < 0:
            top = 0
        else:
            top = -0.1
        plot_mrna(g, ax, top)

    # remove ticks and boxes
    ax.xaxis.set(visible=False)
    ax.yaxis.set(visible=False)
    ax.set_frame_on(False)

    # set axis limits
    ax.set_ylim(bottom=-0.8, top=0)

    return ax


def get_gene_regions(genomicRegion):
    genes = []
    for g in genomicRegion.genes:
        genes.append(cn.GeneRegion(g, genomicRegion.geneDb))
    # sort genes by starting position
    genes.sort(key=lambda g: g.start)
    return genes


from matplotlib.transforms import Bbox, TransformedBbox, blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector, BboxConnectorPatch


def connect_bboxes(bbox1, bbox2, \
        loc1a, loc2a, loc1b, loc2b, \
        prop_lines, prop_patches=None):
    if prop_patches is None:
        prop_patches = prop_lines.copy()
        prop_patches['alpha'] = prop_patches.get('alpha', 1)*0.1

    c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
    c1.set_clip_on(False)
    c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
    c2.set_clip_on(False)

    bbox_patch1 = BboxPatch(bbox1, **prop_patches)
    bbox_patch2 = BboxPatch(bbox2, **prop_patches)

    p = BboxConnectorPatch(bbox1, bbox2,
            loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
            **prop_patches)
    p.set_clip_on(False)

    return c1, c2, bbox_patch1, bbox_patch2, p


def zoom_effect(ax1, ax2, xlim, **kwargs):
    trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
    trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)

    bbox = Bbox.from_extents(xlim[0], 0, xlim[1], 1)

    tbbox1 = TransformedBbox(bbox, trans1)
    tbbox2 = TransformedBbox(bbox, trans2)

    prop_patches = kwargs.copy()
    prop_patches['ec'] = 'none'
    prop_patches['alpha'] = 0.1

    c1, c2, bbox_patch1, bbox_patch2, p = \
        connect_bboxes(tbbox1, tbbox2,
            loc1a=3, loc2a=2, loc1b=4, loc2b=1,
            prop_lines=kwargs, prop_patches=prop_patches)

    ax1.add_patch(bbox_patch1)
    ax2.add_patch(bbox_patch2)
    ax2.add_patch(c1)
    ax2.add_patch(c2)
    ax2.add_patch(p)

    return c1, c2, bbox_patch1, bbox_patch2, p


def plot_locus(x, y, seg_x=None, seg_y=None, xlim=None, genes=None, yref=0, downsample=(1,1)):
    gs = gridspec.GridSpec(3, 1, height_ratios=[1, 0.75, 1])

    fig = plt.figure()
    fig.patch.set(facecolor='w')

    main_ax = plot_sample_profile(x, y, seg_x, seg_y, gs[0, :], yref=yref, downsample=downsample[0])
    main_ax.xaxis.set_major_formatter(ticker.FuncFormatter(coordinate_mbp))

    # zoomed-in profile
    ax = plot_sample_profile(x, y, seg_x, seg_y, gs[1, :], yref=yref, downsample=downsample[1])

    main_ax.set_xlim(left=x[0], right=x[-1])
    ax.grid(True)

    genes_ax = None
    if genes is not None:
        genes_ax = plot_mrnas(genes, gs[2, :], ax)

    if xlim is None:
        xlim = x[0], x[-1]
    win_size = xlim[1] - xlim[0] + 1
    ax.set_xlim(left=xlim[0] - win_size*0.05, right=xlim[1] + win_size*0.05)
    ax.xaxis.set_major_formatter(ticker.FuncFormatter(coordinate_mbp))

    zoom_effect(main_ax, ax, xlim, lw=0, color='green')

    return main_ax, ax, genes_ax


def main():
    geneDatabase = cn.GeneDatabase('refGene.db')
    gregion = cn.GenomicRegion('chr5:121000000-122000000', geneDatabase=geneDatabase)

    unit = 1e4
    x = np.arange(gregion.start, gregion.end+unit, unit)
    y = np.hstack( [np.random.randn(21)+1, np.random.randn(30)-1, np.random.randn(50)+2] )
    seg_x = np.matrix([ [x[0], x[20]], [x[20], x[50]], [x[50], x[-1]] ])
    seg_y = np.array([1, -1, 2])

    genes = get_gene_regions(gregion)

    xlim = (121110000, 121510000)
    main_ax, ax, genes_ax = plot_locus(x, y, seg_x, seg_y, xlim, genes)
    main_ax.set(ylabel='DNA copy-number')
    plt.show()


if __name__ == '__main__':
    main()
gpl-3.0
1,947,092,367,173,622,500
24.0375
122
0.668497
false
nan86150/ImageFusion
lib/python2.7/site-packages/scipy/_lib/_version.py
65
4792
"""Utility to compare (Numpy) version strings.

The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.

"""

import re

from scipy._lib.six import string_types


__all__ = ['NumpyVersion']


class NumpyVersion():
    """Parse and compare numpy version strings.

    Numpy has the following versioning scheme (numbers given are examples;
    they can be >9 in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance.

    Parameters
    ----------
    vstring : str
        Numpy version string (``np.__version__``).

    Notes
    -----
    All dev versions of the same (pre-)release compare equal.

    Examples
    --------
    >>> from scipy._lib._version import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"

    """
    def __init__(self, vstring):
        self.vstring = vstring
        ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
        if not ver_main:
            raise ValueError("Not a valid numpy version string")

        self.version = ver_main.group()
        self.major, self.minor, self.bugfix = [int(x) for x in
            self.version.split('.')]
        if len(vstring) == ver_main.end():
            self.pre_release = 'final'
        else:
            alpha = re.match(r'a\d', vstring[ver_main.end():])
            beta = re.match(r'b\d', vstring[ver_main.end():])
            rc = re.match(r'rc\d', vstring[ver_main.end():])
            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
            if pre_rel:
                self.pre_release = pre_rel[0].group()
            else:
                self.pre_release = ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix"""
        if self.major == other.major:
            if self.minor == other.minor:
                if self.bugfix == other.bugfix:
                    vercmp = 0
                elif self.bugfix > other.bugfix:
                    vercmp = 1
                else:
                    vercmp = -1
            elif self.minor > other.minor:
                vercmp = 1
            else:
                vercmp = -1
        elif self.major > other.major:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final."""
        if self.pre_release == other.pre_release:
            vercmp = 0
        elif self.pre_release == 'final':
            vercmp = 1
        elif other.pre_release == 'final':
            vercmp = -1
        elif self.pre_release > other.pre_release:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare(self, other):
        if not isinstance(other, (string_types, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, string_types):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp == 0:
            # Same x.y.z version, check for alpha/beta/rc
            vercmp = self._compare_pre_release(other)
            if vercmp == 0:
                # Same version and same pre-release, check if dev version
                if self.is_devversion is other.is_devversion:
                    vercmp = 0
                elif self.is_devversion:
                    vercmp = -1
                else:
                    vercmp = 1

        return vercmp

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return "NumpyVersion(%s)" % self.vstring
mit
985,191,422,128,744,100
29.916129
79
0.536728
false
rtrwalker/geotecha
geotecha/speccon/test/test_speccon1d_vrw.py
1
176841
# geotecha - A software suite for geotechncial engineering # Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/gpl.html. """Some test routines for the speccon_1d_vert_radial_boundary_well resistance module """ from __future__ import division, print_function from nose import with_setup from nose.tools.trivial import assert_almost_equal from nose.tools.trivial import assert_raises from nose.tools.trivial import ok_ from numpy.testing import assert_allclose import unittest from math import pi import numpy as np import textwrap import matplotlib.pyplot as plt from geotecha.piecewise.piecewise_linear_1d import PolyLine from geotecha.speccon.speccon1d_vrw import Speccon1dVRW import geotecha.mathematics.transformations as transformations TERZ1D_Z = np.array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]) TERZ1D_T = np.array([0.008, 0.018, 0.031, 0.049, 0.071, 0.096, 0.126, 0.159, 0.197, 0.239, 0.286, 0.34, 0.403, 0.477, 0.567, 0.684, 0.848, 1.129, 1.781]) TERZ1D_POR = np.array( [[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [ 0.5708047 , 0.40183855, 0.31202868, 0.25060581, 0.209277 , 0.18051017, 0.15777238, 0.1401947 , 0.12492869, 0.11139703, 0.09868545, 0.08618205, 0.07371295, 0.0613951 , 0.04916581, 0.03683692, 0.02457785, 0.01228656, 0.00245901], [ 0.8861537 , 0.70815945, 0.57815202, 0.47709676, 0.40440265, 0.35188372, 0.30934721, 0.27584089, 0.24631156, 0.21986645, 0.19487593, 0.17022241, 0.145606 , 0.12127752, 0.09712088, 0.07276678, 0.04855051, 0.02427058, 0.00485748], [ 0.98229393, 0.8861537 , 0.77173068, 0.66209592, 0.57402972, 0.50633278, 0.44919934, 0.40274312, 0.36079264, 0.322593 , 0.28615206, 0.25003642, 0.21390512, 0.17817202, 0.14268427, 0.10690487, 0.07132769, 0.03565698, 0.00713634], [ 0.9984346 , 0.96498502, 0.89182244, 0.79866319, 0.71151086, 0.63842889, 0.57300943, 0.5173496 , 0.46536864, 0.41697458, 0.37024076, 0.32365106, 0.27692667, 0.23067729, 0.18473404, 0.13841059, 0.09234855, 0.04616539, 0.00923947], [ 0.99992277, 0.99159201, 0.95536184, 0.8897753 , 0.81537699, 0.74554825, 0.67795464, 0.61693194, 0.55750293, 0.50070214, 0.44507671, 0.38925529, 0.33311924, 0.27750057, 0.22223477, 0.16650815, 0.11109548, 0.05553705, 0.0111151 ], [ 0.9999979 , 0.9984346 , 0.9840325 , 0.94470726, 0.88846498, 0.82769841, 0.76271322, 0.69962982, 0.63517948, 0.57181325, 0.50885214, 0.44524423, 0.38110176, 0.3174894 , 0.25426314, 0.19050572, 0.12710688, 0.0635412 , 0.01271704], [ 0.99999997, 0.99977515, 0.99506515, 0.97461982, 0.93621426, 0.88684221, 0.82720628, 0.76436582, 0.69689722, 0.62871883, 0.5600537 , 0.49025645, 0.41969701, 0.34965996, 0.28003063, 0.2098124 , 0.13998847, 0.06998076, 0.01400585], [ 1. 
, 0.99997517, 0.99868444, 0.9892702 , 0.96479424, 0.92594095, 0.87215551, 0.81066205, 0.74161724, 0.67020692, 0.59748729, 0.52320368, 0.44795959, 0.37322105, 0.29890288, 0.2239528 , 0.14942309, 0.07469716, 0.01494978], [ 1. , 0.99999789, 0.99968908, 0.99551731, 0.97956541, 0.94796078, 0.89856843, 0.83840947, 0.76868357, 0.69543129, 0.62029292, 0.54329327, 0.46519818, 0.3875934 , 0.31041531, 0.23257876, 0.15517842, 0.07757427, 0.0155256 ], [ 1. , 0.99999973, 0.99988166, 0.9971974 , 0.98407824, 0.95504225, 0.90726835, 0.8476479 , 0.77774256, 0.70389411, 0.62795246, 0.55004364, 0.47099154, 0.39242376, 0.31428453, 0.23547787, 0.15711273, 0.07854125, 0.01571913]]) TERZ1D_AVP = np.array( [[ 0.8990747 , 0.84861205, 0.80132835, 0.75022262, 0.69933407, 0.65038539, 0.59948052, 0.55017049, 0.49966188, 0.44989787, 0.40039553, 0.35035814, 0.2998893 , 0.24983377, 0.20008097, 0.14990996, 0.10002108, 0.05000091, 0.01000711]]) def test_terzaghi_1d_PTIB(): """test for terzaghi 1d PTIB dTv turns out to be 1.0 Pervious top impervious bottom instant surcharge of 100 """ reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 1 dTv = 0.1 neig = 20 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_depth = PolyLine([0,1], [100,100]) surcharge_vs_time = PolyLine([0,0.0,8], [0,1,1]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(TERZ1D_Z), repr(TERZ1D_T))) por = 100 * TERZ1D_POR avp = 100 * TERZ1D_AVP settle = 100 * (1 - TERZ1D_AVP) for impl in ["vectorized", "fortran"]: for dT in [0.1, 1, 10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. test_terzaghi_1d_PTIB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_terzaghi_1d_PTIB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. test_terzaghi_1d_PTIB, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_terzaghi_1d_PTPB(): """test for terzaghi 1d PTPB dTv turns out to be 1.0 Pervious top pervious bottom instant surcharge of 100 """ reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 0 dTv = 0.1 * 0.25 neig = 20 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_depth = PolyLine([0,1], [100,100]) surcharge_vs_time = PolyLine([0,0.0,8], [0,1,1]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(np.append(0.5*TERZ1D_Z, 1 - 0.5*TERZ1D_Z[::-1])), repr(TERZ1D_T))) por = 100 * np.vstack((TERZ1D_POR, TERZ1D_POR[::-1,:])) avp = 100 * TERZ1D_AVP settle = 100 * (1 - TERZ1D_AVP) for impl in ["vectorized", "fortran"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # print(a.por) assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. 
test_terzaghi_1d_PTPB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_terzaghi_1d_PTPB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. test_terzaghi_1d_PTPB, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_BC_terzaghi_1d_PTIB(): """test for boundary condition immitation of terzaghi 1d, PTIB. dTv turns out to be 1.0 Pervious top impervious bottom imitates surcharge of 100. i.e. top BC reduces instantly to -100 """ reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 1 dTv = 0.1 neig = 20 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) top_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(TERZ1D_Z), repr(TERZ1D_T))) por = 100 * TERZ1D_POR - 100 avp = 100 * TERZ1D_AVP - 100 settle = 100 * (1 - TERZ1D_AVP) for impl in ["vectorized", "fortran"]: for dT in [0.1, 1, 10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. test_BC_terzaghi_1d_PTIB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_BC_terzaghi_1d_PTIB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. test_BC_terzaghi_1d_PTIB, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_BC_terzaghi_1d_PTPB(): """test for boundary condition imitation of terzaghi 1d, PTPB. dTv turns out to be 1.0 Pervious top pervious bottom imitates surcharge of 100. i.e. top and bot BC reduces instantly to -100 """ reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 0 dTv = 0.1 * 0.25 neig = 20 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) top_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) bot_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(np.append(0.5*TERZ1D_Z, 1 - 0.5*TERZ1D_Z[::-1])), repr(TERZ1D_T))) por = 100 * np.vstack((TERZ1D_POR, TERZ1D_POR[::-1,:])) - 100 avp = 100 * TERZ1D_AVP - 100 settle = 100 * (1 - TERZ1D_AVP) for impl in [ "vectorized", "fortran"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # print(a.por) assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. test_BC_terzaghi_1d_PTPB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_BC_terzaghi_1d_PTPB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. 
test_BC_terzaghi_1d_PTPB, settle, " "implementation='%s', dT=%s" % (impl, dT))) # def test_schiffman_and_stein_1970(): """test for multilayer vertical consolidation example as per Schiffman and stein 1970 """ reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #<start params from Schiffman and stein h = np.array([10, 20, 30, 20]) cv = np.array([0.0411, 0.1918, 0.0548, 0.0686]) mv = np.array([3.07e-3, 1.95e-3, 9.74e-4, 1.95e-3]) #kv = np.array([7.89e-6, 2.34e-5, 3.33e-6, 8.35e-6]) kv = cv*mv bctop = 0 #htop = None #ktop = None bcbot = 0 #hbot = None #kbot = None n = 25 surcharge_vs_time = [PolyLine([0,0,10], [0,100,100])] #end params from Schiffman and stein> H = np.sum(h) z2 = np.cumsum(h) / H z1 = (np.cumsum(h) - h) / H mvref = mv[0] kvref = kv[0] drn = 0 dTv = 1 / H**2 * kvref / mvref neig = 60 mv = PolyLine(z1, z2, mv/mvref, mv/mvref) kv = PolyLine(z1, z2, kv/kvref, kv/kvref) dTh=1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,30000], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 1. , 2. , 3. , 4. , 5. , 6. , 7. , 8. , 9. , 10. , 12. , 14. , 16. , 18. , 20. , 22. , 24. , 26. , 28. , 30. , 33. , 36. , 39. , 42. , 45. , 48. , 51. , 54. , 57. , 60. , 62.22222222, 64.44444444, 66.66666667, 68.88888889, 71.11111111, 73.33333333, 75.55555556, 77.77777778, 80. ])/H tvals=np.array( [1.21957046e+02, 1.61026203e+02, 2.12611233e+02, 2.80721620e+02, 3.70651291e+02, 4.89390092e+02, 740.0, 8.53167852e+02, 1.12648169e+03, 1.48735211e+03, 1.96382800e+03, 2930.0, 3.42359796e+03, 4.52035366e+03, 5.96845700e+03, 7195.0, 1.04049831e+04, 1.37382380e+04, 1.81393069e+04, 2.39502662e+04, 3.16227766e+04]) ppress_z_tval_indexes=[6, 11, 15] avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] """) t = np.array( [1.21957046e+02, 1.61026203e+02, 2.12611233e+02, 2.80721620e+02, 3.70651291e+02, 4.89390092e+02, 740.0, 8.53167852e+02, 1.12648169e+03, 1.48735211e+03, 1.96382800e+03, 2930.0, 3.42359796e+03, 4.52035366e+03, 5.96845700e+03, 7195.0, 1.04049831e+04, 1.37382380e+04, 1.81393069e+04, 2.39502662e+04, 3.16227766e+04]) z = np.array( [ 0. , 0.4, 0.8, 1.2, 1.6, 2. , 2.4, 2.8, 3.2, 3.6, 4. , 4.4, 4.8, 5.2, 5.6, 6. , 6.4, 6.8, 7.2, 7.6, 8. , 8.4, 8.8, 9.2, 9.6, 10. , 10.8, 11.6, 12.4, 13.2, 14. , 14.8, 15.6, 16.4, 17.2, 18. , 18.8, 19.6, 20.4, 21.2, 22. , 22.8, 23.6, 24.4, 25.2, 26. , 26.8, 27.6, 28.4, 29.2, 30. , 31.2, 32.4, 33.6, 34.8, 36. , 37.2, 38.4, 39.6, 40.8, 42. , 43.2, 44.4, 45.6, 46.8, 48. , 49.2, 50.4, 51.6, 52.8, 54. , 55.2, 56.4, 57.6, 58.8, 60. , 80. 
]) avp = np.array([ [ 92.76130612, 91.67809644, 90.42431165, 88.96340161, 87.24639479, 85.20740028, 81.3788705 , 79.77961148, 76.12942592, 71.64722094, 66.18237931, 56.39858151, 51.98404777, 43.386903 , 34.191069 , 27.95321684, 16.51473866, 9.56939964, 4.65849181, 1.80156111, 0.51403147]]) settle = np.array([ [ 1.41200092, 1.6224817 , 1.86433805, 2.14224976, 2.4616197 , 2.82878775, 3.48058021, 3.73910018, 4.30338166, 4.95707122, 5.71168747, 6.98645472, 7.54031351, 8.59560031, 9.70441392, 10.45107157, 11.81714408, 12.64703246, 13.23440044, 13.57631953, 13.73045647]]) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.03239323e+01, 5.57722764e+00, 2.72863702e+00], [ 2.04927512e+01, 1.11292772e+01, 5.44654181e+00], [ 3.03596123e+01, 1.66311548e+01, 8.14302495e+00], [ 3.97935136e+01, 2.20582323e+01, 1.08074825e+01], [ 4.86855554e+01, 2.73864218e+01, 1.34294382e+01], [ 5.69534067e+01, 3.25923412e+01, 1.59985851e+01], [ 6.45436877e+01, 3.76534683e+01, 1.85048266e+01], [ 7.14321862e+01, 4.25482803e+01, 2.09383162e+01], [ 7.76220298e+01, 4.72563776e+01, 2.32894965e+01], [ 8.31401151e+01, 5.17585907e+01, 2.55491366e+01], [ 8.63829254e+01, 5.46252500e+01, 2.69970037e+01], [ 8.91243107e+01, 5.72907091e+01, 2.83539334e+01], [ 9.14062350e+01, 5.97466005e+01, 2.96153647e+01], [ 9.32743935e+01, 6.19856150e+01, 3.07770593e+01], [ 9.47754352e+01, 6.40015052e+01, 3.18351159e+01], [ 9.59545034e+01, 6.57890773e+01, 3.27859823e+01], [ 9.68531745e+01, 6.73441748e+01, 3.36264667e+01], [ 9.75078308e+01, 6.86636534e+01, 3.43537482e+01], [ 9.79484713e+01, 6.97453488e+01, 3.49653850e+01], [ 9.81979376e+01, 7.05880378e+01, 3.54593221e+01], [ 9.93247463e+01, 7.72482246e+01, 3.95369875e+01], [ 9.97711949e+01, 8.20552178e+01, 4.25682125e+01], [ 9.99281809e+01, 8.50582178e+01, 4.44716699e+01], [ 9.99714904e+01, 8.63004510e+01, 4.51947246e+01], [ 9.99590815e+01, 8.57994747e+01, 4.47146245e+01], [ 9.98733033e+01, 8.35381707e+01, 4.30391462e+01], [ 9.96067213e+01, 7.94677707e+01, 4.02066990e+01], [ 9.88907136e+01, 7.35225677e+01, 3.62858635e+01], [ 9.71725283e+01, 6.56443116e+01, 3.13743194e+01], [ 9.34796129e+01, 5.58128098e+01, 2.55970977e+01], [ 9.11831418e+01, 5.22050000e+01, 2.36222413e+01], [ 8.71521621e+01, 4.77829652e+01, 2.13561648e+01], [ 8.11139375e+01, 4.25889537e+01, 1.88257516e+01], [ 7.28265056e+01, 3.66856767e+01, 1.60614922e+01], [ 6.21545873e+01, 3.01556450e+01, 1.30971084e+01], [ 4.91434216e+01, 2.30996016e+01, 9.96911691e+00], [ 3.40694327e+01, 1.56340897e+01, 6.71633983e+00], [ 1.74495002e+01, 7.88825146e+00, 3.37936978e+00], [ 7.44980896e-12, 2.91898853e-12, 7.07416666e-13]]) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.figure() # plt.plot(por, z,'b-*') # plt.plot(a.por, z, 'r-+') # # # plt.figure() # plt.plot(t,settle[0],'b-*') # plt.plot(t, a.set[0], 'r-+') # plt.figure() # plt.plot(t, avp[0],'b-*') # plt.plot(t, a.avp[0], 'r-+') # plt.show() #atol is quite high for these but looking at comparative plots #they are ok. assert_allclose(a.por, por, atol=1, err_msg = ("Fail. test_schiffman_and_stein_1970, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. test_schiffman_and_stein_1970, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. 
test_schiffman_and_stein_1970, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_fixed_ppress_terzaghi_PTPB(): """test for fixed_ppress fixed pore pressure is zero at 0.5, each half is equivalent to terzaghi_1d PTPB instant surcharge of 100 close to the fixed ppress zero is not perfectly accurate but it is reasonable """ tslice = slice(5,None) #restrict times zslice = slice(2,None) # restrict zvals t = TERZ1D_T[tslice] z = np.append(0.25*TERZ1D_Z[zslice], [0.5 - 0.25*TERZ1D_Z[zslice][::-1], 0.5 + 0.25*TERZ1D_Z[zslice], 1 - 0.25 * TERZ1D_Z[zslice][::-1]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 0 dTv = 0.1 /16 neig = 40 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_depth = PolyLine([0,1], [100,100]) surcharge_vs_time = PolyLine([0,0.0,8], [0,1,1]) fixed_ppress = [(0.5, 10000, None)] ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(z), repr(t))) por = 100 * np.vstack((TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:], TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:])) avp = 100 * TERZ1D_AVP[:, tslice] settle = 100 * (1 - TERZ1D_AVP[:,tslice]) for impl in ["vectorized"]: for dT in [0.1, 1, 10]: a = Speccon1dVRW((reader + "\n" + "implementation = '{}'".format(impl) + "\n" + "dT = {}".format(dT))) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*') # plt.plot(a.por, z, 'r-+') # # # plt.figure() # plt.plot(t,settle[0],'b-*') # plt.plot(t, a.set[0], 'r-+') # plt.figure() # plt.plot(t, avp[0],'b-*') # plt.plot(t, a.avp[0], 'r-+') # plt.show() assert_allclose(a.avp, avp, atol=2, err_msg = ("Fail. test_fixed_ppress_terzaghi_PTPB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=5, err_msg = ("Fail. test_fixed_ppress_terzaghi_PTPB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=2, err_msg = ("Fail. test_fixed_ppress_terzaghi_PTPB, settle, " "implementation='%s', dT=%s" % (impl, dT))) # def test_fixed_ppress_BC_terzaghi_PTPB(): """test for fixed_ppress fixed pore pressure is -100 at 0.5. fixed boundary conditions are instantly -100. each half is equivalent to terzaghi_1d PTPB -100. 
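A rough sketch (not part of the test itself) of how the expected values below
are formed - they are the Terzaghi PTPB benchmark shifted by the -100 datum
(TERZ1D_* are the module level benchmark arrays):

    por    = 100 * (TERZ1D_POR halves, stacked and mirrored about z=0.5) - 100
    avp    = 100 * TERZ1D_AVP - 100
    settle = 100 * (1 - TERZ1D_AVP)   # settlement is unaffected by the datum shift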
instant surcharge of 100 close to the fixed ppress zero is not perfectly accurate but it is reasonable """ tslice = slice(5,None) #restrict times zslice = slice(2,None) # restrict zvals t = TERZ1D_T[tslice] z = np.append(0.25*TERZ1D_Z[zslice], [0.5 - 0.25*TERZ1D_Z[zslice][::-1], 0.5 + 0.25*TERZ1D_Z[zslice], 1 - 0.25 * TERZ1D_Z[zslice][::-1]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 0 dTv = 0.1 /16 neig = 40 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) #surcharge_vs_depth = PolyLine([0,1], [100,100]) #surcharge_vs_time = PolyLine([0,0.0,8], [0,1,1]) top_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) bot_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) fixed_ppress = [(0.5, 10000, PolyLine([0,0,10],[0,-100,-100]))] ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(z), repr(t))) por = -100 + 100 * np.vstack((TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:], TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:])) avp = -100 + 100 * TERZ1D_AVP[:, tslice] settle = 100 * (1 - TERZ1D_AVP[:,tslice]) for impl in ["vectorized"]: for dT in [0.1, 1, 10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*') # plt.plot(a.por, z, 'r-+') # # # plt.figure() # plt.plot(t,settle[0],'b-*') # plt.plot(t, a.set[0], 'r-+') # plt.figure() # plt.plot(t, avp[0],'b-*') # plt.plot(t, a.avp[0], 'r-+') # plt.show() assert_allclose(a.avp, avp, atol=2, err_msg = ("Fail. test_fixed_ppress_BC_terzaghi_PTPB, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=5, err_msg = ("Fail. test_fixed_ppress_BC_terzaghi_PTPB, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=2, err_msg = ("Fail. test_fixed_ppress_BC_terzaghi_PTPB, settle, " "implementation='%s', dT=%s" % (impl, dT))) # def test_hansbo_avp(): """test for average hansbo radial consolidation instant surcharge of 100 compare with 100*exp(t) tolerance is quite large because method is not great when no vertical drainage is present. 
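A minimal sketch (assuming the parameter combination below gives a unit
decay constant) of where the tabulated reference values come from:

    import numpy as np
    t = np.array([0.05, 0.06, 0.08])
    ref = 100 * np.exp(-t)   # e.g. 100*exp(-0.05) = 95.122942

i.e. the comparison curve is u = 100*exp(-t).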
""" t = np.array( [ 0.05, 0.06, 0.08, 0.1 , 0.13, 0.17, 0.21, 0.27, 0.35, 0.44, 0.57, 0.72, 0.92, 1.17, 1.49, 1.9 , 2.42, 3.09, 3.93, 5.01]) hansbo_avp = np.array( [[ 0.95122942, 0.94176453, 0.92311635, 0.90483742, 0.87809543, 0.84366482, 0.81058425, 0.76337949, 0.70468809, 0.64403642, 0.56552544, 0.48675226, 0.39851904, 0.31036694, 0.22537266, 0.14956862, 0.08892162, 0.04550195, 0.01964367, 0.0066709 ]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 1 dTh = 0.1 neig = 60 mvref = 2.0 mv = PolyLine([0, 1], [0.5, 0.5]) kh = PolyLine([0, 1], [5, 5]) et = PolyLine([0,1], [1, 1]) #note: combo of dTv, mv, kv essentially gives dTv = 1 surcharge_vs_depth = PolyLine([0,1], [100,100]) surcharge_vs_time = PolyLine([0,0.0,8], [0,1,1]) dTw=1000 kwref=1 kw = PolyLine([0,1], [1,1]) avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(t))) avp = 100 * hansbo_avp settle = 100 - 100 * hansbo_avp for impl in ["vectorized"]: for dT in [0.1, 1, 10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # # # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. test_hansbo_avp, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_hansbo_avp, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_hansbo_avp_vacuum(): """test for average hansbo radial consolidation BC and vacuum drop instantly to -100 compare with 100*exp(t)-100 tolerance is quite large because method is not great when no vertical drainage is present. 
""" t = np.array( [ 0.05, 0.06, 0.08, 0.1 , 0.13, 0.17, 0.21, 0.27, 0.35, 0.44, 0.57, 0.72, 0.92, 1.17, 1.49, 1.9 , 2.42, 3.09, 3.93, 5.01]) hansbo_avp = np.array( [[ 0.95122942, 0.94176453, 0.92311635, 0.90483742, 0.87809543, 0.84366482, 0.81058425, 0.76337949, 0.70468809, 0.64403642, 0.56552544, 0.48675226, 0.39851904, 0.31036694, 0.22537266, 0.14956862, 0.08892162, 0.04550195, 0.01964367, 0.0066709 ]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 1 dTh = 0.1 neig = 60 mvref = 2.0 mv = PolyLine([0, 1], [0.5, 0.5]) kh = PolyLine([0, 1], [5, 5]) et = PolyLine([0,1], [1, 1]) #note: combo of dTv, mv, kv essentially gives dTv = 1 #vacuum_vs_depth = PolyLine([0,1], [1,1]) #vacuum_vs_time = PolyLine([0,0.0,8], [0,-100,-100]) dTw=1000 kwref=1 kw = PolyLine([0,1], [1,1]) top_vs_time = PolyLine([0,0.0,8], [0,-100,-100]) avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(t))) avp = 100 * hansbo_avp - 100 settle = 100 - 100 * hansbo_avp for impl in ["vectorized"]: for dT in [0.1,1,10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # # # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. test_hansbo_avp_vacuum, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. 
test_hansbo_avp_vacuum, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_terzaghi_1d_PTPB_bot_BC_gradient(): """test for terzaghi 1d PTPB simulated by specifying pore pressure gradient at bottom top BC drops to -100 instantly gradient at bot BC is prescribed should be same as terzaghi PTPB - 100 """ flow_t = np.array([ 0, 0.00000000e+00, 1.00000000e-05, 1.32571137e-05, 1.75751062e-05, 2.32995181e-05, 3.08884360e-05, 4.09491506e-05, 5.42867544e-05, 7.19685673e-05, 9.54095476e-05, 1.26485522e-04, 1.67683294e-04, 2.22299648e-04, 2.94705170e-04, 3.90693994e-04, 5.17947468e-04, 6.86648845e-04, 9.10298178e-04, 1.20679264e-03, 1.59985872e-03, 2.12095089e-03, 2.81176870e-03, 3.72759372e-03, 4.94171336e-03, 6.55128557e-03, 8.68511374e-03, 1.15139540e-02, 1.52641797e-02, 2.02358965e-02, 2.68269580e-02, 3.55648031e-02, 4.71486636e-02, 6.25055193e-02, 8.28642773e-02, 1.09854114e-01, 1.45634848e-01, 1.93069773e-01, 2.55954792e-01, 3.39322177e-01, 4.49843267e-01, 5.96362332e-01, 7.90604321e-01, 1.04811313e+00, 1.38949549e+00, 1.84206997e+00, 2.44205309e+00, 3.23745754e+00, 4.29193426e+00, 5.68986603e+00, 7.54312006e+00, 1.00000000e+01]) # flow_v comes from terzaghi_1d_flowrate(z=np.array([0.0]), t=flow_t[tslice], kv=10, mv=1, gamw=10, ui=100, nterms=500) flow_v = -np.array([ 0.00000000e+00, 1.00000000e+05, 1.78412412e+04, 1.54953209e+04, 1.34578624e+04, 1.16883065e+04, 1.01514272e+04, 8.81663000e+03, 7.65734340e+03, 6.65048985e+03, 5.77602610e+03, 5.01654435e+03, 4.35692582e+03, 3.78403963e+03, 3.28648146e+03, 2.85434652e+03, 2.47903242e+03, 2.15306785e+03, 1.86996392e+03, 1.62408493e+03, 1.41053624e+03, 1.22506677e+03, 1.06398442e+03, 9.24082570e+02, 8.02576220e+02, 6.97046575e+02, 6.05392880e+02, 5.25790600e+02, 4.56655118e+02, 3.96610163e+02, 3.44460438e+02, 2.99167808e+02, 2.59830644e+02, 2.25665819e+02, 1.95991124e+02, 1.70184572e+02, 1.47532018e+02, 1.26954815e+02, 1.07034205e+02, 8.66871910e+01, 6.59246745e+01, 4.59181293e+01, 2.84338280e+01, 1.50624045e+01, 6.48748315e+00, 2.12376806e+00, 4.83256782e-01, 6.78952680e-02, 5.03366995e-03, 1.59915607e-04, 1.65189842e-06, 3.84807183e-09]) # flow_t =np.array([ 0.0, 0.00000000e+00, 1.00000000e-04, 2.03503287e-04, # 4.14135879e-04, 8.42780126e-04, 1.71508526e-03, # 3.49025488e-03, 7.10278341e-03, 1.44543977e-02, # 2.94151745e-02, 5.98608469e-02, 1.21818791e-01, # 2.47905244e-01, 5.04495321e-01, 1.02666456e+00, # 2.08929613e+00]) # flow_v = -np.array([ 0, 2.00000000e+05, 1.12837917e+04, 7.90986998e+03, # 5.54477121e+03, 3.88685121e+03, 2.72465929e+03, # 1.90996975e+03, 1.33887729e+03, 9.38544911e+02, # 6.57914329e+02, 4.61193930e+02, 3.23118191e+02, # 2.18601587e+02, 1.15205739e+02, 3.17620214e+01, # 2.30788836e+00]) z = np.append(0.5*TERZ1D_Z, 1 - 0.5*TERZ1D_Z[::-1]) t = TERZ1D_T reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 1 dTv = 1 * 0.25 neig = 15 mvref = 1.0 mv = PolyLine([0,1], [1,1]) kv = PolyLine([0,1], [1,1]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) top_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) bot_vs_time = PolyLine([0, 0.0, 5], [0,-100,-100]) bot_vs_time = PolyLine(np.%s, np.%s) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(flow_t), repr(flow_v*2), repr(z),repr(t))) # we use flow_v*2 because flow_v on it's own is for flowrate of # terzaghi PTIB where h=H = 1. 
for this test we have basically have 2 layers # each of h=0.5. Thus we divide dTv by 4. The flow_v data is du/dz. # because H was one du/dz = du/Dz. when h=0.5 we need to multiply flow_v # 2 to get the same gradient at the base por = 100 * np.vstack((TERZ1D_POR, TERZ1D_POR[::-1,:])) - 100 avp = 100 * TERZ1D_AVP - 100 settle = 100 * (1 - TERZ1D_AVP) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # slope = (a.por[-1,:]-a.por[-2,:]) / (a.ppress_z[-1]-a.ppress_z[-2]) # print(repr(t)) # print(repr(slope)) # print(a.por) # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z,lw=2) ## plt.plot(a.por, z, 'r-+', label='calculated') # plt.gca().invert_yaxis() # plt.legend() # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=2, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_terzaghi_1d_pumping(): """test for terzaghi 1d PTPB simulated by pumping at mid depth surcharge of 100 pumping at mid depth such that pore press at mid depth is zero top half should be same as terzaghi 1d PTPB, bottom half should be same as terzaghi 1d PTPB. 
but H is now 1/4 of terzaghi H """ flow_t = np.array([ 0, 0.00000000e+00, 1.00000000e-05, 1.32571137e-05, 1.75751062e-05, 2.32995181e-05, 3.08884360e-05, 4.09491506e-05, 5.42867544e-05, 7.19685673e-05, 9.54095476e-05, 1.26485522e-04, 1.67683294e-04, 2.22299648e-04, 2.94705170e-04, 3.90693994e-04, 5.17947468e-04, 6.86648845e-04, 9.10298178e-04, 1.20679264e-03, 1.59985872e-03, 2.12095089e-03, 2.81176870e-03, 3.72759372e-03, 4.94171336e-03, 6.55128557e-03, 8.68511374e-03, 1.15139540e-02, 1.52641797e-02, 2.02358965e-02, 2.68269580e-02, 3.55648031e-02, 4.71486636e-02, 6.25055193e-02, 8.28642773e-02, 1.09854114e-01, 1.45634848e-01, 1.93069773e-01, 2.55954792e-01, 3.39322177e-01, 4.49843267e-01, 5.96362332e-01, 7.90604321e-01, 1.04811313e+00, 1.38949549e+00, 1.84206997e+00, 2.44205309e+00, 3.23745754e+00, 4.29193426e+00, 5.68986603e+00, 7.54312006e+00, 1.00000000e+01]) # flow_v comes from terzaghi_1d_flowrate(z=np.array([0.0]), t=flow_t[tslice], kv=10, mv=1, gamw=10, ui=100, nterms=500) flow_v = -np.array([ 0.00000000e+00, 1.00000000e+05, 1.78412412e+04, 1.54953209e+04, 1.34578624e+04, 1.16883065e+04, 1.01514272e+04, 8.81663000e+03, 7.65734340e+03, 6.65048985e+03, 5.77602610e+03, 5.01654435e+03, 4.35692582e+03, 3.78403963e+03, 3.28648146e+03, 2.85434652e+03, 2.47903242e+03, 2.15306785e+03, 1.86996392e+03, 1.62408493e+03, 1.41053624e+03, 1.22506677e+03, 1.06398442e+03, 9.24082570e+02, 8.02576220e+02, 6.97046575e+02, 6.05392880e+02, 5.25790600e+02, 4.56655118e+02, 3.96610163e+02, 3.44460438e+02, 2.99167808e+02, 2.59830644e+02, 2.25665819e+02, 1.95991124e+02, 1.70184572e+02, 1.47532018e+02, 1.26954815e+02, 1.07034205e+02, 8.66871910e+01, 6.59246745e+01, 4.59181293e+01, 2.84338280e+01, 1.50624045e+01, 6.48748315e+00, 2.12376806e+00, 4.83256782e-01, 6.78952680e-02, 5.03366995e-03, 1.59915607e-04, 1.65189842e-06, 3.84807183e-09]) tslice = slice(5,-2) #restrict times zslice = slice(1,None) # restrict zvals t = TERZ1D_T[tslice] z = np.append(0.25*TERZ1D_Z[zslice], [0.5 - 0.25*TERZ1D_Z[zslice][::-1], 0.5 + 0.25*TERZ1D_Z[zslice], 1 - 0.25 * TERZ1D_Z[zslice][::-1]]) # z = np.append(0.5*TERZ1D_Z, 1 - 0.5*TERZ1D_Z[::-1]) # t = TERZ1D_T reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = 0 dTv = 0.1 /16 neig = 40 mvref = 2.0 mv = PolyLine([0,1], [0.5,0.5]) kv = PolyLine([0,1], [5,5]) #dTv = 1/16 #mvref = 1.0 #mv = PolyLine([0,1], [1,1]) #kv = PolyLine([0,1], [1,1]) #note: combo of dTv, mv, kv essentially gives dTv = 1 dTh=1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0, 0.0, 10], [0,100,100]) surcharge_vs_depth = PolyLine([0, 1], [1,1]) pumping = (0.5, PolyLine(np.%s, np.%s)) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s """ % (repr(flow_t), repr(2*flow_v/4), repr(z),repr(t))) # we use 2*flow_v/4 because flow_v on it's own is for flowrate of # terzaghi PTIB where H = 1. for this test we have basically have 4 layers # each of H=0.25. Thus we divide dTv by 16. because our pump is # extracting for a quarter of the height we divide the original flow_v # by 4. But because we are using a single pump to drain both the top and # bottom halves we then multiply by 2. 
This gives us our 2*flow_v/4 por = 100 * np.vstack((TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:], TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:])) avp = 100 * TERZ1D_AVP[:, tslice] settle = 100 * (1 - TERZ1D_AVP[:,tslice]) # por = 100 * np.vstack((TERZ1D_POR, TERZ1D_POR[::-1,:])) - 100 # avp = 100 * TERZ1D_AVP - 100 # settle = 100 * (1 - TERZ1D_AVP) #Note here that the pore pressure at z = 0.5 is slightly off. for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # slope = (a.por[-1,:]-a.por[-2,:]) / (a.ppress_z[-1]-a.ppress_z[-2]) # print(repr(t)) # print(repr(slope)) # print(a.por) # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z,lw=2) ## plt.plot(a.por, z, 'r-+', label='calculated') # plt.gca().invert_yaxis() # plt.legend() # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=2, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_terzaghi_1d_PTPB_bot_BC_gradient, settle, " "implementation='%s', dT=%s" % (impl, dT))) # class test_omega_phase(unittest.TestCase): """compare omega_phase loads to equivalent piecewise""" ##To get the piecewise approximation of a mag_vs_time_PolyLIne use: ## #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #from geotecha.inputoutput.inputoutput import PrefixNumpyArrayString #import geotecha.piecewise.piecewise_linear_1d as pwise # #PrefixNumpyArrayString().turn_on() # #vs_depth = PolyLine([0,1], [1,1]) #vs_time = PolyLine([0,1,2.0], [0,10,10]) #omega_phase = (2*np.pi*0.1, 0) # #omega, phase = omega_phase #x, y = pwise.subdivide_x_y_into_segments(vs_time.x, vs_time.y, dx=0.1) # #y = y * np.cos(omega * x + phase) #v_time = PolyLine(x, y) #print(v_time) #reader is generic input file with named parameters: # drn, use_actual, load_to_test. 
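#A rough usage sketch (comment only, mirroring the test methods below): each
#test builds two models from the same reader template, one using the actual
#omega_phase option and one using the piecewise cosine approximation, then
#checks that pore pressure, average pore pressure and settlement agree, e.g.
# a = Speccon1dVRW(self.reader %
#                  {'drn': 0, 'use_actual': True, 'load_to_test': 'surcharge'})
# b = Speccon1dVRW(self.reader %
#                  {'drn': 0, 'use_actual': False, 'load_to_test': 'surcharge'})
# a.make_all()
# b.make_all()
# assert_allclose(a.por, b.por, atol=1e-2)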
reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np H = 1 drn = %(drn)d dT = 1 dTh = 1 dTv = 0.1 * 0.25 neig = 10 mvref = 2.0 kvref = 1.0 khref = 1.0 etref = 1.0 mv = PolyLine([0,1], [0.5,0.5]) kh = PolyLine([0,1], [1,1]) kv = PolyLine([0,1], [5,5]) et = PolyLine([0,1], [1,1]) dTw = 0.1 kwref=1 kw = PolyLine([0,1], [1,1]) vs_depth = PolyLine([0,1], [1,1]) vs_time = PolyLine([0,1,2.0], [0,10,10]) omega_phase = (2*np.pi*0.1, 0) use_actual = %(use_actual)s load_to_test = '%(load_to_test)s' if use_actual: if load_to_test=='surcharge': surcharge_vs_depth = vs_depth surcharge_vs_time = vs_time surcharge_omega_phase = omega_phase if load_to_test=='vacuum': vacuum_vs_depth = vs_depth vacuum_vs_time = vs_time vacuum_omega_phase = omega_phase if load_to_test=='top': top_vs_time = vs_time top_omega_phase = omega_phase if load_to_test=='bot': bot_vs_time = vs_time bot_omega_phase = omega_phase if load_to_test=='fixed_ppress': fixed_ppress = (0.2, 1000, vs_time) fixed_ppress_omega_phase = omega_phase if load_to_test=='pumping': pumping = (0.4, vs_time) pumping_omega_phase = omega_phase else: #approximate the sinusoidal loading with a piecewise load v_time = PolyLine(np.array([[ 0. , 0. ], [ 0.1 , 0.99802673], [ 0.2 , 1.9842294 ], [ 0.3 , 2.94686175], [ 0.4 , 3.87433264], [ 0.5 , 4.75528258], [ 0.6 , 5.57865892], [ 0.7 , 6.33378937], [ 0.8 , 7.01045344], [ 0.9 , 7.59895133], [ 1. , 8.09016994], [ 1.1 , 7.70513243], [ 1.2 , 7.28968627], [ 1.3 , 6.84547106], [ 1.4 , 6.3742399 ], [ 1.5 , 5.87785252], [ 1.6 , 5.35826795], [ 1.7 , 4.81753674], [ 1.8 , 4.25779292], [ 1.9 , 3.68124553], [ 2. , 3.09016994]])) if load_to_test=='surcharge': surcharge_vs_depth = vs_depth surcharge_vs_time = v_time surcharge_omega_phase = None if load_to_test=='vacuum': vacuum_vs_depth = vs_depth vacuum_vs_time = v_time vacuum_omega_phase = None if load_to_test=='top': top_vs_time = v_time top_omega_phase = None if load_to_test=='bot': bot_vs_time = v_time bot_omega_phase = None if load_to_test=='fixed_ppress': fixed_ppress = (0.2, 1000, v_time) fixed_ppress_omega_phase = None if load_to_test=='pumping': pumping = (0.4, v_time) pumping_omega_phase = None ppress_z = np.linspace(0,1,20) avg_ppress_z_pairs = [[0,1],[0.4, 0.5]] settlement_z_pairs = [[0,1],[0.4, 0.5]] #tvals = np.logspace(-2, 0.3,50) tvals = np.linspace(0.01, 2, 50) ppress_z_tval_indexes = np.arange(len(tvals))[::len(tvals)//7] #avg_ppress_z_pairs_tval_indexes = slice(None,None)#[0,4,6] #settlement_z_pairs_tval_indexes = slice(None, None)#[0,4,6] implementation='vectorized' #RLzero = -12.0 #plot_properties={} """) def test_surcharge(self): """test surcharge""" drn=0 load_to_test='surcharge' a = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) b = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) a.make_all() b.make_all() assert_allclose(a.por, b.por, atol=1e-2) assert_allclose(a.avp, b.avp, atol=1e-2) assert_allclose(a.set, b.set, atol=1e-2) # def test_vacuum(self): # """test vacuum""" # drn=0 # load_to_test='vacuum' # # a = Speccon1dVRW(self.reader % # {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) # b = Speccon1dVRW(self.reader % # {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) # a.make_all() # b.make_all() # assert_allclose(a.por, b.por, atol=1e-2) # assert_allclose(a.avp, b.avp, atol=1e-2) # assert_allclose(a.set, b.set, atol=1e-2) def test_top(self): """test top""" drn=0 load_to_test='top' 
a = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) b = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) a.make_all() b.make_all() assert_allclose(a.por, b.por, atol=1e-1) assert_allclose(a.avp, b.avp, atol=1e-2) assert_allclose(a.set, b.set, atol=1e-2) def test_bot(self): """test bot""" drn=0 load_to_test='bot' a = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) b = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) a.make_all() b.make_all() assert_allclose(a.por, b.por, atol=1e-1) assert_allclose(a.avp, b.avp, atol=1e-2) assert_allclose(a.set, b.set, atol=1e-2) def test_bot_gradient(self): """test bot gradient""" drn=1 load_to_test='bot' a = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) b = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) a.make_all() b.make_all() assert_allclose(a.por, b.por, atol=1e-2) assert_allclose(a.avp, b.avp, atol=1e-2) assert_allclose(a.set, b.set, atol=1e-2) # def test_fixed_ppress(self): # """test fixed_ppress""" # drn=0 # load_to_test='fixed_ppress' # # a = Speccon1dVRW(self.reader % # {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) # b = Speccon1dVRW(self.reader % # {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) # a.make_all() # b.make_all() # assert_allclose(a.por, b.por, atol=1e-2) # assert_allclose(a.avp, b.avp, atol=1e-2) # assert_allclose(a.set, b.set, atol=1e-2) def test_pumping(self): """test pumping""" drn=0 load_to_test='pumping' a = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': True, 'load_to_test': load_to_test}) b = Speccon1dVRW(self.reader % {'drn': drn, 'use_actual': False, 'load_to_test': load_to_test}) a.make_all() b.make_all() assert_allclose(a.por, b.por, atol=1e-2) assert_allclose(a.avp, b.avp, atol=1e-2) assert_allclose(a.set, b.set, atol=1e-2) def test_nogamiandli2003_lam_5(): """test for nogami and li 2003 lambda = 5 nogami and li use rigorous formulation, speccon uses equal strain so expect differences """ t = np.array([ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. , 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np ################################################ #nogami and li parameters surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) hs=0.05 h = np.array([1, hs, hs, 1, hs, hs, 0.5]) lam = 5 kv = np.array([1,lam/hs, lam/hs, 1, lam/hs, lam/hs, 1]) mv = np.array([1.0, 1, 1, 1, 1, 1, 1]) kh = kv r0 = 0.05 r1 = 20 * r0 #z = layer_coords(h, 45,2) bctop = 0 bcbot = 1 nv = 15 nh = 5 tpor = np.array([0.01,0.1, 0.4]) z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. 
, 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) t = np.array( [ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) max_iter=20000 vertical_roots_x0 = 2.2 vertical_roots_dx = 1e-3 vertical_roots_p = 1.01 ################################################ z2 = np.cumsum(h) z1 = z2-h H = np.sum(h) z1/=H z2/=H kv = PolyLine(z1, z2, kv, kv) mv = PolyLine(z1, z2, mv, mv) kh = kv drn = 1 neig=50 mvref=1.0 surcharge_vs_depth = mv #rw=0.05, re = 20*rw = 1.0, n=20, no smear zone #Therfore muI=2.253865374, eta = 2/mu/re**2 = 0.887364446 etref = 0.887364446 et = PolyLine(z1, z2, np.ones_like(z1), np.ones_like(z1)) dTv = 1/H**2 dTh = etref dTw=1000 kwref=1 kw = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. , 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) ppress_z/=H avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.array( [ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) ppress_z_tval_indexes = [0, 5, 9] #0.01, 0.1, 0.4 """) por = np.array( [[ 0. , 0. , 0. ], [ 32.94991031, 9.30846577, 1.06072593], [ 60.50469133, 18.35862945, 2.09682932], [ 79.52571707, 26.90168552, 3.08429341], [ 90.06180426, 34.70710083, 4.00029715], [ 94.55282914, 41.57006481, 4.82377368], [ 96.04098984, 47.31721713, 5.5359229 ], [ 96.63719346, 51.81051011, 6.12066479], [ 97.07580625, 54.94931228, 6.56502187], [ 97.27065296, 56.67105285, 6.85942107], [ 97.1057093 , 56.95080968, 6.99790774], [ 96.72063876, 55.80024278, 6.9782666 ], [ 96.14739652, 53.26618066, 6.8020474 ], [ 94.76324343, 49.42901062, 6.47449533], [ 91.15886034, 44.40084879, 6.00438888], [ 83.63522467, 38.3233171 , 5.40379048], [ 71.02182882, 31.36466998, 4.68771724], [ 70.97596237, 31.34317255, 4.68582079], [ 70.96055716, 31.33737197, 4.68627617], [ 70.9756061 , 31.34726572, 4.68908363], [ 71.02113267, 31.37285944, 4.6942446 ], [ 83.61246044, 39.36666485, 6.22480907], [ 91.38126902, 46.54039404, 7.6281786 ], [ 95.44742589, 52.78332249, 8.87779047], [ 97.18636227, 58.00520529, 9.9502025 ], [ 97.77106952, 62.13581189, 10.8255135 ], [ 97.95566599, 65.12364104, 11.48771358], [ 98.07498151, 66.93424716, 11.92495889], [ 98.17356755, 67.54865883, 12.12976682], [ 98.18150818, 66.96232237, 12.09912898], [ 98.05090064, 65.18486623, 11.83454088], [ 97.75273273, 62.24078238, 11.34194855], [ 97.08501041, 58.17089632, 10.63161376], [ 95.39424089, 53.03429149, 9.71790087], [ 91.45890546, 46.91020634, 8.61898956], [ 83.7556734 , 39.89936258, 7.35651899], [ 71.08436042, 32.12422375, 5.95517002], [ 71.03838507, 32.09989551, 5.95082665], [ 71.0230861 , 32.09179329, 5.94949046], [ 71.03844883, 32.09991289, 5.95116079], [ 71.08449479, 32.12425873, 5.9558385 ], [ 83.78763806, 39.90383793, 7.44085772], [ 91.57940299, 46.91911482, 8.78729168], [ 95.64743846, 53.04766406, 9.97071357], [ 97.44492495, 58.18889925, 10.9696657 ], [ 98.14320956, 62.26387924, 11.76604301], [ 98.4208958 , 65.21406514, 12.34541665], [ 98.54986983, 
66.99953518, 12.69729194], [ 98.59179084, 67.59720313, 12.81529574]]) avp = np.array( [[ 88.32365031, 84.02918765, 78.13336593, 70.18369459, 59.71491547, 48.2730443 , 46.5276473 , 31.41366172, 16.85941031, 7.82517078, 6.2871533 , 1.31568525]]) settle = np.array( [[ 31.52614416, 43.12119333, 59.03991198, 80.50402461, 108.76972824, 139.66278038, 144.37535228, 185.18311334, 224.47959217, 248.87203889, 253.02468609, 266.44764984]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.por, por, atol=5, err_msg = ("Fail. test_nogamiandli2003_lam_5, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.avp, avp, atol=2, err_msg = ("Fail. test_nogamiandli2003_lam_5, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=4, err_msg = ("Fail. test_nogamiandli2003_lam_5, set, " "implementation='%s', dT=%s" % (impl, dT))) # def test_nogamiandli2003_lam_100(): """test for nogami and li 2003 lambda = 100 nogami and li use rigorous formulation, speccon uses equal strain so expect differences """ t = np.array([ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. , 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np ################################################ #nogami and li parameters surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) hs=0.05 h = np.array([1, hs, hs, 1, hs, hs, 0.5]) lam = 100 kv = np.array([1,lam/hs, lam/hs, 1, lam/hs, lam/hs, 1]) mv = np.array([1.0, 1, 1, 1, 1, 1, 1]) kh = kv r0 = 0.05 r1 = 20 * r0 #z = layer_coords(h, 45,2) bctop = 0 bcbot = 1 nv = 15 nh = 5 tpor = np.array([0.01,0.1, 0.4]) z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. 
, 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) t = np.array( [ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) max_iter=20000 vertical_roots_x0 = 2.2 vertical_roots_dx = 1e-3 vertical_roots_p = 1.01 ################################################ z2 = np.cumsum(h) z1 = z2-h H = np.sum(h) z1/=H z2/=H kv = PolyLine(z1, z2, kv, kv) mv = PolyLine(z1, z2, mv, mv) kh = kv drn = 1 neig=80 mvref=1.0 surcharge_vs_depth = mv #rw=0.05, re = 20*rw = 1.0, n=20, no smear zone #Therfore muI=2.253865374, eta = 2/mu/re**2 = 0.887364446 etref = 0.887364446 et = PolyLine(z1, z2, np.ones_like(z1), np.ones_like(z1)) dTv = 1/H**2 dTh = etref dTw=1000 kwref=1 kw = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.0625, 0.125 , 0.1875, 0.25 , 0.3125, 0.375 , 0.4375, 0.5 , 0.5625, 0.625 , 0.6875, 0.75 , 0.8125, 0.875 , 0.9375, 1. , 1.025 , 1.05 , 1.075 , 1.1 , 1.1625, 1.225 , 1.2875, 1.35 , 1.4125, 1.475 , 1.5375, 1.6 , 1.6625, 1.725 , 1.7875, 1.85 , 1.9125, 1.975 , 2.0375, 2.1 , 2.125 , 2.15 , 2.175 , 2.2 , 2.2625, 2.325 , 2.3875, 2.45 , 2.5125, 2.575 , 2.6375, 2.6999 ]) ppress_z/=H avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.array( [ 0.01 , 0.01603286, 0.02570525, 0.04121285, 0.06607597, 0.1, 0.10593866, 0.16984993, 0.27231794, 0.4, 0.43660343, 0.7 ]) ppress_z_tval_indexes = [0, 5, 9] #0.01, 0.1, 0.4 """) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 3.30085802e+01, 8.39070407e+00, 3.58707855e-01], [ 6.05040116e+01, 1.64657476e+01, 7.03954600e-01], [ 7.93733840e+01, 2.39216010e+01, 1.02278441e+00], [ 8.98276000e+01, 3.04784367e+01, 1.30323304e+00], [ 9.44368466e+01, 3.58906380e+01, 1.53477694e+00], [ 9.61739263e+01, 3.99558431e+01, 1.70872820e+00], [ 9.69096134e+01, 4.25222425e+01, 1.81856068e+00], [ 9.72060228e+01, 4.34939559e+01, 1.86015483e+00], [ 9.70563632e+01, 4.28344044e+01, 1.83195234e+00], [ 9.64675854e+01, 4.05676349e+01, 1.73501446e+00], [ 9.51338553e+01, 3.67775792e+01, 1.57298210e+00], [ 9.15678072e+01, 3.16052348e+01, 1.35193903e+00], [ 8.28664682e+01, 2.52437753e+01, 1.08018340e+00], [ 6.59357468e+01, 1.79316479e+01, 7.67916132e-01], [ 3.96855349e+01, 9.94380297e+00, 4.26857907e-01], [ 6.59655749e+00, 1.58132211e+00, 6.98091328e-02], [ 6.59134099e+00, 1.58005846e+00, 6.97557684e-02], [ 6.58961224e+00, 1.57964519e+00, 6.97399004e-02], [ 6.59137041e+00, 1.58008208e+00, 6.97615203e-02], [ 6.59661708e+00, 1.58136940e+00, 6.98206398e-02], [ 3.98547617e+01, 1.00628681e+01, 4.55528866e-01], [ 6.63377268e+01, 1.81728261e+01, 8.24478652e-01], [ 8.35306351e+01, 2.56128437e+01, 1.16311926e+00], [ 9.24242085e+01, 3.21098488e+01, 1.45901394e+00], [ 9.60705684e+01, 3.74261238e+01, 1.70129661e+00], [ 9.74392519e+01, 4.13677993e+01, 1.88107072e+00], [ 9.81005003e+01, 4.37915763e+01, 1.99173594e+00], [ 9.83479591e+01, 4.46095565e+01, 2.02923036e+00], [ 9.81079539e+01, 4.37921401e+01, 1.99217957e+00], [ 9.74436874e+01, 4.13689901e+01, 1.88194701e+00], [ 9.60657415e+01, 3.74280676e+01, 1.70258384e+00], [ 9.24170918e+01, 3.21127333e+01, 1.46068005e+00], [ 8.35319659e+01, 2.56169153e+01, 1.16512240e+00], [ 6.63469036e+01, 1.81783823e+01, 8.26768047e-01], [ 3.98602199e+01, 1.00702443e+01, 4.58045763e-01], [ 6.59182174e+00, 1.59091774e+00, 7.24995316e-02], [ 6.58658179e+00, 1.58963382e+00, 
7.24411588e-02], [ 6.58484730e+00, 1.58920618e+00, 7.24217283e-02], [ 6.58661723e+00, 1.58963460e+00, 7.24412296e-02], [ 6.59189335e+00, 1.59091932e+00, 7.24996732e-02], [ 4.00379871e+01, 1.00742370e+01, 4.58399695e-01], [ 6.66669810e+01, 1.81862162e+01, 8.27472042e-01], [ 8.39318565e+01, 2.56282977e+01, 1.16616892e+00], [ 9.28276558e+01, 3.21272424e+01, 1.46205756e+00], [ 9.64383871e+01, 3.74451724e+01, 1.70427658e+00], [ 9.77664761e+01, 4.13880757e+01, 1.88393471e+00], [ 9.83983815e+01, 4.38125372e+01, 1.99443713e+00], [ 9.86319025e+01, 4.46305720e+01, 2.03172745e+00]]) avp = np.array( [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 73.98565603, 90.40509513, 110.94385476, 136.47024905, 167.51755212, 198.09275262, 202.40333078, 235.20722417, 257.98695445, 266.80558772, 267.81478229, 269.8577381 ]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.por, por, atol=13, err_msg = ("Fail. test_nogamiandli2003_lam_100, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.avp, avp, atol=5, err_msg = ("Fail. test_nogamiandli2003_lam_100, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=7, err_msg = ("Fail. test_nogamiandli2003_lam_100, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn0_kv_linear_mv_const(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case mv=mv0 PTPB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 0 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 0 #p = 1 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 0 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,2]) mv = PolyLine([0,1], [1,1]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. 
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.45637289e+01, 7.93407071e+00, 1.72266349e+00, 5.06825357e-01], [ 2.78786258e+01, 1.51937492e+01, 3.29853310e+00, 9.70461303e-01], [ 3.95929298e+01, 2.15886617e+01, 4.68603058e+00, 1.37867434e+00], [ 4.94637465e+01, 2.69822591e+01, 5.85540859e+00, 1.72271221e+00], [ 5.73451924e+01, 3.12869817e+01, 6.78769539e+00, 1.99699341e+00], [ 6.31732721e+01, 3.44589624e+01, 7.47352321e+00, 2.19876233e+00], [ 6.69501080e+01, 3.64925266e+01, 7.91190221e+00, 2.32772855e+00], [ 6.87296115e+01, 3.74146824e+01, 8.10898721e+00, 2.38570374e+00], [ 6.86058287e+01, 3.72797427e+01, 8.07687246e+00, 2.37624707e+00], [ 6.67043109e+01, 3.61641803e+01, 7.83244041e+00, 2.30432633e+00], [ 6.31761472e+01, 3.41617884e+01, 7.39628296e+00, 2.17600049e+00], [ 5.81938563e+01, 3.13791946e+01, 6.79170747e+00, 1.99812723e+00], [ 5.19481922e+01, 2.79317585e+01, 6.04383529e+00, 1.77809769e+00], [ 4.46450073e+01, 2.39398742e+01, 5.17879661e+00, 1.52359956e+00], [ 3.65015772e+01, 1.95256817e+01, 4.22302247e+00, 1.24240884e+00], [ 2.77421116e+01, 1.48101921e+01, 3.20263266e+00, 9.42209774e-01], [ 1.85924877e+01, 9.91081790e+00, 2.14291617e+00, 6.30442069e-01], [ 9.27449220e+00, 4.93929752e+00, 1.06789988e+00, 3.14174014e-01], [ 4.17631424e-12, 2.26493642e-12, 4.90759409e-13, 1.44383480e-13]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 2.43103297, 119.32510052, 168.14191428, 202.97841422, 228.55495738, 247.38377244, 261.24954126, 271.46110823, 278.98165371, 284.52037457, 288.59953574, 291.60376277, 293.81632162, 295.44583147, 296.64593629, 297.52979203]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW((reader + "\n" + "implementation = '{}'".format(impl) + "\n" + "dT = {}".format(dT))) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_linear_mv_const, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn0_kv_linear_mv_const, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. 
test_zhuandyin2012_drn0_kv_linear_mv_const, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn1_kv_linear_mv_const(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case mv=mv0 PTIB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 1 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 0 #p = 1 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 1 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,2]) mv = PolyLine([0,1], [1,1]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0. , 0. , 0. , 0. 
], [ 15.18444088, 11.10894698, 7.40806725, 5.65866747], [ 29.21825868, 21.52741854, 14.393854 , 10.99614641], [ 41.86741966, 31.18979916, 20.94166598, 16.00146099], [ 53.00073922, 40.05518511, 27.04142859, 20.66754327], [ 62.58176447, 48.10531202, 32.68786111, 24.99062132], [ 70.65301836, 55.34166198, 37.87975403, 28.96970795], [ 77.31639526, 61.78209413, 42.61933358, 32.60616995], [ 82.71294612, 67.45729525, 46.91170217, 35.90336248], [ 87.00441979, 72.40728918, 50.76434639, 38.86631703], [ 90.35798578, 76.67818396, 54.18670618, 41.50147367], [ 92.93472805, 80.3192787 , 57.18979959, 43.81645046], [ 94.88186164, 83.38060239, 59.78589829, 45.81984409], [ 96.32821071, 85.9109158 , 61.98824926, 47.52105729], [ 97.38227721, 87.95617524, 63.81083812, 48.93014883], [ 98.1321791 , 89.55843377, 65.26819006, 50.05770314], [ 98.64679433, 90.75513955, 66.37520386, 50.91471679], [ 98.97756401, 91.57878173, 67.14701531, 51.51249953], [ 99.16054817, 92.05682986, 67.59888601, 51.862588 ], [ 99.21846412, 92.21191229, 67.74611382, 51.97667046]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 1.00721992, 51.02212219, 73.064841 , 90.33221466, 105.12195388, 118.31183726, 130.33109045, 141.41484463, 151.70343029, 161.28848225, 170.23575019, 178.59664384, 186.4141468 , 193.72588488, 200.56574999, 206.96478826]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_const, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_const, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_const, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn0_kv_const_mv_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case kv=kv0 PTPB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. 
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 0 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 1 #p = 0 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 0 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,1]) mv = PolyLine([0,1], [1,2]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array([[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.54526693e+01, 1.09324170e+01, 5.45432949e+00, 3.12385435e+00], [ 3.04146954e+01, 2.16470695e+01, 1.08043650e+01, 6.18772293e+00], [ 4.44006882e+01, 3.19132040e+01, 1.59384135e+01, 9.12738341e+00], [ 5.69796067e+01, 4.14931978e+01, 2.07391114e+01, 1.18753937e+01], [ 6.78211642e+01, 5.01510045e+01, 2.50865613e+01, 1.43629200e+01], [ 7.67238893e+01, 5.76602545e+01, 2.88619716e+01, 1.65218535e+01], [ 8.36163878e+01, 6.38112411e+01, 3.19517313e+01, 1.82871712e+01], [ 8.85292651e+01, 6.84163589e+01, 3.42518199e+01, 1.95994813e+01], [ 9.15424970e+01, 7.13140823e+01, 3.56724239e+01, 2.04076724e+01], [ 9.27199192e+01, 7.23721783e+01, 3.61425945e+01, 2.06715704e+01], [ 9.20474065e+01, 7.14913358e+01, 3.56147594e+01, 2.03644897e+01], [ 8.93932711e+01, 6.86105734e+01, 3.40688657e+01, 1.94755543e+01], [ 8.45078598e+01, 6.37154707e+01, 3.15159206e+01, 1.80116555e+01], [ 7.70734988e+01, 5.68494130e+01, 2.80006785e+01, 1.59989093e+01], [ 6.68048929e+01, 4.81267528e+01, 2.36032269e+01, 1.34834831e+01], [ 5.35839387e+01, 3.77453917e+01, 1.84392403e+01, 1.05316710e+01], [ 3.75948100e+01, 2.59952204e+01, 1.26587025e+01, 7.22911840e+00], [ 1.94124730e+01, 1.32586059e+01, 6.44295503e+00, 3.67912598e+00], [ -3.45068593e-14, -1.97857435e-14, -8.69186348e-15, -4.90979514e-15]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 3.57704735, 120.04921228, 170.08668445, 208.48781421, 240.52536911, 268.00075818, 291.78168717, 312.42809025, 330.37165232, 345.97183673, 359.53650775, 371.33191184, 381.58908098, 390.50873519, 398.26534318, 405.01058755]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + 
"\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_const_mv_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn0_kv_const_mv_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_const_mv_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn1_kv_const_mv_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case kv=kv0 PTIB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 1 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 1 #p = 0 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 1 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,1]) mv = PolyLine([0,1], [1,2]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0. , 0. , 0. , 0. 
], [ 15.45888274, 11.25256128, 7.81026213, 6.59018061], [ 30.43068308, 22.32963838, 15.56683296, 13.14658973], [ 44.4354633 , 33.04917896, 23.21234569, 19.63280111], [ 57.05195463, 43.23209565, 30.68678233, 26.00996498], [ 67.96788342, 52.71408077, 37.92918048, 32.23741176], [ 77.01434637, 61.35697402, 44.87949524, 38.2733285 ], [ 84.17673582, 69.05832424, 51.48050894, 44.07548095], [ 89.58036923, 75.75791659, 57.67966088, 49.60194716], [ 93.45534202, 81.44039358, 63.43065826, 54.81182438], [ 96.09001616, 86.13363155, 68.69472915, 59.66586883], [ 97.78432712, 89.90314892, 73.4413916 , 64.12702968], [ 98.81236575, 92.84339272, 77.64864003, 68.16084386], [ 99.39948403, 95.0671534 , 81.30248985, 71.73566822], [ 99.71432767, 96.69450875, 84.39587248, 74.82273882], [ 99.87247181, 97.84256704, 86.92693088, 77.39606414], [ 99.94666509, 98.6169071 , 88.89682733, 79.43217778], [ 99.97898153, 99.1050939 , 90.30723423, 80.90979557], [ 99.99163793, 99.37210731, 91.15773026, 81.80943988], [ 99.9948905 , 99.45708676, 91.44336375, 82.11310762]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 1.48203385, 50.94661452, 72.8626314 , 89.97524417, 104.5927801 , 117.61098055, 129.48992505, 140.50305227, 150.82806371, 160.58734407, 169.86851905, 178.73606076, 187.23840576, 195.41260211, 203.28747828, 210.88586589]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_const_mv_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn1_kv_const_mv_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_const_mv_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn0_kv_linear_mv_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case PTPB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. 
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 0 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 1 #p = 1 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 0 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,2]) mv = PolyLine([0,1], [1,2]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.62512314e+01, 1.07457907e+01, 3.84160979e+00, 1.67218369e+00], [ 3.12321729e+01, 2.06934529e+01, 7.39486876e+00, 3.21878437e+00], [ 4.46159170e+01, 2.96501455e+01, 1.05883768e+01, 4.60866933e+00], [ 5.61568702e+01, 3.74501715e+01, 1.33611971e+01, 5.81528170e+00], [ 6.56952104e+01, 4.39574427e+01, 1.56639060e+01, 6.81710096e+00], [ 7.31489554e+01, 4.90673778e+01, 1.74593972e+01, 7.59799347e+00], [ 7.84964687e+01, 5.27082371e+01, 1.87234166e+01, 8.14744312e+00], [ 8.17543474e+01, 5.48419309e+01, 1.94448120e+01, 8.46065565e+00], [ 8.29565248e+01, 5.54643442e+01, 1.96254925e+01, 8.53853409e+00], [ 8.21399323e+01, 5.46052032e+01, 1.92800950e+01, 8.38752580e+00], [ 7.93403412e+01, 5.23274722e+01, 1.84353661e+01, 8.01934544e+00], [ 7.45994455e+01, 4.87262199e+01, 1.71292755e+01, 7.45058093e+00], [ 6.79814251e+01, 4.39268564e+01, 1.54098798e+01, 6.70219250e+00], [ 5.95947547e+01, 3.80826216e+01, 1.33339668e+01, 5.79891783e+00], [ 4.96134660e+01, 3.13712176e+01, 1.09655124e+01, 4.76859794e+00], [ 3.82918130e+01, 2.39905233e+01, 8.37398825e+00, 3.64144101e+00], [ 2.59674561e+01, 1.61534102e+01, 5.63256373e+00, 2.44924221e+00], [ 1.30506509e+01, 8.08177394e+00, 2.81624514e+00, 1.22457875e+00], [ 3.63287486e-14, 4.09473739e-14, 1.63287722e-14, 7.13624388e-15]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 3.64504448, 148.20259183, 209.3368549 , 255.32464212, 292.06381677, 321.7801835 , 345.88327216, 365.44839429, 381.33428266, 394.23438072, 404.71050089, 413.21838104, 420.12792413, 425.739451 , 430.29682587, 433.99808524]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + 
"dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_linear_mv_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn0_kv_linear_mv_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_linear_mv_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn1_kv_linear_mv_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case PTIB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 1 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 1 #q = 1 #p = 1 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 1 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine([0,1], [1,2]) mv = PolyLine([0,1], [1,2]) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0. , 0. , 0. , 0. 
], [ 16.37146015, 12.2738666 , 8.84546454, 7.43834356], [ 31.5074054 , 23.79150445, 17.2062873 , 14.47509718], [ 45.12640436, 34.47333568, 25.07537157, 21.10931735], [ 57.04026068, 44.25775233, 32.4458375 , 27.33909292], [ 67.16521634, 53.10456242, 39.31181908, 33.16205864], [ 75.51945528, 60.99668435, 45.6690481 , 38.57579977], [ 82.20890991, 67.94011853, 51.51524375, 43.57816742], [ 87.40509838, 73.96235989, 56.85032632, 48.16751987], [ 91.31947167, 79.10952461, 61.67647461, 52.34290194], [ 94.17850768, 83.44253908, 65.99804905, 56.10417281], [ 96.2027854 , 87.03277091, 69.8214049 , 59.45209115], [ 97.59187767, 89.9574734 , 73.15462183, 62.38836579], [ 98.51550262, 92.29537114, 76.00717725, 64.91567929], [ 99.11026291, 94.12264673, 78.38959142, 67.03769123], [ 99.48062621, 95.50950908, 80.3130709 , 68.75902746], [ 99.70256333, 96.5174444 , 81.78917543, 70.08526077], [ 99.82836475, 97.19718251, 82.82952989, 71.02288785], [ 99.89146014, 97.5873598 , 83.44559918, 71.57930625], [ 99.91043614, 97.71382984, 83.64853883, 71.76279462]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 1.21583113, 52.56272601, 76.10323202, 94.84376502, 111.09114323, 125.73965256, 139.24586843, 151.87600354, 163.79688456, 175.11790375, 185.91346217, 196.23605561, 206.12424666, 215.60762381, 224.70991703, 233.45096577]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_linear_mv_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) # def test_zhuandyin2012_drn0_kv_mv_non_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case PTPB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. 
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 0 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 0.5 #q = 2 #p = -2 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 0 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 0.90702948, 0.82644628, 0.75614367, 0.69444444, 0.64 , 0.59171598, 0.54869684, 0.51020408, 0.47562426, 0.44444444])) mv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 1.1025, 1.21 , 1.3225, 1.44 , 1.5625, 1.69 , 1.8225, 1.96 , 2.1025, 2.25 ])) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.46735407e+01, 1.03912749e+01, 6.41229682e+00, 4.57268569e+00], [ 2.95788346e+01, 2.11321198e+01, 1.30795985e+01, 9.32820812e+00], [ 4.40987535e+01, 3.19912441e+01, 1.99046557e+01, 1.41983660e+01], [ 5.75828248e+01, 4.26939050e+01, 2.67633017e+01, 1.90957668e+01], [ 6.94433436e+01, 5.29361414e+01, 3.35030492e+01, 2.39125719e+01], [ 7.92552649e+01, 6.24046262e+01, 3.99428658e+01, 2.85200193e+01], [ 8.68301825e+01, 7.07988643e+01, 4.58744872e+01, 3.27690394e+01], [ 9.22365664e+01, 7.78502690e+01, 5.10656237e+01, 3.64923102e+01], [ 9.57527634e+01, 8.33312726e+01, 5.52654037e+01, 3.95081180e+01], [ 9.77603095e+01, 8.70482108e+01, 5.82123900e+01, 4.16263817e+01], [ 9.86004778e+01, 8.88153120e+01, 5.96455150e+01, 4.26571486e+01], [ 9.84164540e+01, 8.84142419e+01, 5.93183109e+01, 4.24217705e+01], [ 9.69914128e+01, 8.55535605e+01, 5.70168132e+01, 4.07667995e+01], [ 9.35945553e+01, 7.98525651e+01, 5.25814092e+01, 3.75803906e+01], [ 8.68970325e+01, 7.08792914e+01, 4.59325274e+01, 3.28106427e+01], [ 7.51182570e+01, 5.82658943e+01, 3.70992525e+01, 2.64848548e+01], [ 5.66168535e+01, 4.18996669e+01, 2.62486163e+01, 1.87281176e+01], [ 3.09602450e+01, 2.21444807e+01, 1.37115752e+01, 9.77906130e+00], [ -6.18241273e-14, -2.23931281e-14, -8.15431343e-15, -5.67041685e-15]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = 
np.array( [[ 3.8496919 , 98.88619572, 139.84619549, 171.27505923, 197.75770538, 221.03013705, 241.9290118 , 260.91094338, 278.25424995, 294.1490242 , 308.73922326, 322.14274325, 334.46119179, 345.78480236, 356.19504202, 365.76611184]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-1, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=2, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn1_kv_mv_non_linear(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case PTIB """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 1 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 0.5 #q = 2 #p = -2 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 1 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 0.90702948, 0.82644628, 0.75614367, 0.69444444, 0.64 , 0.59171598, 0.54869684, 0.51020408, 0.47562426, 0.44444444])) mv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 1.1025, 1.21 , 1.3225, 1.44 , 1.5625, 1.69 , 1.8225, 1.96 , 2.1025, 2.25 ])) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) surcharge_vs_depth = PolyLine([0,1], [1,1]) ppress_z = np.array( [ 0. 
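# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): the 11-point PolyLine tables typed into
# the two non-linear tests are just the power-law profiles sampled at eleven
# equally spaced normalised depths with alpha=0.5, q=2, p=-2:
import numpy as np

zn = np.linspace(0.0, 1.0, 11)
kv_table = (1.0 + 0.5 * zn)**-2   # 1.0, 0.90702948, 0.82644628, ..., 0.44444444
mv_table = (1.0 + 0.5 * zn)**2    # 1.0, 1.1025,     1.21,       ..., 2.25
# The piecewise-linear input is therefore an 11-segment approximation of the
# smooth Zhu and Yin profiles, which presumably explains the slightly looser
# atol used in the non-linear asserts.
# ---------------------------------------------------------------------------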
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = np.array( [[ 0. , 0. , 0. , 0. ], [ 14.67355514, 10.40528932, 6.9478055 , 5.78180724], [ 29.57888041, 21.1645055 , 14.20340783, 11.8343646 ], [ 44.09888584, 32.05180943, 21.70257559, 18.12252389], [ 57.58320674, 42.8013085 , 29.36397652, 24.60042608], [ 69.44444839, 53.12388946, 37.08988223, 31.21115238], [ 79.25845477, 62.73135179, 44.76833267, 37.88694384], [ 86.83932182, 71.36544094, 52.27696814, 44.55012872], [ 92.26237958, 78.82739783, 59.48856982, 51.11485949], [ 95.82413544, 85.0022523 , 66.27810786, 57.48969044], [ 97.95205743, 89.87205113, 72.53079584, 63.58092097], [ 99.09709856, 93.51401605, 78.15033325, 69.29648861], [ 99.64623111, 96.08312814, 83.06624662, 74.55003194], [ 99.87831495, 97.78286166, 87.23908646, 79.26457494], [ 99.96373069, 98.83118188, 90.66226567, 83.3751421 ], [ 99.99076102, 99.43000313, 93.35956014, 86.82953547], [ 99.99801788, 99.74450265, 95.37770132, 89.58653348], [ 99.99964732, 99.89475021, 96.77397903, 91.61094907], [ 99.99994823, 99.95773745, 97.59921715, 92.86534606], [ 99.99998809, 99.97464489, 97.8768125 , 93.29878245]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [ [ 1.92503833, 49.44309786, 69.92309956, 85.63795758, 98.88619572, 110.55812783, 121.11036104, 130.81414062, 139.84619549, 148.32927168, 156.35271013, 163.98389265, 171.27505923, 178.26759418, 184.99484499, 191.48405032]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-1, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_mv_non_linear, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn1_kv_mv_non_linear, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=2, err_msg = ("Fail. test_zhuandyin2012_drn1_kv_mv_non_linear, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_zhuandyin2012_drn0_kv_mv_non_linear_BC(): """test for zhu and yin 2012 vertical drainage, depth dependent properties, instant load generally: mv = mv0*(1+alpha*z/H)**q kv = kv0* (1+alpha*z/H)**p for this case PTPB replicate with negative BC """ t = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) z = np.array([ 0. 
, 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) tpor=t[np.array([2,4,9,13])] reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np #################################### #zhuandyin2012 properties #ui = 100 #drn = 0 #nterms = 50 #mv0 = 1.2 #kv0 = 1.6 #H = 2.5 #alpha = 0.5 #q = 2 #p = -2 #z = np.linspace(0,H,20) #t = np.linspace(0,15,16) #tpor=t[np.array([2,4,9,13])] #plot_eigs=False # #por, doc, settle = zhuandyin2012( # z=z, t=t, alpha=alpha, p=p, q=q, drn=drn, tpor=tpor, H = H, kv0 = kv0, mv0 = mv0, gamw = 10, # ui = 100, nterms = nterms, plot_eigs=plot_eigs) #################################### neig=40 H = 2.5 drn = 0 mvref = 1.2 kvref = 1.6 / 10 kv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 0.90702948, 0.82644628, 0.75614367, 0.69444444, 0.64 , 0.59171598, 0.54869684, 0.51020408, 0.47562426, 0.44444444])) mv = PolyLine(np.array( [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]), np.array( [ 1. , 1.1025, 1.21 , 1.3225, 1.44 , 1.5625, 1.69 , 1.8225, 1.96 , 2.1025, 2.25 ])) dTv = kvref/mvref/H**2 dTh=0.1 dTw=0 khref = 1 etref = 1 kwref=1 kw = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) #surcharge_vs_time = PolyLine([0,0,10], [0,100,100]) #surcharge_vs_depth = PolyLine([0,1], [1,1]) top_vs_time = PolyLine([0,0,10], [0,-100,-100]) bot_vs_time = PolyLine([0,0,10], [0,-100,-100]) ppress_z = np.array( [ 0. , 0.13157895, 0.26315789, 0.39473684, 0.52631579, 0.65789474, 0.78947368, 0.92105263, 1.05263158, 1.18421053, 1.31578947, 1.44736842, 1.57894737, 1.71052632, 1.84210526, 1.97368421, 2.10526316, 2.23684211, 2.36842105, 2.5 ]) ppress_z/=H settlement_z_pairs = [[0,1]] tvals = np.array( [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]) ppress_z_tval_indexes = [2,4,9,13] """) por = -100+np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.46735407e+01, 1.03912749e+01, 6.41229682e+00, 4.57268569e+00], [ 2.95788346e+01, 2.11321198e+01, 1.30795985e+01, 9.32820812e+00], [ 4.40987535e+01, 3.19912441e+01, 1.99046557e+01, 1.41983660e+01], [ 5.75828248e+01, 4.26939050e+01, 2.67633017e+01, 1.90957668e+01], [ 6.94433436e+01, 5.29361414e+01, 3.35030492e+01, 2.39125719e+01], [ 7.92552649e+01, 6.24046262e+01, 3.99428658e+01, 2.85200193e+01], [ 8.68301825e+01, 7.07988643e+01, 4.58744872e+01, 3.27690394e+01], [ 9.22365664e+01, 7.78502690e+01, 5.10656237e+01, 3.64923102e+01], [ 9.57527634e+01, 8.33312726e+01, 5.52654037e+01, 3.95081180e+01], [ 9.77603095e+01, 8.70482108e+01, 5.82123900e+01, 4.16263817e+01], [ 9.86004778e+01, 8.88153120e+01, 5.96455150e+01, 4.26571486e+01], [ 9.84164540e+01, 8.84142419e+01, 5.93183109e+01, 4.24217705e+01], [ 9.69914128e+01, 8.55535605e+01, 5.70168132e+01, 4.07667995e+01], [ 9.35945553e+01, 7.98525651e+01, 5.25814092e+01, 3.75803906e+01], [ 8.68970325e+01, 7.08792914e+01, 4.59325274e+01, 3.28106427e+01], [ 7.51182570e+01, 5.82658943e+01, 3.70992525e+01, 2.64848548e+01], [ 5.66168535e+01, 4.18996669e+01, 2.62486163e+01, 1.87281176e+01], [ 3.09602450e+01, 2.21444807e+01, 1.37115752e+01, 9.77906130e+00], [ -6.18241273e-14, -2.23931281e-14, -8.15431343e-15, -5.67041685e-15]]) # # avp = np.array( # [[ 7.25979052e+01, 6.65166314e+01, 5.89096834e+01, # 4.94554633e+01, 3.79564622e+01, 2.66323138e+01, # 
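# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): this "negative BC" variant replaces the
# +100 surcharge with a -100 step applied through top_vs_time and bot_vs_time
# at the two drained boundaries.  Because the governing equation is linear,
# that problem is the surcharge problem shifted uniformly by -100, which is
# why the expected pore pressures are written as por = -100 + (surcharge-case
# por) while the expected settlements are identical.  A minimal check of that
# bookkeeping (values below are just a few entries copied from the array
# above):
import numpy as np

por_surcharge_case = np.array([0.0, 14.6735407, 29.5788346])
por_negative_bc = -100.0 + por_surcharge_case
assert np.allclose(por_negative_bc + 100.0, por_surcharge_case)
# ---------------------------------------------------------------------------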
2.50358034e+01, 1.28862133e+01, 4.44927613e+00, # 1.18311566e+00, 8.09339892e-01, 5.26895921e-02]]) settle = np.array( [[ 3.8496919 , 98.88619572, 139.84619549, 171.27505923, 197.75770538, 221.03013705, 241.9290118 , 260.91094338, 278.25424995, 294.1490242 , 308.73922326, 322.14274325, 334.46119179, 345.78480236, 356.19504202, 365.76611184]]) for impl in ["vectorized"]: for dT in [10]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.clf() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() ## ## # plt.figure() # plt.plot(t, settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # legend=plt.legend() # legend.draggable() ## plt.legend.DraggableLegend() ## plt.figure() ## plt.plot(t, avp[0],'b-*', label='expected') ## plt.plot(t, a.avp[0], 'r-+', label='calculated') ## plt.legend() # plt.show() assert_allclose(a.por, por, atol=1e-1, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear_BC, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.avp, avp, atol=5, # err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear_BC, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=2, err_msg = ("Fail. test_zhuandyin2012_drn0_kv_mv_non_linear_BC, set, " "implementation='%s', dT=%s" % (impl, dT))) def test_tang_and_onitsuka_vert_and_radial_ideal(): """tang_and_onitsuka_vert_and_radial vertical and radial consolidation compare average pore pressure of whole layer and settlement H=1 kv=kh=10, mv=1, gamw=10 dTv=kvref/mvref/gamw/H**2 = 1 #re=0.5, rw = 0.03, n = 16.6667, mu = 2.074475589, etref = 3.856396307 #dTh = 2*khref/mvref/gamw/mu = 3.856396307 """ t = np.array([ 1.00000000e-03, 2.00000000e-03, 3.00000000e-03, 4.00000000e-03, 5.00000000e-03, 6.00000000e-03, 7.00000000e-03, 8.00000000e-03, 9.00000000e-03, 1.00000000e-02, 2.00000000e-02, 3.00000000e-02, 4.00000000e-02, 5.00000000e-02, 6.00000000e-02, 7.00000000e-02, 8.00000000e-02, 9.00000000e-02, 1.00000000e-01, 1.10000000e-01, 1.20000000e-01, 1.30000000e-01, 1.40000000e-01, 1.50000000e-01, 1.60000000e-01, 1.70000000e-01, 1.80000000e-01, 1.90000000e-01, 2.00000000e-01, 2.10000000e-01, 2.20000000e-01, 2.30000000e-01, 2.40000000e-01, 2.50000000e-01, 2.60000000e-01, 2.70000000e-01, 2.80000000e-01, 2.90000000e-01, 3.00000000e-01, 3.10000000e-01, 3.20000000e-01, 3.30000000e-01, 3.40000000e-01, 3.50000000e-01, 3.60000000e-01, 3.70000000e-01, 3.80000000e-01, 3.90000000e-01, 4.00000000e-01, 4.10000000e-01, 4.20000000e-01, 4.30000000e-01, 4.40000000e-01, 4.50000000e-01, 4.60000000e-01, 4.70000000e-01, 4.80000000e-01, 4.90000000e-01, 5.00000000e-01, 5.10000000e-01, 5.20000000e-01, 5.30000000e-01, 5.40000000e-01, 5.50000000e-01, 5.60000000e-01, 5.70000000e-01, 5.80000000e-01, 5.90000000e-01, 6.00000000e-01, 6.10000000e-01, 6.20000000e-01, 6.30000000e-01, 6.40000000e-01, 6.50000000e-01, 6.60000000e-01, 6.70000000e-01, 6.80000000e-01, 6.90000000e-01, 7.00000000e-01, 7.10000000e-01, 7.20000000e-01, 7.30000000e-01, 7.40000000e-01, 7.50000000e-01, 7.60000000e-01, 7.70000000e-01, 7.80000000e-01, 7.90000000e-01, 8.00000000e-01, 8.10000000e-01, 8.20000000e-01, 8.30000000e-01, 8.40000000e-01, 8.50000000e-01, 8.60000000e-01, 8.70000000e-01, 8.80000000e-01, 8.90000000e-01, 9.00000000e-01, 9.10000000e-01, 9.20000000e-01, 9.30000000e-01, 9.40000000e-01, 9.50000000e-01, 9.60000000e-01, 9.70000000e-01, 9.80000000e-01, 9.90000000e-01, 1.00000000e+00, 
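# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): the drain-geometry numbers quoted in
# the docstring above (mu = 2.074475589, etref = dTh = 3.856396307) can be
# reproduced with the usual ideal-drain, no-smear mu expression.  This is the
# editor's reading of where the constants come from, not a quotation of the
# geotecha implementation:
import numpy as np

kh, mv, gamw = 10.0, 1.0, 10.0
re, rw = 0.5, 0.03
n = re / rw                                                               # 16.6667
mu = n**2 / (n**2 - 1.0) * np.log(n) - (3.0 * n**2 - 1.0) / (4.0 * n**2)  # 2.074475589
etref = 2.0 / (mu * re**2)                                                # 3.856396307
dTh = 2.0 * kh / (mv * gamw * mu * re**2)                                 # 3.856396307
# ---------------------------------------------------------------------------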
1.01000000e+00]) avp = 100*np.array([[ 0.00324696, 0.00641694, 0.00953238, 0.0126017 , 0.01562987, 0.01862029, 0.02157548, 0.02449743, 0.02738778, 0.03024788, 0.05738761, 0.0822719 , 0.10525907, 0.12658293, 0.1464181 , 0.16490438, 0.18215844, 0.19828034, 0.21335753, 0.22746753, 0.24067983, 0.25305715, 0.26465659, 0.27553032, 0.25547838, 0.23790104, 0.22198642, 0.2074141 , 0.19398549, 0.18155873, 0.17002455, 0.15929482, 0.14929611, 0.13996587, 0.13124986, 0.12310046, 0.11547534, 0.10833658, 0.10164995, 0.12563221, 0.14689894, 0.16627677, 0.18410003, 0.20058033, 0.21587175, 0.23009504, 0.2433491 , 0.25571746, 0.26727216, 0.27807632, 0.28818593, 0.29765116, 0.30651729, 0.31482546, 0.29236538, 0.27252759, 0.25449115, 0.2379271 , 0.22262886, 0.20844708, 0.19526545, 0.18298923, 0.1715388 , 0.1608458 , 0.15085055, 0.14150028, 0.13274787, 0.1245509 , 0.11687089, 0.10967276, 0.10292438, 0.09659617, 0.09066084, 0.08509314, 0.07986962, 0.0749685 , 0.07036946, 0.06605359, 0.06200322, 0.05820182, 0.05463397, 0.05128519, 0.04814195, 0.04519158, 0.04242218, 0.03982263, 0.03738247, 0.03509191, 0.03294176, 0.0309234 , 0.02902874, 0.02725019, 0.02558063, 0.02401338, 0.02254216, 0.02116109, 0.01986464, 0.01864762, 0.01750516, 0.01643271, 0.01542596, 0.01448089, 0.01359372, 0.0127609 , 0.01197911, 0.01124522, 0.01055628, 0.00990956, 0.00930246, 0.00873255]]) z = np.array( [ 0. , 0.11111111, 0.22222222, 0.33333333, 0.44444444, 0.55555556, 0.66666667, 0.77777778, 0.88888889, 1. ]) por = np.array( [[ 0. , 0. , 0. ], [ 10.62834433, 5.71540426, 0.79195768], [ 18.10955225, 11.14818494, 1.55981113], [ 23.22767635, 16.0566849 , 2.2801995 ], [ 26.62643019, 20.26892536, 2.93122316], [ 28.80909111, 23.69187844, 3.49311208], [ 30.15548187, 26.3014876 , 3.94882356], [ 30.93852215, 28.11965758, 4.28455203], [ 31.33977648, 29.18687894, 4.49013755], [ 31.4624026 , 29.5381209 , 4.55936353]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np ######################################### #Tang and onitsuka input #t = np.array([ 1.00000000e-03, 2.00000000e-03, 3.00000000e-03, # 4.00000000e-03, 5.00000000e-03, 6.00000000e-03, # 7.00000000e-03, 8.00000000e-03, 9.00000000e-03, # 1.00000000e-02, 2.00000000e-02, 3.00000000e-02, # 4.00000000e-02, 5.00000000e-02, 6.00000000e-02, # 7.00000000e-02, 8.00000000e-02, 9.00000000e-02, # 1.00000000e-01, 1.10000000e-01, 1.20000000e-01, # 1.30000000e-01, 1.40000000e-01, 1.50000000e-01, # 1.60000000e-01, 1.70000000e-01, 1.80000000e-01, # 1.90000000e-01, 2.00000000e-01, 2.10000000e-01, # 2.20000000e-01, 2.30000000e-01, 2.40000000e-01, # 2.50000000e-01, 2.60000000e-01, 2.70000000e-01, # 2.80000000e-01, 2.90000000e-01, 3.00000000e-01, # 3.10000000e-01, 3.20000000e-01, 3.30000000e-01, # 3.40000000e-01, 3.50000000e-01, 3.60000000e-01, # 3.70000000e-01, 3.80000000e-01, 3.90000000e-01, # 4.00000000e-01, 4.10000000e-01, 4.20000000e-01, # 4.30000000e-01, 4.40000000e-01, 4.50000000e-01, # 4.60000000e-01, 4.70000000e-01, 4.80000000e-01, # 4.90000000e-01, 5.00000000e-01, 5.10000000e-01, # 5.20000000e-01, 5.30000000e-01, 5.40000000e-01, # 5.50000000e-01, 5.60000000e-01, 5.70000000e-01, # 5.80000000e-01, 5.90000000e-01, 6.00000000e-01, # 6.10000000e-01, 6.20000000e-01, 6.30000000e-01, # 6.40000000e-01, 6.50000000e-01, 6.60000000e-01, # 6.70000000e-01, 6.80000000e-01, 6.90000000e-01, # 7.00000000e-01, 7.10000000e-01, 7.20000000e-01, # 7.30000000e-01, 7.40000000e-01, 7.50000000e-01, # 7.60000000e-01, 7.70000000e-01, 7.80000000e-01, # 7.90000000e-01, 
8.00000000e-01, 8.10000000e-01, # 8.20000000e-01, 8.30000000e-01, 8.40000000e-01, # 8.50000000e-01, 8.60000000e-01, 8.70000000e-01, # 8.80000000e-01, 8.90000000e-01, 9.00000000e-01, # 9.10000000e-01, 9.20000000e-01, 9.30000000e-01, # 9.40000000e-01, 9.50000000e-01, 9.60000000e-01, # 9.70000000e-01, 9.80000000e-01, 9.90000000e-01, # 1.00000000e+00, 1.01000000e+00]) # #H = 1 #z = np.linspace(0, H,10) #kv, kh, ks, kw = (10, 10, 10, 1e7) #mv=1 #gamw = 10 #rw, rs, re = (0.03, 0.03, 0.5) #drn = 1 #surcharge_vs_time = ((0,0.15, 0.3, 0.45,100.0), (0,50,50.0,100.0,100.0)) #tpor = t[np.array([20,60,90])] #nterms = 20 # #por, avp, settle = tangandonitsuka2000(z=z, t=t, kv=kv, kh=kh, ks=ks, kw=kw, mv=mv, gamw=gamw, rw=rw, rs=rs, re=re, H=H, # drn=drn, surcharge_vs_time=surcharge_vs_time, # tpor=tpor, nterms=nterms) ################################################################## H = 1 drn = 1 dTv = 1 #dTv=kvref/mvref/gamw/H**2 #re=0.5, rw = 0.03, n = 16.6667, mu = 2.074475589, #dTh = 2*khref/mvref/gamw/mu dTh = 3.856396307 neig = 20 mvref = 1.0 kvref = 10.0 khref = 10.0 etref = 3.856396307 #2/mu/re**2 mv = PolyLine([0,1], [1,1]) kv = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) dTw=5000 kwref=1 kw = PolyLine([0,1], [1,1]) surcharge_vs_depth = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0.15,0.3,0.45,4],[0.0,50,50,100,100]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] ppress_z_tval_indexes = [20,60,90] tvals = np.%s """ % (repr(z), repr(t))) # por = 100 * TERZ1D_POR # avp = 100 * TERZ1D_AVP settle = (np.interp(t,[0,0.15,0.3,0.45,4], [0.0,50,50,100,100]) - avp) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # # # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. test_tang_and_onitsuka_vert_and_radial, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_tang_and_onitsuka_vert_and_radial, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. 
test_tang_and_onitsuka_vert_and_radial, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_tang_and_onitsuka_vert_and_radial_well_resistance(): """tang_and_onitsuka_vert_and_radial vertical and radial consolidation compare average pore pressure of whole layer and settlement H=1 kv=kh=10, mv=1, gamw=10 dTv=kvref/mvref/gamw/H**2 = 1 #re=0.5, rw = 0.03, n = 16.6667, mu = 2.074475589, etref = 3.856396307 #dTh = 2*khref/mvref/gamw/mu = 3.856396307 """ t = np.array([ 1.00000000e-03, 2.00000000e-03, 3.00000000e-03, 4.00000000e-03, 5.00000000e-03, 6.00000000e-03, 7.00000000e-03, 8.00000000e-03, 9.00000000e-03, 1.00000000e-02, 2.00000000e-02, 3.00000000e-02, 4.00000000e-02, 5.00000000e-02, 6.00000000e-02, 7.00000000e-02, 8.00000000e-02, 9.00000000e-02, 1.00000000e-01, 1.10000000e-01, 1.20000000e-01, 1.30000000e-01, 1.40000000e-01, 1.50000000e-01, 1.60000000e-01, 1.70000000e-01, 1.80000000e-01, 1.90000000e-01, 2.00000000e-01, 2.10000000e-01, 2.20000000e-01, 2.30000000e-01, 2.40000000e-01, 2.50000000e-01, 2.60000000e-01, 2.70000000e-01, 2.80000000e-01, 2.90000000e-01, 3.00000000e-01, 3.10000000e-01, 3.20000000e-01, 3.30000000e-01, 3.40000000e-01, 3.50000000e-01, 3.60000000e-01, 3.70000000e-01, 3.80000000e-01, 3.90000000e-01, 4.00000000e-01, 4.10000000e-01, 4.20000000e-01, 4.30000000e-01, 4.40000000e-01, 4.50000000e-01, 4.60000000e-01, 4.70000000e-01, 4.80000000e-01, 4.90000000e-01, 5.00000000e-01, 5.10000000e-01, 5.20000000e-01, 5.30000000e-01, 5.40000000e-01, 5.50000000e-01, 5.60000000e-01, 5.70000000e-01, 5.80000000e-01, 5.90000000e-01, 6.00000000e-01, 6.10000000e-01, 6.20000000e-01, 6.30000000e-01, 6.40000000e-01, 6.50000000e-01, 6.60000000e-01, 6.70000000e-01, 6.80000000e-01, 6.90000000e-01, 7.00000000e-01, 7.10000000e-01, 7.20000000e-01, 7.30000000e-01, 7.40000000e-01, 7.50000000e-01, 7.60000000e-01, 7.70000000e-01, 7.80000000e-01, 7.90000000e-01, 8.00000000e-01, 8.10000000e-01, 8.20000000e-01, 8.30000000e-01, 8.40000000e-01, 8.50000000e-01, 8.60000000e-01, 8.70000000e-01, 8.80000000e-01, 8.90000000e-01, 9.00000000e-01, 9.10000000e-01, 9.20000000e-01, 9.30000000e-01, 9.40000000e-01, 9.50000000e-01, 9.60000000e-01, 9.70000000e-01, 9.80000000e-01, 9.90000000e-01, 1.00000000e+00, 1.01000000e+00]) avp = np.array([[ 0.32511915, 0.64395023, 0.95850548, 1.26960191, 1.57771242, 1.88315609, 2.1861672 , 2.48692722, 2.78558219, 3.0822529 , 5.95702388, 8.69654086, 11.32667999, 13.86239625, 16.31378009, 18.68823918, 20.9915131 , 23.22821996, 25.40218126, 27.51662947, 29.5743482 , 31.57777121, 33.52905472, 35.43013181, 34.20050107, 33.13149968, 32.15237839, 31.23863626, 30.37660619, 29.55741125, 28.77479332, 28.02410687, 27.30177917, 26.60499098, 25.93147359, 25.27937235, 24.6471508 , 24.03352086, 23.43739069, 25.94007773, 28.25103728, 30.44178874, 32.53758204, 34.5528131 , 36.4970684 , 38.37729705, 40.19881778, 41.9658593 , 43.6818802 , 45.34977247, 46.97199843, 48.55068709, 50.08770434, 51.58470557, 49.96092296, 48.5074373 , 47.15325214, 45.8736276 , 44.65466537, 43.48726435, 42.36494895, 41.28286238, 40.23722658, 39.2250226 , 38.24378734, 37.29147691, 36.36637054, 35.46700056, 34.59210015, 33.74056327, 32.91141376, 32.10378096, 31.31688056, 30.54999935, 29.80248297, 29.07372621, 28.36316519, 27.67027097, 26.99454454, 26.33551264, 25.69272447, 25.06574893, 24.45417241, 23.857597 , 23.27563895, 22.70792745, 22.15410362, 21.61381963, 21.08673795, 20.57253078, 20.07087947, 19.5814741 , 19.10401306, 18.6382027 , 18.18375701, 17.74039736, 17.3078522 , 16.88585686, 16.47415334, 16.07249005, 
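# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): in both Tang and Onitsuka checks the
# surcharge is a two-stage ramp: 0 to 50 over t = 0-0.15, constant to t = 0.3,
# then 50 to 100 by t = 0.45.  With mvref = 1 and H = 1 the expected
# settlement is simply the current applied stress minus the average excess
# pore pressure, which is how `settle` is assembled from `avp` further down.
# The avp values here are hypothetical, chosen only to show the arithmetic:
import numpy as np

t_sample = np.array([0.05, 0.2, 0.5, 1.0])
load = np.interp(t_sample, [0, 0.15, 0.3, 0.45, 4], [0.0, 50, 50, 100, 100])
avp_sample = np.array([20.0, 25.0, 45.0, 1.0])
settle_sample = 1.0 * 1.0 * (load - avp_sample)   # mv * H * (sigma - u_avg)
# ---------------------------------------------------------------------------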
15.68062171, 15.29830908, 14.92531886, 14.5614235 , 14.20640105, 13.86003501, 13.5221142 , 13.1924326 , 12.87078925, 12.55698809]]) z = np.array( [ 0. , 0.11111111, 0.22222222, 0.33333333, 0.44444444, 0.55555556, 0.66666667, 0.77777778, 0.88888889, 1. ]) por = np.array( [[ 0. , 0. , 0. ], [ 12.53440575, 12.14329557, 5.47538418], [ 21.70121334, 23.7536632 , 10.78420485], [ 28.183674 , 34.3627168 , 15.76501214], [ 32.61240467, 43.61300164, 20.26641403], [ 35.52623848, 51.27538849, 24.15169042], [ 37.36067128, 57.23734454, 27.30293317], [ 38.44529475, 61.47089757, 29.62459012], [ 39.00780593, 63.99300069, 31.04631563], [ 39.18082801, 64.82986237, 31.52505531]]) reader = textwrap.dedent("""\ #from geotecha.piecewise.piecewise_linear_1d import PolyLine #import numpy as np ######################################### #Tang and onitsuka input #t = np.array([ 1.00000000e-03, 2.00000000e-03, 3.00000000e-03, # 4.00000000e-03, 5.00000000e-03, 6.00000000e-03, # 7.00000000e-03, 8.00000000e-03, 9.00000000e-03, # 1.00000000e-02, 2.00000000e-02, 3.00000000e-02, # 4.00000000e-02, 5.00000000e-02, 6.00000000e-02, # 7.00000000e-02, 8.00000000e-02, 9.00000000e-02, # 1.00000000e-01, 1.10000000e-01, 1.20000000e-01, # 1.30000000e-01, 1.40000000e-01, 1.50000000e-01, # 1.60000000e-01, 1.70000000e-01, 1.80000000e-01, # 1.90000000e-01, 2.00000000e-01, 2.10000000e-01, # 2.20000000e-01, 2.30000000e-01, 2.40000000e-01, # 2.50000000e-01, 2.60000000e-01, 2.70000000e-01, # 2.80000000e-01, 2.90000000e-01, 3.00000000e-01, # 3.10000000e-01, 3.20000000e-01, 3.30000000e-01, # 3.40000000e-01, 3.50000000e-01, 3.60000000e-01, # 3.70000000e-01, 3.80000000e-01, 3.90000000e-01, # 4.00000000e-01, 4.10000000e-01, 4.20000000e-01, # 4.30000000e-01, 4.40000000e-01, 4.50000000e-01, # 4.60000000e-01, 4.70000000e-01, 4.80000000e-01, # 4.90000000e-01, 5.00000000e-01, 5.10000000e-01, # 5.20000000e-01, 5.30000000e-01, 5.40000000e-01, # 5.50000000e-01, 5.60000000e-01, 5.70000000e-01, # 5.80000000e-01, 5.90000000e-01, 6.00000000e-01, # 6.10000000e-01, 6.20000000e-01, 6.30000000e-01, # 6.40000000e-01, 6.50000000e-01, 6.60000000e-01, # 6.70000000e-01, 6.80000000e-01, 6.90000000e-01, # 7.00000000e-01, 7.10000000e-01, 7.20000000e-01, # 7.30000000e-01, 7.40000000e-01, 7.50000000e-01, # 7.60000000e-01, 7.70000000e-01, 7.80000000e-01, # 7.90000000e-01, 8.00000000e-01, 8.10000000e-01, # 8.20000000e-01, 8.30000000e-01, 8.40000000e-01, # 8.50000000e-01, 8.60000000e-01, 8.70000000e-01, # 8.80000000e-01, 8.90000000e-01, 9.00000000e-01, # 9.10000000e-01, 9.20000000e-01, 9.30000000e-01, # 9.40000000e-01, 9.50000000e-01, 9.60000000e-01, # 9.70000000e-01, 9.80000000e-01, 9.90000000e-01, # 1.00000000e+00, 1.01000000e+00]) # #H = 1 #z = np.linspace(0, H,10) #kv, kh, ks, kw = (10, 10, 10, 1) #mv=1 #gamw = 10 #rw, rs, re = (0.03, 0.03, 0.5) #drn = 1 #surcharge_vs_time = ((0,0.15, 0.3, 0.45,100.0), (0,50,50.0,100.0,100.0)) #tpor = t[np.array([20,60,90])] #nterms = 20 # #por, avp, settle = tangandonitsuka2000(z=z, t=t, kv=kv, kh=kh, ks=ks, kw=kw, mv=mv, gamw=gamw, rw=rw, rs=rs, re=re, H=H, # drn=drn, surcharge_vs_time=surcharge_vs_time, # tpor=tpor, nterms=nterms) ################################################################## H = 1 drn = 1 dTv = 1 #dTv=kvref/mvref/gamw/H**2 #re=0.5, rw = 0.03, n = 16.6667, mu = 2.074475589, #dTh = 2*khref/mvref/gamw/mu dTh = 3.856396307 neig = 20 mvref = 1.0 kvref = 10.0 khref = 10.0 etref = 3.856396307 #2/mu/re**2 mv = PolyLine([0,1], [1,1]) kv = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) et = PolyLine([0,1], [1,1]) dTw = 
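# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): unlike the "ideal" test above, this
# variant gives the drain a finite permeability (kw = 1 rather than 1e7 in
# the commented tangandonitsuka2000 call), so well resistance matters.  The
# dTw value assigned on the next line appears to be consistent with
#     dTw = kwref / (mvref * gamw * H**2 * (n**2 - 1)),   n = re / rw,
# although that grouping is the editor's inference from the numbers rather
# than a statement of the geotecha definition:
import numpy as np

kwref, mvref, gamw, H = 1.0, 1.0, 10.0, 1.0
n = 0.5 / 0.03
dTw_check = kwref / (mvref * gamw * H**2 * (n**2 - 1.0))   # ~0.00036130068
# ---------------------------------------------------------------------------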
0.0003613006824568446 #dTv=kwref/mvref/gamw/H**2 kwref = 1 kw = PolyLine([0,1], [1,1]) surcharge_vs_depth = PolyLine([0,1], [1,1]) surcharge_vs_time = PolyLine([0,0.15,0.3,0.45,4],[0.0,50,50,100,100]) ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] ppress_z_tval_indexes = [20,60,90] tvals = np.%s """ % (repr(z), repr(t))) # por = 100 * TERZ1D_POR # avp = 100 * TERZ1D_AVP settle = (np.interp(t,[0,0.15,0.3,0.45,4], [0.0,50,50,100,100]) - avp) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # # # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1e-2, err_msg = ("Fail. test_tang_and_onitsuka_vert_and_radial, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1e-2, err_msg = ("Fail. test_tang_and_onitsuka_vert_and_radial, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1e-2, err_msg = ("Fail. test_tang_and_onitsuka_vert_and_radial, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_two_layers_no_drain_in_bottom_layer(): """2 soil layers kw=0 in bottom layer should be same as 2 layers with no drain in bottom layer. """ t = np.array( [ 0.01 , 0.01114137, 0.01241302, 0.01382981, 0.01540831, 0.01716698, 0.01912637, 0.02130941, 0.02374161, 0.02645142, 0.02947052, 0.03283421, 0.03658182, 0.04075718, 0.0454091 , 0.05059197, 0.05636641, 0.06279993, 0.06996776, 0.0779537 , 0.08685114, 0.09676411, 0.10780851, 0.1201135 , 0.13382295, 0.14909717, 0.16611474, 0.18507465, 0.2061986 , 0.22973358, 0.25595479, 0.28516882, 0.31771727, 0.35398071, 0.39438316, 0.43939706, 0.48954872, 0.54542456, 0.60767794, 0.67703675, 0.75431201, 0.84040726, 0.93632921, 1.04319944, 1.16226758, 1.29492584, 1.44272539, 1.60739439, 1.7908583 , 1.99526231]) avp = np.array( [[ 18.02593864, 17.87835341, 17.71980093, 17.54964207, 17.36724711, 17.17200801, 16.96335316, 16.7407645 , 16.50379736, 16.25210253, 15.98545054, 15.70375721, 15.40710959, 15.09579046, 14.77029944, 14.43136775, 14.07996352, 13.7172843 , 13.3447335 , 12.96387856, 12.57638968, 12.18396013, 11.7882113 , 11.39058856, 10.99225595, 10.59400026, 10.1961553 , 9.79855724, 9.40053937, 9.00097176, 8.59834691, 8.19090865, 7.77681827, 7.35434941, 6.92210151, 6.47921886, 6.02559885, 5.56206938, 5.09051311, 4.61391873, 4.13634604, 3.66280228, 3.19903619, 2.7512627 , 2.32583262, 1.92886278, 1.56584432, 1.24125415, 0.95820373, 0.71816733]]) settle = np.array( [[ 1.97406136, 2.12164659, 2.28019907, 2.45035793, 2.63275289, 2.82799199, 3.03664684, 3.2592355 , 3.49620264, 3.74789747, 4.01454946, 4.29624279, 4.59289041, 4.90420954, 5.22970056, 5.56863225, 5.92003648, 6.2827157 , 6.6552665 , 7.03612144, 7.42361032, 7.81603987, 8.2117887 , 8.60941144, 9.00774405, 9.40599974, 9.8038447 , 10.20144276, 10.59946063, 10.99902824, 11.40165309, 11.80909135, 12.22318173, 12.64565059, 13.07789849, 13.52078114, 13.97440115, 14.43793062, 14.90948689, 15.38608127, 15.86365396, 16.33719772, 16.80096381, 17.2487373 , 17.67416738, 18.07113722, 18.43415568, 18.75874585, 19.04179627, 19.28183267]]) z = np.array( [ 0., 0.01010101, 
0.02020202, 0.03030303, 0.04040404, 0.05050505, 0.06060606, 0.07070707, 0.08080808, 0.09090909, 0.1010101 , 0.11111111, 0.12121212, 0.13131313, 0.14141414, 0.15151515, 0.16161616, 0.17171717, 0.18181818, 0.19191919, 0.2020202 , 0.21212121, 0.22222222, 0.23232323, 0.24242424, 0.25252525, 0.26262626, 0.27272727, 0.28282828, 0.29292929, 0.3030303 , 0.31313131, 0.32323232, 0.33333333, 0.34343434, 0.35353535, 0.36363636, 0.37373737, 0.38383838, 0.39393939, 0.4040404 , 0.41414141, 0.42424242, 0.43434343, 0.44444444, 0.45454545, 0.46464646, 0.47474747, 0.48484848, 0.49494949, 0.50505051, 0.51515152, 0.52525253, 0.53535354, 0.54545455, 0.55555556, 0.56565657, 0.57575758, 0.58585859, 0.5959596 , 0.60606061, 0.61616162, 0.62626263, 0.63636364, 0.64646465, 0.65656566, 0.66666667, 0.67676768, 0.68686869, 0.6969697 , 0.70707071, 0.71717172, 0.72727273, 0.73737374, 0.74747475, 0.75757576, 0.76767677, 0.77777778, 0.78787879, 0.7979798 , 0.80808081, 0.81818182, 0.82828283, 0.83838384, 0.84848485, 0.85858586, 0.86868687, 0.87878788, 0.88888889, 0.8989899 , 0.90909091, 0.91919192, 0.92929293, 0.93939394, 0.94949495, 0.95959596, 0.96969697, 0.97979798, 0.98989899, 1. ]) por = np.array( [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 2.05562052e+00, 1.25986166e+00, 6.78991414e-01, 2.84541748e-01, 9.10626442e-02, 3.66582957e-02, 1.69401561e-02, 3.80087999e-03], [ 4.06978916e+00, 2.50772351e+00, 1.35494648e+00, 5.68592909e-01, 1.82215916e-01, 7.34271212e-02, 3.39358576e-02, 7.61423113e-03], [ 6.00356619e+00, 3.73195163e+00, 2.02489728e+00, 8.51696052e-01, 2.73576045e-01, 1.10434600e-01, 5.10513206e-02, 1.14544710e-02], [ 7.82272729e+00, 4.92156423e+00, 2.68594746e+00, 1.13339909e+00, 3.65261297e-01, 1.47810954e-01, 6.83518475e-02, 1.53362612e-02], [ 9.49951294e+00, 6.06649007e+00, 3.33527234e+00, 1.41322066e+00, 4.57359240e-01, 1.85665856e-01, 8.58926759e-02, 1.92720039e-02], [ 1.10138572e+01, 7.15787331e+00, 3.97020234e+00, 1.69069012e+00, 5.49949944e-01, 2.24102244e-01, 1.03725678e-01, 2.32733458e-02], [ 1.23539525e+01, 8.18834767e+00, 4.58833192e+00, 1.96541993e+00, 6.43158810e-01, 2.63251001e-01, 1.21916356e-01, 2.73549933e-02], [ 1.35160196e+01, 9.15213127e+00, 5.18751845e+00, 2.23709246e+00, 7.37150779e-01, 3.03268914e-01, 1.40542948e-01, 3.15345114e-02], [ 1.45034034e+01, 1.00449646e+01, 5.76578610e+00, 2.50536456e+00, 8.32060724e-01, 3.44294329e-01, 1.59674785e-01, 3.58274664e-02], [ 1.53253465e+01, 1.08640917e+01, 6.32131649e+00, 2.76983967e+00, 9.27965781e-01, 3.86427657e-01, 1.79362674e-01, 4.02452659e-02], [ 1.59956837e+01, 1.16083632e+01, 6.85260170e+00, 3.03018014e+00, 1.02495891e+00, 4.29776751e-01, 1.99660976e-01, 4.48001158e-02], [ 1.65314127e+01, 1.22782578e+01, 7.35857377e+00, 3.28621398e+00, 1.12322631e+00, 4.74507099e-01, 2.20652168e-01, 4.95105326e-02], [ 1.69510760e+01, 1.28756170e+01, 7.83851004e+00, 3.53786532e+00, 1.22300400e+00, 5.20816277e-01, 2.42434492e-01, 5.43985725e-02], [ 1.72732221e+01, 1.34032356e+01, 8.29182518e+00, 3.78498779e+00, 1.32446005e+00, 5.68859447e-01, 2.65085625e-01, 5.94816751e-02], [ 1.75153748e+01, 1.38646581e+01, 8.71805753e+00, 4.02735125e+00, 1.42767771e+00, 6.18736572e-01, 2.88656320e-01, 6.47712366e-02], [ 1.76935872e+01, 1.42642400e+01, 9.11710254e+00, 4.26483640e+00, 1.53278641e+00, 6.70573816e-01, 3.13210040e-01, 7.02815062e-02], [ 1.78221743e+01, 1.46071103e+01, 9.48936081e+00, 4.49757345e+00, 1.64006257e+00, 7.24588524e-01, 3.38854727e-01, 7.60367172e-02], [ 
1.79132974e+01, 1.48987602e+01, 9.83553656e+00, 4.72580202e+00, 1.74983905e+00, 7.81034321e-01, 3.65716164e-01, 8.20651082e-02], [ 1.79766272e+01, 1.51445247e+01, 1.01563148e+01, 4.94962489e+00, 1.86233366e+00, 8.40092957e-01, 3.93885235e-01, 8.83870841e-02], [ 1.80195496e+01, 1.53494630e+01, 1.04523667e+01, 5.16902028e+00, 1.97765035e+00, 9.01872744e-01, 4.23417032e-01, 9.50150160e-02], [ 1.80478503e+01, 1.55186405e+01, 1.07246977e+01, 5.38413656e+00, 2.09597996e+00, 9.66533630e-01, 4.54391769e-01, 1.01966913e-01], [ 1.80662269e+01, 1.56572445e+01, 1.09748268e+01, 5.59546291e+00, 2.21772452e+00, 1.03436719e+00, 4.86953872e-01, 1.09275200e-01], [ 1.80782168e+01, 1.57701702e+01, 1.12044654e+01, 5.80359717e+00, 2.34334503e+00, 1.10570326e+00, 5.21266616e-01, 1.16976531e-01], [ 1.80859786e+01, 1.58615086e+01, 1.14150755e+01, 6.00890456e+00, 2.47312536e+00, 1.18076135e+00, 5.57439678e-01, 1.25095531e-01], [ 1.80906357e+01, 1.59346218e+01, 1.16079283e+01, 6.21157118e+00, 2.60720185e+00, 1.25966668e+00, 5.95536796e-01, 1.33646512e-01], [ 1.80930997e+01, 1.59927387e+01, 1.17846201e+01, 6.41202956e+00, 2.74585451e+00, 1.34263190e+00, 6.35664302e-01, 1.42653348e-01], [ 1.80944960e+01, 1.60392359e+01, 1.19473001e+01, 6.61116490e+00, 2.88965737e+00, 1.43005370e+00, 6.78018321e-01, 1.52160068e-01], [ 1.80957291e+01, 1.60771499e+01, 1.20981978e+01, 6.80995564e+00, 3.03924030e+00, 1.52236606e+00, 7.22813391e-01, 1.62214836e-01], [ 1.80969034e+01, 1.61085810e+01, 1.22390365e+01, 7.00900862e+00, 3.19496835e+00, 1.61983851e+00, 7.70184138e-01, 1.72847877e-01], [ 1.80975980e+01, 1.61349333e+01, 1.23711871e+01, 7.20867828e+00, 3.35701566e+00, 1.72261964e+00, 8.20206314e-01, 1.84076202e-01], [ 1.80978176e+01, 1.61578205e+01, 1.24964258e+01, 7.40967825e+00, 3.52578454e+00, 1.83099894e+00, 8.73024317e-01, 1.95932230e-01], [ 1.80983643e+01, 1.61794414e+01, 1.26172283e+01, 7.61333250e+00, 3.70208829e+00, 1.94552420e+00, 9.28908577e-01, 2.08476672e-01], [ 1.81000339e+01, 1.62018446e+01, 1.27360680e+01, 7.82102538e+00, 3.88678397e+00, 2.06677438e+00, 9.88145045e-01, 2.21773722e-01], [ 1.81027168e+01, 1.62260938e+01, 1.28546163e+01, 8.03355595e+00, 4.08032705e+00, 2.19507974e+00, 1.05089878e+00, 2.35860441e-01], [ 1.81058045e+01, 1.62526620e+01, 1.29740448e+01, 8.25136565e+00, 4.28291792e+00, 2.33061024e+00, 1.11725681e+00, 2.50756369e-01], [ 1.81095573e+01, 1.62827408e+01, 1.30961613e+01, 8.47544533e+00, 4.49512325e+00, 2.47376386e+00, 1.18741731e+00, 2.66506000e-01], [ 1.81155659e+01, 1.63186915e+01, 1.32237945e+01, 8.70765607e+00, 4.71810895e+00, 2.62531624e+00, 1.26176269e+00, 2.83195183e-01], [ 1.81254743e+01, 1.63628374e+01, 1.33596952e+01, 8.94985732e+00, 4.95305435e+00, 2.78605752e+00, 1.34068289e+00, 3.00911458e-01], [ 1.81396357e+01, 1.64161739e+01, 1.35053517e+01, 9.20294655e+00, 5.20048966e+00, 2.95637618e+00, 1.42437252e+00, 3.19698520e-01], [ 1.81578285e+01, 1.64790210e+01, 1.36615449e+01, 9.46728165e+00, 5.46057213e+00, 3.13642740e+00, 1.51291266e+00, 3.39574576e-01], [ 1.81814579e+01, 1.65530899e+01, 1.38301894e+01, 9.74414988e+00, 5.73409163e+00, 3.32676174e+00, 1.60657705e+00, 3.60601086e-01], [ 1.82142256e+01, 1.66421109e+01, 1.40149053e+01, 1.00362427e+01, 6.02281375e+00, 3.52854518e+00, 1.70593964e+00, 3.82906896e-01], [ 1.82598458e+01, 1.67496569e+01, 1.42190781e+01, 1.03461151e+01, 6.32843987e+00, 3.74291340e+00, 1.81156028e+00, 4.06617686e-01], [ 1.83195944e+01, 1.68767966e+01, 1.44437454e+01, 1.06744760e+01, 6.65142810e+00, 3.97023148e+00, 1.92362389e+00, 4.31774982e-01], [ 1.83936222e+01, 
1.70233166e+01, 1.46886736e+01, 1.10210191e+01, 6.99153949e+00, 4.21042835e+00, 2.04210301e+00, 4.58372631e-01], [ 1.84854082e+01, 1.71919460e+01, 1.49561524e+01, 1.13874555e+01, 7.34991267e+00, 4.46429367e+00, 2.16738950e+00, 4.86498608e-01], [ 1.86034190e+01, 1.73899685e+01, 1.52524813e+01, 1.17787407e+01, 7.72993606e+00, 4.73403351e+00, 2.30056585e+00, 5.16395916e-01], [ 1.87553299e+01, 1.76237886e+01, 1.55831379e+01, 1.21992398e+01, 8.13465336e+00, 5.02165754e+00, 2.44262007e+00, 5.48286351e-01], [ 1.89382521e+01, 1.78896791e+01, 1.59444713e+01, 1.26460422e+01, 8.56216203e+00, 5.32609341e+00, 2.59303948e+00, 5.82054847e-01], [ 1.91345873e+01, 1.81698299e+01, 1.63200883e+01, 1.31059554e+01, 9.00346539e+00, 5.64181399e+00, 2.74914017e+00, 6.17098967e-01], [ 1.93193626e+01, 1.84392866e+01, 1.66869996e+01, 1.35603096e+01, 9.44566173e+00, 5.96079822e+00, 2.90702027e+00, 6.52542875e-01], [ 1.94739543e+01, 1.86790060e+01, 1.70273194e+01, 1.39942958e+01, 9.87830343e+00, 6.27649452e+00, 3.06348991e+00, 6.87670545e-01], [ 1.95945372e+01, 1.88839740e+01, 1.73356247e+01, 1.44029384e+01, 1.02975418e+01, 6.58642788e+00, 3.21734206e+00, 7.22211032e-01], [ 1.96887424e+01, 1.90601614e+01, 1.76163417e+01, 1.47891267e+01, 1.07048744e+01, 6.89144450e+00, 3.36898611e+00, 7.56256230e-01], [ 1.97654357e+01, 1.92149177e+01, 1.78752097e+01, 1.51568579e+01, 1.11025813e+01, 7.19287669e+00, 3.51906778e+00, 7.89951076e-01], [ 1.98280036e+01, 1.93506316e+01, 1.81136117e+01, 1.55066972e+01, 1.14906182e+01, 7.49059990e+00, 3.66752260e+00, 8.23281093e-01], [ 1.98759361e+01, 1.94662250e+01, 1.83298971e+01, 1.58368009e+01, 1.18672879e+01, 7.78344350e+00, 3.81377547e+00, 8.56117177e-01], [ 1.99101245e+01, 1.95621686e+01, 1.85238747e+01, 1.61465006e+01, 1.22316808e+01, 8.07071284e+00, 3.95748188e+00, 8.88381988e-01], [ 1.99346954e+01, 1.96422202e+01, 1.86983936e+01, 1.64375879e+01, 1.25845702e+01, 8.35275360e+00, 4.09880309e+00, 9.20111710e-01], [ 1.99539901e+01, 1.97105518e+01, 1.88567878e+01, 1.67122946e+01, 1.29270486e+01, 8.63010556e+00, 4.23799424e+00, 9.51363598e-01], [ 1.99694135e+01, 1.97687301e+01, 1.90001708e+01, 1.69711270e+01, 1.32590479e+01, 8.90257760e+00, 4.37495375e+00, 9.82114837e-01], [ 1.99801567e+01, 1.98163574e+01, 1.91279944e+01, 1.72132985e+01, 1.35796230e+01, 9.16942145e+00, 4.50930778e+00, 1.01228149e+00], [ 1.99862887e+01, 1.98539620e+01, 1.92406280e+01, 1.74387868e+01, 1.38883536e+01, 9.43020686e+00, 4.64083644e+00, 1.04181419e+00], [ 1.99899347e+01, 1.98840852e+01, 1.93403345e+01, 1.76491253e+01, 1.41858944e+01, 9.68516804e+00, 4.76964272e+00, 1.07073602e+00], [ 1.99933051e+01, 1.99093878e+01, 1.94295728e+01, 1.78460565e+01, 1.44730645e+01, 9.93463854e+00, 4.89587746e+00, 1.09908082e+00], [ 1.99965806e+01, 1.99306165e+01, 1.95091656e+01, 1.80300647e+01, 1.47498442e+01, 1.01784245e+01, 5.01943400e+00, 1.12682463e+00], [ 1.99985080e+01, 1.99471402e+01, 1.95787604e+01, 1.82007333e+01, 1.50156123e+01, 1.04159511e+01, 5.14001953e+00, 1.15390169e+00], [ 1.99986875e+01, 1.99590988e+01, 1.96387407e+01, 1.83582686e+01, 1.52701838e+01, 1.06469109e+01, 5.25747076e+00, 1.18027534e+00], [ 1.99983823e+01, 1.99681637e+01, 1.96909009e+01, 1.85040435e+01, 1.55141877e+01, 1.08715018e+01, 5.37187013e+00, 1.20596404e+00], [ 1.99989230e+01, 1.99760226e+01, 1.97370839e+01, 1.86395136e+01, 1.57483324e+01, 1.10899707e+01, 5.48332354e+00, 1.23099153e+00], [ 2.00000677e+01, 1.99828219e+01, 1.97777747e+01, 1.87650883e+01, 1.59726329e+01, 1.13021313e+01, 5.59172561e+00, 1.25533414e+00], [ 2.00005537e+01, 1.99876858e+01, 
1.98125510e+01, 1.88804818e+01, 1.61866455e+01, 1.15075096e+01, 5.69683041e+00, 1.27893664e+00], [ 1.99999608e+01, 1.99904839e+01, 1.98416561e+01, 1.89859652e+01, 1.63903212e+01, 1.17058767e+01, 5.79851117e+00, 1.30177053e+00], [ 1.99992867e+01, 1.99923831e+01, 1.98664900e+01, 1.90827602e+01, 1.65842774e+01, 1.18974209e+01, 5.89684413e+00, 1.32385291e+00], [ 1.99995358e+01, 1.99945183e+01, 1.98884168e+01, 1.91720879e+01, 1.67691522e+01, 1.20823460e+01, 5.99191295e+00, 1.34520251e+00], [ 2.00003627e+01, 1.99967116e+01, 1.99076119e+01, 1.92542424e+01, 1.69449702e+01, 1.22604753e+01, 6.08361601e+00, 1.36579651e+00], [ 2.00006244e+01, 1.99979979e+01, 1.99235256e+01, 1.93289533e+01, 1.71113852e+01, 1.24314031e+01, 6.17174012e+00, 1.38558701e+00], [ 1.99999963e+01, 1.99981600e+01, 1.99362544e+01, 1.93964768e+01, 1.72684242e+01, 1.25949591e+01, 6.25618673e+00, 1.40455186e+00], [ 1.99993890e+01, 1.99981307e+01, 1.99469016e+01, 1.94578847e+01, 1.74166857e+01, 1.27513338e+01, 6.33703331e+00, 1.42270842e+00], [ 1.99996465e+01, 1.99987649e+01, 1.99564797e+01, 1.95141870e+01, 1.75567431e+01, 1.29007077e+01, 6.41435249e+00, 1.44007297e+00], [ 2.00003842e+01, 1.99997414e+01, 1.99649264e+01, 1.95655418e+01, 1.76886034e+01, 1.30429129e+01, 6.48804774e+00, 1.45662381e+00], [ 2.00005644e+01, 2.00001084e+01, 1.99715908e+01, 1.96116375e+01, 1.78119648e+01, 1.31775942e+01, 6.55793140e+00, 1.47231878e+00], [ 1.99999539e+01, 1.99996671e+01, 1.99764717e+01, 1.96526797e+01, 1.79268879e+01, 1.33046281e+01, 6.62392921e+00, 1.48714120e+00], [ 1.99994234e+01, 1.99992588e+01, 1.99804787e+01, 1.96895985e+01, 1.80339391e+01, 1.34242136e+01, 6.68612432e+00, 1.50110969e+00], [ 1.99997093e+01, 1.99995936e+01, 1.99843859e+01, 1.97232140e+01, 1.81336214e+01, 1.35365177e+01, 6.74458491e+00, 1.51423953e+00], [ 2.00003941e+01, 2.00002862e+01, 1.99879698e+01, 1.97535442e+01, 1.82259003e+01, 1.36413798e+01, 6.79922012e+00, 1.52651032e+00], [ 2.00005147e+01, 2.00004305e+01, 1.99905237e+01, 1.97802122e+01, 1.83104793e+01, 1.37384831e+01, 6.84986370e+00, 1.53788470e+00], [ 1.99999140e+01, 1.99998818e+01, 1.99920061e+01, 1.98033614e+01, 1.83874225e+01, 1.38277441e+01, 6.89646362e+00, 1.54835097e+00], [ 1.99994438e+01, 1.99994494e+01, 1.99932167e+01, 1.98237965e+01, 1.84572518e+01, 1.39093735e+01, 6.93911184e+00, 1.55792975e+00], [ 1.99997591e+01, 1.99997563e+01, 1.99947775e+01, 1.98421700e+01, 1.85203916e+01, 1.39835304e+01, 6.97787610e+00, 1.56663624e+00], [ 2.00004073e+01, 2.00003757e+01, 1.99963615e+01, 1.98583648e+01, 1.85767467e+01, 1.40500592e+01, 7.01267151e+00, 1.57445135e+00], [ 2.00004768e+01, 2.00004454e+01, 1.99972421e+01, 1.98719306e+01, 1.86259978e+01, 1.41086735e+01, 7.04335044e+00, 1.58134194e+00], [ 1.99998769e+01, 1.99998798e+01, 1.99973793e+01, 1.98829503e+01, 1.86681918e+01, 1.41593249e+01, 7.06988202e+00, 1.58730105e+00], [ 1.99994552e+01, 1.99994818e+01, 1.99975210e+01, 1.98921215e+01, 1.87037974e+01, 1.42022384e+01, 7.09236937e+00, 1.59235183e+00], [ 1.99998018e+01, 1.99998107e+01, 1.99981922e+01, 1.98999496e+01, 1.87331546e+01, 1.42375688e+01, 7.11088248e+00, 1.59650997e+00], [ 2.00004249e+01, 2.00004013e+01, 1.99989956e+01, 1.99061888e+01, 1.87560932e+01, 1.42651629e+01, 7.12534221e+00, 1.59975771e+00], [ 2.00004476e+01, 2.00004232e+01, 1.99992023e+01, 1.99103129e+01, 1.87722527e+01, 1.42847588e+01, 7.13561753e+00, 1.60206561e+00], [ 1.99998402e+01, 1.99998483e+01, 1.99987965e+01, 1.99123488e+01, 1.87816499e+01, 1.42963407e+01, 7.14169820e+00, 1.60343138e+00], [ 1.99994588e+01, 1.99994872e+01, 1.99985089e+01, 
1.99129033e+01, 1.87846978e+01, 1.43001503e+01, 7.14370042e+00, 1.60388110e+00]]) reader = textwrap.dedent("""\ ############## ##Speccon1dVR input (et=0 in 2nd layer) #H = 1 #drn = 1 #dT = 1 #dTh = 5 #dTv = 0.1 * 0.25 #neig = 40 # #mvref = 2.0 #kvref = 1.0 #khref = 1.0 #etref = 1.0 # # #mv = PolyLine([0,1], [0.5,0.5]) #kh = PolyLine([0,1], [1,1]) #kv = PolyLine([0,1], [5,5]) #et = PolyLine([0,0.5, 0.5, 1], [1,1, 0,0]) #surcharge_vs_depth = [PolyLine([0,1], [1,1]), PolyLine([0,1], [1,1])] #surcharge_vs_time = [PolyLine([0,0,10], [0,10,10]), PolyLine([0,0,10], [0,10,10])] # #ppress_z = np.linspace(0,1,100) #avg_ppress_z_pairs = [[0,1]] #settlement_z_pairs = [[0,1]] # #tvals = np.logspace(-2, 0.3,50) #ppress_z_tval_indexes = np.arange(len(tvals))[::len(tvals)//7] ############### H = 1 drn = 1 dTh = 5 dTv = 0.1 * 0.25 dTw = 100000 neig = 200 mvref = 2.0 kvref = 1.0 khref = 1.0 etref = 1.0 kwref = 1.0 kw = PolyLine([0, 0.5, 0.5, 1.0], [1, 1, 0,0]) mv = PolyLine([0,1], [0.5,0.5]) kh = PolyLine([0,1], [1,1]) kv = PolyLine([0,1], [5,5]) et = PolyLine([0,1], [1,1]) #et = PolyLine([0, 0.5, 0.5, 1.0], [1, 1, 0,0]) surcharge_vs_depth = [PolyLine([0,1], [1,1]), PolyLine([0,1], [1,1])] surcharge_vs_time = [PolyLine([0,0,10], [0,10,10]), PolyLine([0,0,10], [0,10,10])] ppress_z = np.%s avg_ppress_z_pairs = [[0,1]] settlement_z_pairs = [[0,1]] tvals = np.%s ppress_z_tval_indexes = np.arange(len(tvals))[::len(tvals)//7] """ % (repr(z), repr(t))) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # # # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() assert_allclose(a.avp, avp, atol=1, err_msg = ("Fail. two_layers_no_drain_in_bottom_layer, avp, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=1, err_msg = ("Fail. two_layers_no_drain_in_bottom_layer, por, " "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.set, settle, atol=1, err_msg = ("Fail. two_layers_no_drain_in_bottom_layer, settle, " "implementation='%s', dT=%s" % (impl, dT))) def test_dengetal2013(): """ deng et al uses an approximation for the well resistance term I use an 'exact method'. The tolerance for checking is very crude but really it is for ball park figures. 
""" t = np.array( [11025., 110250.]) z = np.array( [0.05, 0.1, 0.2, 0.5, 0.8, 1.0]) por_99 = np.array( [[ 0.81168066, 0.12412292], [ 0.83668852, 0.16812724], [ 0.87096244, 0.25118534], [ 0.92073203, 0.43785721], [ 0.94257216, 0.55353625], [ 0.95050775, 0.60194473]]) por_0=np.array([[ 0.81096427, 0.12303176], [ 0.83451179, 0.16380411], [ 0.86536448, 0.23549986], [ 0.90572564, 0.37150668], [ 0.91883398, 0.42891426], [ 0.92092841, 0.43879202]]) por=por_99 reader = textwrap.dedent("""\ ############## ##dengetal2013 input #dengetal2013(z=np.array([0.05, 0.1, 0.2, 0.5, 0.8, 1.0])*20, # t=[11025., 110250.], # rw=0.035, re=0.525, # A1=1, A2=0.99, A3=9.07029478e-06, # H=20, # rs=0.175, # ks=2e-8/1.8, # kw0=1e-3, # kh=2e-8, # mv=0.2e-3, # gamw=10, # ui=1) ############### H = 20 drn = 1 #re=0.525, rw=0.035, rs=0.175, kh/ks=1.8, n=15, s=5, kap=1.8 #mu=3.18131104929, eta = 2/re**2/mu=2.28089479942 mvref = 0.2e-3 khref = 2e-8 etref = 2.28089479942 kwref = 1e-3 dTh=khref/mvref*etref/10 dTw=kwref/H**2/mvref/10 / (15**2-1) #dTw = 100000 neig = 40 kw = PolyLine([0, 1], [1, 0.01]) #kw = PolyLine([0, 1], [1, 1]) mv = PolyLine([0,1], [1,1]) kh = PolyLine([0,1], [1,1]) #kv = PolyLine([0,1], [5,5]) et = PolyLine([0,1], [1,1]) surcharge_vs_depth = [PolyLine([0,1], [1,1])] surcharge_vs_time = [PolyLine([0,0,10], [0,1,1])] ppress_z = np.%s tvals = np.%s """ % (repr(z), repr(t))) for impl in ["vectorized"]: for dT in [0.1]: a = Speccon1dVRW(reader + "\n" + "implementation = '%s'" % impl + "\n" + "dT = %s" % dT) a.make_all() # plt.figure() # plt.plot(por, z,'b-*', label='expected') # plt.plot(a.por, z, 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t,settle[0],'b-*', label='expected') # plt.plot(t, a.set[0], 'r-+', label='calculated') # plt.legend() # plt.figure() # plt.plot(t, avp[0],'b-*', label='expected') # plt.plot(t, a.avp[0], 'r-+', label='calculated') # plt.legend() # plt.show() # assert_allclose(a.avp, avp, atol=1, # err_msg = ("Fail. dengetal2013, avp, " # "implementation='%s', dT=%s" % (impl, dT))) assert_allclose(a.por, por, atol=0.1, err_msg = ("Fail. dengetal2013, por, " "implementation='%s', dT=%s" % (impl, dT))) # assert_allclose(a.set, settle, atol=1, # err_msg = ("Fail. dengetal2013, settle, " # "implementation='%s', dT=%s" % (impl, dT))) if __name__ == '__main__': import nose nose.runmodule(argv=['nose', '--verbosity=3', '--with-doctest']) # nose.runmodule(argv=['nose', '--verbosity=3']) # test_BC_terzaghi_1d_PTIB() # test_terzaghi_1d_PTPB() # test_schiffman_and_stein_1970() # print(np.append(0.5*TERZ1D_Z, 1-0.5*TERZ1D_Z[::-1])) # test_terzaghi_1d() # test_fixed_ppress_BC_terzaghi_PTPB() # test_hansbo_avp_vacuum() # test_terzaghi_1d_PTPB_bot_BC_gradient() # test_terzaghi_1d_pumping() # test_tang_and_onitsuka_vert_and_radial() # test_nogamiandli2003_lam_5() # test_nogamiandli2003_lam_100() # test_zhuandyin2012_drn0_kv_linear_mv_const() # test_zhuandyin2012_drn1_kv_linear_mv_const() # test_zhuandyin2012_drn0_kv_const_mv_linear() # test_zhuandyin2012_drn1_kv_const_mv_linear() # test_zhuandyin2012_drn0_kv_linear_mv_linear() # test_zhuandyin2012_drn1_kv_linear_mv_linear() # test_zhuandyin2012_drn0_kv_mv_non_linear() # test_zhuandyin2012_drn1_kv_mv_non_linear() # test_zhuandyin2012_drn0_kv_mv_non_linear_BC() # test_schiffman_and_stein_1970() # test_tang_and_onitsuka_vert_and_radial_well_resistance() # test_two_layers_no_drain_in_bottom_layer() # test_dengetal2013()
gpl-3.0
7,107,741,138,485,871,000
36.641975
162
0.514174
false
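The geotecha test record above explains that Deng et al. (2013) use an approximate well-resistance term while the tested code uses an exact one, so the pore-pressure arrays are only compared to ball-park accuracy. As a minimal, made-up sketch of that style of loose check (the numbers below are illustrative, not values from the record), assuming numpy.testing:

import numpy as np
from numpy.testing import assert_allclose

published = np.array([0.81, 0.84, 0.87])    # approximate benchmark values
computed = np.array([0.812, 0.836, 0.871])  # values from the 'exact' formulation
assert_allclose(computed, published, atol=0.1)  # crude tolerance: ball-park agreement only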
thekingofkings/embedding
python/crawl_zillow.py
1
6999
# -*- coding: utf-8 -*- """ Created on Sun Aug 28 15:18:12 2016 @author: gjz5038 """ import requests import numpy as np import time session = requests.Session() session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36', 'Host': 'www.zillow.com', 'Accept-Language': 'en-US,en;q=0.9', 'Upgrade-Insecure-Requests': '1'}) def split_each_item(content): #split the item on each page (one page contains several houses) list_content = content.split('</div></article>') list_item = [] for item in list_content: if item[:5] == '</li>': dict_item = convert_to_dict(item) list_item.append(dict_item) return list_item def convert_to_dict(item_s): zpid = crawl_one_prop('zpid_', item_s) street = crawl_one_prop('streetAddress">', item_s) city = crawl_one_prop('addressLocality">', item_s) state = crawl_one_prop('addressRegion">', item_s) zipcode = crawl_one_prop('postalCode" class="hide">', item_s) price = crawl_one_prop('class="zsg-photo-card-price">$', item_s) sold = crawl_one_prop('</span>SOLD: $', item_s) sold_time = crawl_one_prop('Sold ', item_s) lat = crawl_one_prop('<meta itemprop="latitude" content="', item_s) lon = crawl_one_prop('<meta itemprop="longitude" content="', item_s) price_sqft = crawl_one_prop('Price/sqft: $', item_s) bed_b = crawl_one_prop('"bed":', item_s) bath_b = crawl_one_prop('"bath":', item_s) sqft_b = crawl_one_prop('"sqft":', item_s) bed_a = crawl_one_prop_backward(' bds', item_s) bath_a = crawl_one_prop_backward(' ba', item_s) sqft_a = crawl_one_prop_backward(' sqft', item_s) bed = compare_and_choose(bed_a, bed_b) bath = compare_and_choose(bath_a, bath_b) sqft = compare_and_choose(sqft_a, sqft_b) # return [zpid, sold_time, lat, lon, price_sqft] return [zpid, street, city, state, zipcode, price, sold, sold_time, price_sqft, lat, lon, bed, bath, sqft] def compare_and_choose(a, b): if a == '-1' and b == '-1': return '-1' elif a == '-1': return b elif b == '-1': return a else: if float(a) != float(b): print ('reading error') raise ValueError else: return a def crawl_one_prop_backward(string_tail, item_s): pos = item_s.find(string_tail) if pos != -1: end = pos pos -= 1 while item_s[pos] != '>' and item_s[pos] != '"' and item_s[pos] != ' ' : pos -= 1 value = item_s[pos+1:end] else: value = '-1' value = value.replace(',', '') value = value.replace('+', '') value = value.replace(';', '') if value == 'null' or value == '--' or value == '0': value = '-1' return value def crawl_one_prop(string_head, item_s): pos = item_s.find(string_head) if pos != -1: start = pos+len(string_head) for i in range(start, len(item_s)): if item_s[i] =='"' or item_s[i] == '<' or item_s[i] == '}': value = item_s[start:i] break else: value = '-1' value = value.replace(',', '') value = value.replace('+', '') value = value.replace(';', '') if value == 'null' or value == '--' or value == '0': value = '-1' return value def crawl(z, price_range, page): url = 'http://www.zillow.com/homes/'+str(z)+'_rb/'+str(price_range[0])+'-'+str(price_range[1])+'_price/11_zm/1_rs/'+str(page)+'_p/' session.headers.update({'Referer': url}) res = session.get(url) content = res.content list_item = split_each_item(str(content)) return list_item def main(file_name): # 3 parameters you may want to change # n_page determines how many page you want to search in this zipcode and price range list_zip = range(60601, 60627) + range(60628, 60648) + [60649, 60651, 60652, 60653, 60655, 60656, 60657, 60659, 60660, 60661, 60666, 60667, 60827] assert len(list_zip) == 59 
# list_zip = [60827] list_price = [0, 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, 1000000, 5000000] n_page = 20 array_data_head = ['zpid', 'street', 'city', 'state', 'zipcode', 'price', 'sold', 'soldTime', 'priceSqft', 'latitude', 'longitude', 'numBedrooms', 'numBathrooms', 'sqft'] # array_data_head = ['zpid', 'soldTime', 'lat', 'lon', 'priceSqft'] array_data = [] for z in list_zip: for p in range(len(list_price)): if p == len(list_price) -1 : price_range = [list_price[p], 10000000] else: price_range = [list_price[p], list_price[p+1]] print ('zip: '+str(z)+',' +'price range: '+str(price_range[0])+'-'+str(price_range[1])) for page in range(n_page): print ('page: '+str(page)) list_item = crawl(z, price_range, page) time.sleep(5) if len(list_item) == 0: break array_data += list_item index_zpid = array_data_head.index('zpid') array_data = np.array(array_data) list_unique, unique_indecies = np.unique(array_data[:,index_zpid], return_index = True) # output the data print (len(array_data)) array_data = array_data[unique_indecies] print (len(array_data)) keep_list = [] del_list = [] list_must_have_attr = ['zpid', 'lat','lon','priceSqft'] for i in range(len(array_data)): for attr in list_must_have_attr: index_attr = array_data_head.index(attr) if array_data[i, index_attr] == '-1': del_list.append(i) break for ind in range(len(array_data)): if ind not in del_list: keep_list.append(ind) array_data = array_data[keep_list] for i in range(len(array_data)): for j in range(len(array_data[i])): array_data[i][j] = array_data[i][j].replace(',', '') f = open(file_name+'.csv', 'w') for i in range(len(array_data_head)): if i == len(array_data_head) -1: f.write(array_data_head[i]+'\n') else: f.write(array_data_head[i]+',') for r in range(len(array_data)): for i in range(len(array_data[r])): if i == len(array_data[r]) -1: f.write(array_data[r][i]+'\n') else: f.write(array_data[r][i]+',') f.close() main('house_source_extra')
mit
1,120,078,061,290,505,300
31.253456
175
0.519074
false
tosanai/wbai_hackathon_2017
agent/cognitive/module.py
1
5528
# coding: utf-8 import copy import os import brica1.gym import numpy as np import _pickle as pickle from ml.cnn_feature_extractor import CnnFeatureExtractor from ml.q_net import QNet from ml.experience import Experience from config.model import CNN_FEATURE_EXTRACTOR, CAFFE_MODEL, MODEL_TYPE from config.log import APP_KEY import logging app_logger = logging.getLogger(APP_KEY) use_gpu = int(os.getenv('GPU', '-1')) class VVCComponent(brica1.Component): image_feature_count = 1 cnn_feature_extractor = CNN_FEATURE_EXTRACTOR model = CAFFE_MODEL model_type = MODEL_TYPE image_feature_dim = 256 * 6 * 6 def __init__(self, n_output=10240, n_input=1): # image_feature_count = 1 super(VVCComponent, self).__init__() self.use_gpu = use_gpu self.n_output = n_output self.n_input = n_input def set_model(self, feature_extractor): self.feature_extractor = feature_extractor def load_model(self, cnn_feature_extractor): if os.path.exists(cnn_feature_extractor): app_logger.info("loading... {}".format(cnn_feature_extractor)) self.feature_extractor = pickle.load(open(cnn_feature_extractor, 'rb')) app_logger.info("done") else: self.feature_extractor = CnnFeatureExtractor(self.use_gpu, self.model, self.model_type, self.image_feature_dim) pickle.dump(self.feature_extractor, open(cnn_feature_extractor, 'wb')) app_logger.info("pickle.dump finished") def fire(self): observation = self.get_in_port('Isocortex#V1-Isocortex#VVC-Input').buffer obs_array = self.feature_extractor.feature(observation, self.image_feature_count) self.results['Isocortex#VVC-BG-Output'] = obs_array self.results['Isocortex#VVC-UB-Output'] = obs_array class BGComponent(brica1.Component): def __init__(self, n_input=10240, n_output=1): super(BGComponent, self).__init__() self.use_gpu = use_gpu self.epsilon = 1.0 actions = [0, 1, 2] epsilon_delta = 1.0 / 10 ** 4.4 min_eps = 0.1 self.input_dim = n_input self.q_net = QNet(self.use_gpu, actions, self.input_dim, self.epsilon, epsilon_delta, min_eps) def start(self): features = self.get_in_port('Isocortex#VVC-BG-Input').buffer action = self.q_net.start(features) return action def end(self, reward): # Episode Terminated app_logger.info('episode finished. 
Reward:{:.1f} / Epsilon:{:.6f}'.format(reward, self.epsilon)) self.replayed_experience = self.get_in_port('UB-BG-Input').buffer self.q_net.update_model(self.replayed_experience) def fire(self): reward = self.get_in_port('RB-BG-Input').buffer features = self.get_in_port('Isocortex#VVC-BG-Input').buffer self.replayed_experience = self.get_in_port('UB-BG-Input').buffer action, eps, q_max = self.q_net.step(features) self.q_net.update_model(self.replayed_experience) app_logger.info('Step:{} Action:{} Reward:{:.1f} Epsilon:{:.6f} Q_max:{:3f}'.format( self.q_net.time, self.q_net.action_to_index(action), reward[0], eps, q_max )) self.epsilon = eps self.results['BG-Isocortex#FL-Output'] = np.array([action]) class UBComponent(brica1.Component): def __init__(self): super(UBComponent, self).__init__() self.use_gpu = use_gpu data_size = 10**5 replay_size = 32 hist_size = 1 initial_exploration = 10**3 dim = 10240 self.experience = Experience(use_gpu=self.use_gpu, data_size=data_size, replay_size=replay_size, hist_size=hist_size, initial_exploration=initial_exploration, dim=dim) vvc_input = np.zeros((hist_size, dim), dtype=np.uint8) self.last_state = vvc_input self.state = vvc_input self.time = 0 def end(self, action, reward): self.time += 1 replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay = \ self.experience.end_episode(self.time, self.last_state, action, reward) self.results['UB-BG-Output'] = [replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay] def fire(self): self.state = self.get_in_port('Isocortex#VVC-UB-Input').buffer action, reward = self.get_in_port('Isocortex#FL-UB-Input').buffer self.experience.stock(self.time, self.last_state, action, reward, self.state, False) replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay, \ success_distance, action_distance = self.experience.replay(self.time) self.results['UB-BG-Output'] = [replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay, success_distance, action_distance] self.last_state = self.state.copy() self.time += 1 class FLComponent(brica1.Component): def __init__(self): super(FLComponent, self).__init__() self.last_action = np.array([0]) def fire(self): action = self.get_in_port('BG-Isocortex#FL-Input').buffer reward = self.get_in_port('RB-Isocortex#FL-Input').buffer self.results['Isocortex#FL-MO-Output'] = action self.results['Isocortex#FL-UB-Output'] = [self.last_action, reward] self.last_action = action
apache-2.0
-2,209,802,420,271,121,200
37.657343
118
0.625181
false
MioGit/RobotSimFramework
robots.py
1
7491
# -*- coding: utf-8 -*- # Python 2.7.8 """ Author: Isaac Sanchez Ruiz appllgc@gmail.com Copyright (C) 2014 Isaac Sanchez Ruiz This file is part of RobotSimFramework. RobotSimFramework is free software: you can redistribute it and/or modify it under the terms of the Lesser GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. RobotSimFramework is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the Lesser GNU General Public License along with RobotSimFramework. If not, see <http://www.gnu.org/licenses/>. Description: Package with the code for the car robot. But just physically. """ import numpy as np import random as r import math import copy import worldHandling as wh # NOTE: all things starting with 'v' in this package are vectors and, therefore, I use Numpy arrays for them # FUNCTIONS def expErrorWithDistance(errordistancerate = 0.1): # exponential error that increases with the given distance def func_errorwithdistance(realdistance): if realdistance > 0.0: deviation = np.exp(errordistancerate * realdistance) - 1.0 # the minimum in the exponential is 1.0 and I want to assume there is no error if realdistance is 0.0 return float(np.random.normal(0.0, deviation, 1)) + realdistance # error over real distance + real distance elif realdistance < 0.0: deviation = np.exp(errordistancerate * -realdistance) - 1.0 return realdistance - float(np.random.normal(0.0, deviation, 1)) else: return realdistance # which is 0.0 return func_errorwithdistance def percentageError(percentage = 0.1, noise = "mixed"): if noise == "chaotic": # linear error that increases with the given distance randomly (that's why it's noisy) return lambda realdistance: r.uniform(-1,1) * percentage * realdistance + realdistance elif noise == "mixed": # works sort of like a fractal error = r.uniform(-1,1) * percentage # random initial error which will give most of the error value return lambda realdistance: r.random() * error * realdistance + realdistance elif noise == "none": # gives a constant error to the given distance error = r.uniform(-1,1) * percentage return lambda realdistance: error * realdistance + realdistance else: raise Exception("Wrong error selected") # CLASSES class DiscreteBeamSensor(wh.SpaceReference, wh.Orientable): def __init__(self, v_startpos, orientation, steplength, maxdistance, func_errorwithdistance, handler, exceptions = []): # TODO: check arguments correctness super(DiscreteBeamSensor, self).__init__(v_position = v_startpos, orientation = orientation) self.steplength = steplength # remember it is a discrete beam check self.maxdistance = maxdistance # max distance it covers self.func_errorwithdistance = func_errorwithdistance self.handler = handler # it has a handler, but it ain't no physical object, it occupies no space and in this simulation: no space, no physics self.exceptions = exceptions self.lastHitPoint = self.updateHitPoint() def updateHitPoint(self): v_dir = np.array([math.cos(self.orientation.getRadians()), math.sin(self.orientation.getRadians())]) # already normalized v_pos = np.copy(self.v_position) v_dir = self.steplength * v_dir iterleft = self.maxdistance / self.steplength # number of iterations needed to cover the desired distance while iterleft > 0: if self.handler.occupiedExcept(v_pos, 
self.exceptions): iterleft = 0 # we stop the process cause there's a hit else: v_pos += v_dir iterleft -= 1 self.lastHitPoint = v_pos # update the last one return v_pos def estimatedDistanceToHit(self): distance = np.linalg.norm(self.updateHitPoint() - self.v_position) return self.func_errorwithdistance(distance) class Robot(wh.PhysicalEntity, wh.Orientable): # just the very basic rotating circular robot (works like a typical tank) def __init__(self, v_position, radius, orientation, handler, func_odometryerror, func_compasserror, frontsensors = [], leftsensors = [], rightsensors = []): # TODO: check arguments correctness super(Robot, self).__init__(space = wh.CircularVolume(v_position, radius), handler = handler, orientation = orientation) self.rotation = 0.0 # rads per second self.speed = 0.0 # units per second # TODO: treat the odometer and the compass as separate classes (they're sensors after all) self.func_odometryerror = func_odometryerror self.func_compasserror = func_compasserror self.v_pos_odometry = np.copy(v_position) self.orient_compass = copy.copy(orientation) self.frontsensors = frontsensors self.leftsensors = leftsensors self.rightsensors = rightsensors def update(self, timeStep): # I assume timeStep is in milliseconds v_previous = np.copy(self.space.v_position) dist_travel = self.speed * timeStep * 10**(-3) ang_rotated = self.rotation * timeStep * 10**(-3) self.space.v_position += dist_travel * np.array([math.cos(self.orientation.getRadians()), math.sin(self.orientation.getRadians())]) self.orientation.setRadians(self.orientation.getRadians() + ang_rotated) self.orient_compass.setRadians(self.orient_compass.getRadians() + self.func_compasserror(ang_rotated)) # we compute the preceived rotation for obj in self.handler.allPhysicalEntities(): if obj is not self and self.space.intersects(obj.space): # in case of collision with another object self.space.v_position[:] = v_previous[:] # copy previous "legal" values return # no need to check anything more nor to change the odometry measurements self.v_pos_odometry += self.func_odometryerror(dist_travel) * np.array([math.cos(self.orient_compass.getRadians()), math.sin(self.orient_compass.getRadians())]) # we also compute the perceived position if there was no hit class SimplestRC(wh.PhysicalEntity): # just the very basic non-rotating RC (radio controlled) "vehicle" or whatever you wanna call it, it works just like a circle that moves in any direction def __init__(self, v_position, radius, handler): # TODO: check arguments correctness super(Robot, self).__init__(space = wh.CircularVolume(v_position, radius), handler = handler) self.rotation = 0.0 # rads per second self.v_speed = np.array([0., 0.]) # units per second def update(self, timeStep): # I assume timeStep is in milliseconds v_previous = np.copy(self.space.v_position) self.space.v_position += self.v_speed * 10**(-3) for obj in self.handler.allPhysicalEntities(): if obj is not self and self.space.intersects(obj.space): # in case of collision with another object self.space.v_position[:] = v_previous[:] # copy previous "legal" values return # no need to check anything more nor to change the odometry measurements
gpl-3.0
-5,374,866,057,185,673,000
52.12766
229
0.690696
false
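The RobotSimFramework record above builds range sensors whose noise grows with the measured distance (expErrorWithDistance, percentageError). A minimal standalone sketch of that exponential-deviation idea, written here as an illustration with a hypothetical make_exp_error helper rather than code from the record, assuming only NumPy:

import numpy as np

def make_exp_error(rate=0.1, seed=0):
    # noise standard deviation grows as exp(rate * |distance|) - 1,
    # so a reading at zero range is exact and far readings get fuzzy
    rng = np.random.default_rng(seed)
    def noisy(true_distance):
        deviation = np.exp(rate * abs(true_distance)) - 1.0
        return true_distance + rng.normal(0.0, deviation)
    return noisy

sensor = make_exp_error(rate=0.1)
for d in (0.5, 5.0, 20.0):
    print(d, sensor(d))  # spread around d widens as d grows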
Eric89GXL/vispy
vispy/glsl/build-spatial-filters.py
1
20537
#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # glumpy is an OpenGL framework for the fast visualization of numpy arrays. # Copyright (C) 2009-2011 Nicolas P. Rougier. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- ''' A filter is a shader that transform the current displayed texture. Since shaders cannot be easily serialized within the GPU, they have to be well structured on the python side such that we can possibly merge them into a single source code for both vertex and fragment. Consequently, there is a default code for both vertex and fragment with specific entry points such that filter knows where to insert their specific code (declarations, functions and call (or code) to be inserted in the main function). Spatial interpolation filter classes for OpenGL textures. Each filter generates a one-dimensional lookup table (weights value from 0 to ceil(radius)) that is uploaded to video memory (as a 1d texture) and is then read by the shader when necessary. It avoids computing weight values for each pixel. Furthemore, each 2D-convolution filter is separable and can be computed using 2 1D-convolution with same 1d-kernel (= the lookup table values). Available filters: - Nearest (radius 0.5) - Bilinear (radius 1.0) - Hanning (radius 1.0) - Hamming (radius 1.0) - Hermite (radius 1.0) - Kaiser (radius 1.0) - Quadric (radius 1.5) - Bicubic (radius 2.0) - CatRom (radius 2.0) - Mitchell (radius 2.0) - Spline16 (radius 2.0) - Spline36 (radius 4.0) - Gaussian (radius 2.0) - Bessel (radius 3.2383) - Sinc (radius 4.0) - Lanczos (radius 4.0) - Blackman (radius 4.0) Note:: Weights code has been translated from the antigrain geometry library available at http://www.antigrain.com/ ''' import math import numpy as np class SpatialFilter(object): ''' ''' def __init__(self, radius=1.0): self.radius = radius def weight(self, x): ''' Return filter weight for a distance x. 
:Parameters: ``x`` : 0 < float < ceil(self.radius) Distance to be used to compute weight. ''' raise NotImplementedError def kernel(self, size=4*512): radius = self.radius r = int(max(1.0, math.ceil(radius))) samples = int(size / r) n = size # r*samples kernel = np.zeros(n) X = np.linspace(0, r, n) for i in range(n): kernel[i] = self.weight(X[i]) N = np.zeros(samples) for i in range(r): N += kernel[::+1][i*samples:(i+1)*samples] N += kernel[::-1][i*samples:(i+1)*samples] for i in range(r): kernel[i*samples:(i+1)*samples:+1] /= N return kernel def filter_code(self): n = int(math.ceil(self.radius)) filter_1 = 'filter1D_radius%d' % n filter_2 = 'filter2D_radius%d' % n code = '' code += 'vec4\n' code += '%s( sampler2D kernel, float index, float x, ' % filter_1 for i in range(2*n): if i == 2*n-1: code += 'vec4 c%d )\n' % i else: code += 'vec4 c%d, ' % i code += '{\n' code += ' float w, w_sum = 0.0;\n' code += ' vec4 r = vec4(0.0,0.0,0.0,0.0);\n' for i in range(n): code += ' w = unpack_interpolate(kernel, vec2(%f+(x/%.1f), index));\n' % (1.0 - (i + 1) / float(n), n) # noqa code += ' w = w*kernel_scale + kernel_bias;\n' # noqa # code += ' w_sum += w;' code += ' r += c%d * w;\n' % i code += ' w = unpack_interpolate(kernel, vec2(%f-(x/%.1f), index));\n' % ((i+1)/float(n), n) # noqa code += ' w = w*kernel_scale + kernel_bias;\n' # code += ' w_sum += w;' code += ' r += c%d * w;\n' % (i + n) # code += ' return r/w_sum;\n' code += ' return r;\n' code += '}\n' code += "\n" code += 'vec4\n' code += '%s' % filter_2 code += '(sampler2D texture, sampler2D kernel, float index, vec2 uv, vec2 pixel)\n' # noqa code += '{\n' code += ' vec2 texel = uv/pixel - vec2(0.5, 0.5) ;\n' code += ' vec2 f = fract(texel);\n' code += ' texel = (texel-fract(texel) + vec2(0.001, 0.001)) * pixel;\n' # noqa for i in range(2*n): code += ' vec4 t%d = %s(kernel, index, f.x,\n' % (i, filter_1) for j in range(2*n): x, y = (-n+1+j, -n+1+i) code += ' texture2D( texture, texel + vec2(%d, %d) * pixel),\n' % (x, y) # noqa # Remove last trailing',' and close function call code = code[:-2] + ');\n' code += ' return %s(kernel, index, f.y, ' % filter_1 for i in range(2*n): code += 't%d, ' % i # Remove last trailing',' and close function call code = code[:-2] + ');\n' code += '}\n' return code def call_code(self, index): code = "" n = int(math.ceil(self.radius)) filter_1 = 'filter1D_radius%d' % n # noqa filter_2 = 'filter2D_radius%d' % n code += 'vec4 %s(sampler2D texture, vec2 shape, vec2 uv)\n' % self.__class__.__name__ # noqa code += '{' code += ' return %s(texture, u_kernel, %f, uv, 1.0/shape); ' % (filter_2, index) # noqa code += '}\n' return code class Nearest(SpatialFilter): ''' Nearest (=None) filter (radius = 0.5). Weight function:: w(x) = 1 ''' def __init__(self): SpatialFilter.__init__(self, radius=.5) def weight(self, x): return 1.0 def _get_code(self): self.build_LUT() code = 'vec4\n' code += 'interpolate(sampler2D texture, sampler1D kernel, vec2 uv, vec2 pixel)\n' # noqa code += '{\n return texture2D(texture, uv);\n}\n' return code code = property(_get_code, doc='''filter functions code''') class Bilinear(SpatialFilter): ''' Bilinear filter (radius = 1.0). Weight function:: w(x) = 1 - x ''' def __init__(self): SpatialFilter.__init__(self, radius=1.0) def weight(self, x): return 1.0 - x class Hanning(SpatialFilter): ''' Hanning filter (radius = 1.0). 
Weight function:: w(x) = 0.5 + 0.5 * cos(pi * x) ''' def __init__(self): SpatialFilter.__init__(self, radius=1.0) def weight(self, x): return 0.5 + 0.5 * math.cos(math.pi * x) class Hamming(SpatialFilter): ''' Hamming filter (radius = 1.0). Weight function:: w(x) = 0.54 + 0.46 * cos(pi * x) ''' def __init__(self): SpatialFilter.__init__(self, radius=1.0) def weight(self, x): return 0.54 + 0.46 * math.cos(math.pi * x) class Hermite(SpatialFilter): ''' Hermite filter (radius = 1.0). Weight function:: w(x) = (2*x-3)*x^2 + 1 ''' def __init__(self): SpatialFilter.__init__(self, radius=1.0) def weight(self, x): return (2.0 * x - 3.0) * x * x + 1.0 class Quadric(SpatialFilter): ''' Quadric filter (radius = 1.5). Weight function:: | 0.0 ≤ x < 0.5: 0.75 - x*x w(x) = | 0.5 ≤ x < 1.5: 0.5 - (x-1.5)^2 | 1.5 ≤ x : 0 ''' def __init__(self): SpatialFilter.__init__(self, radius=1.5) def weight(self, x): if x < 0.75: return 0.75 - x * x elif x < 1.5: t = x - 1.5 return 0.5 * t * t else: return 0.0 class Bicubic(SpatialFilter): ''' Bicubic filter (radius = 2.0). Weight function:: w(x) = 1/6((x+2)^3 - 4*(x+1)^3 + 6*x^3 -4*(x-1)^3) ''' def __init__(self): SpatialFilter.__init__(self, radius=2.0) def pow3(self, x): if x <= 0: return 0 else: return x * x * x def weight(self, x): return (1.0/6.0) * (self.pow3(x + 2) - 4 * self.pow3(x + 1) + 6 * self.pow3(x) - 4 * self.pow3(x - 1)) class Kaiser(SpatialFilter): ''' Kaiser filter (radius = 1.0). Weight function:: w(x) = bessel_i0(a sqrt(1-x^2)* 1/bessel_i0(b) ''' def __init__(self, b=6.33): self.a = b self.epsilon = 1e-12 self.i0a = 1.0 / self.bessel_i0(b) SpatialFilter.__init__(self, radius=1.0) def bessel_i0(self, x): s = 1.0 y = x * x / 4.0 t = y i = 2 while t > self.epsilon: s += t t *= float(y) / (i * i) i += 1 return s def weight(self, x): if x > 1: return 0 return self.bessel_i0(self.a * math.sqrt(1.0 - x * x)) * self.i0a class CatRom(SpatialFilter): ''' Catmull-Rom filter (radius = 2.0). Weight function:: | 0 ≤ x < 1: 0.5*(2 + x^2*(-5+x*3)) w(x) = | 1 ≤ x < 2: 0.5*(4 + x*(-8+x*(5-x))) | 2 ≤ x : 0 ''' def __init__(self, size=256*8): SpatialFilter.__init__(self, radius=2.0) def weight(self, x): if x < 1.0: return 0.5 * (2.0 + x * x * (-5.0 + x * 3.0)) elif x < 2.0: return 0.5 * (4.0 + x * (-8.0 + x * (5.0 - x))) else: return 0.0 class Mitchell(SpatialFilter): ''' Mitchell-Netravali filter (radius = 2.0). Weight function:: | 0 ≤ x < 1: p0 + x^2*(p2 + x*p3) w(x) = | 1 ≤ x < 2: q0 + x*(q1 + x*(q2 + x*q3)) | 2 ≤ x : 0 ''' def __init__(self, b=1.0/3.0, c=1.0/3.0): self.p0 = (6.0 - 2.0 * b) / 6.0 self.p2 = (-18.0 + 12.0 * b + 6.0 * c) / 6.0 self.p3 = (12.0 - 9.0 * b - 6.0 * c) / 6.0 self.q0 = (8.0 * b + 24.0 * c) / 6.0 self.q1 = (-12.0 * b - 48.0 * c) / 6.0 self.q2 = (6.0 * b + 30.0 * c) / 6.0 self.q3 = (-b - 6.0 * c) / 6.0 SpatialFilter.__init__(self, radius=2.0) def weight(self, x): if x < 1.0: return self.p0 + x * x * (self.p2 + x * self.p3) elif x < 2.0: return self.q0 + x * (self.q1 + x * (self.q2 + x * self.q3)) else: return 0.0 class Spline16(SpatialFilter): ''' Spline16 filter (radius = 2.0). Weight function:: | 0 ≤ x < 1: ((x-9/5)*x - 1/5)*x + 1 w(x) = | | 1 ≤ x < 2: ((-1/3*(x-1) + 4/5)*(x-1) - 7/15 )*(x-1) ''' def __init__(self): SpatialFilter.__init__(self, radius=2.0) def weight(self, x): if x < 1.0: return ((x - 9.0/5.0) * x - 1.0/5.0) * x + 1.0 else: return ((-1.0/3.0 * (x-1) + 4.0/5.0) * (x-1) - 7.0/15.0) * (x-1) class Spline36(SpatialFilter): ''' Spline36 filter (radius = 3.0). 
Weight function:: | 0 ≤ x < 1: ((13/11*x - 453/209)*x -3/209)*x +1 w(x) = | 1 ≤ x < 2: ((-6/11*(x-1) - 270/209)*(x-1) -156/209)*(x-1) | 2 ≤ x < 3: (( 1/11*(x-2) - 45/209)*(x-2) + 26/209)*(x-2) ''' def __init__(self): SpatialFilter.__init__(self, radius=3.0) def weight(self, x): if x < 1.0: return ((13.0/11.0 * x - 453.0/209.0) * x - 3.0/209.0) * x + 1.0 elif x < 2.0: return ((-6.0/11.0 * (x-1) + 270.0/209.0) * (x-1) - 156.0 / 209.0) * (x-1) # noqa else: return ((1.0 / 11.0 * (x-2) - 45.0/209.0) * (x - 2) + 26.0/209.0) * (x-2) # noqa class Gaussian(SpatialFilter): ''' Gaussian filter (radius = 2.0). Weight function:: w(x) = exp(-2x^2) * sqrt(2/pi) Note:: This filter does not seem to be correct since: x = np.linspace(0, 1.0, 100 ) f = weight z = f(x+1)+f(x)+f(1-x)+f(2-x) z should be 1 everywhere but it is not the case and it produces "grid effects". ''' def __init__(self): SpatialFilter.__init__(self, radius=2.0) def weight(self, x): return math.exp(-2.0 * x * x) * math.sqrt(2.0 / math.pi) class Bessel(SpatialFilter): ''' Bessel filter (radius = 3.2383). ''' def __init__(self): SpatialFilter.__init__(self, radius=3.2383) def besj(self, x, n): ''' Function BESJ calculates Bessel function of first kind of order n Arguments: x - value at which the Bessel function is required n - an integer (>=0), the order -------------------- C++ Mathematical Library Converted from equivalent FORTRAN library Converted by Gareth Walker for use by course 392 computational project All functions tested and yield the same results as the corresponding FORTRAN versions. If you have any problems using these functions please report them to M.Muldoon@UMIST.ac.uk Documentation available on the web http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html Version 1.0 8/98 29 October, 1999 -------------------- Adapted for use in AGG library by Andy Wilk (castor.vulgaris@gmail.com) Adapted for use in vispy library by Nicolas P. Rougier (Nicolas.Rougier@inria.fr) ----------------------------------------------------------------------- ''' if n < 0: return 0.0 x = float(x) # force float type d = 1e-6 b = 0 if math.fabs(x) <= d: if n != 0: return 0 return 1 b1 = 0 # b1 is the value from the previous iteration # Set up a starting order for recurrence m1 = int(math.fabs(x)) + 6 if math.fabs(x) > 5: m1 = int(math.fabs(1.4 * x + 60 / x)) m2 = int(n + 2 + math.fabs(x) / 4) if m1 > m2: m2 = m1 # Apply recurrence down from current max order while True: c3 = 0 c2 = 1e-30 c4 = 0 m8 = 1 if m2 // 2 * 2 == m2: m8 = -1 imax = m2 - 2 for i in range(1, imax+1): c6 = 2 * (m2 - i) * c2 / x - c3 c3 = c2 c2 = c6 if m2 - i - 1 == n: b = c6 m8 = -1 * m8 if m8 > 0: c4 = c4 + 2 * c6 c6 = 2 * c2 / x - c3 if n == 0: b = c6 c4 += c6 b /= c4 if math.fabs(b - b1) < d: return b b1 = b m2 += 3 def weight(self, x): if x == 0.0: return math.pi/4.0 else: return self.besj(math.pi * x, 1) / (2.0 * x) class Sinc(SpatialFilter): ''' Sinc filter (radius = 4.0). Weight function:: ''' def __init__(self, size=256, radius=4.0): SpatialFilter.__init__(self, radius=max(radius, 2.0)) def weight(self, x): if x == 0.0: return 1.0 x *= math.pi return (math.sin(x) / x) class Lanczos(SpatialFilter): ''' Lanczos filter (radius = 4.0). Weight function:: ''' def __init__(self, size=256, radius=4.0): SpatialFilter.__init__(self, radius=max(radius, 2.0)) def weight(self, x): if x == 0.0: return 1.0 elif x > self.radius: return 0.0 x *= math.pi xr = x / self.radius return (math.sin(x) / x) * (math.sin(xr)/xr) class Blackman(SpatialFilter): ''' Blackman filter (radius = 4.0). 
''' def __init__(self, size=256, radius=4.0): SpatialFilter.__init__(self, radius=max(radius, 2.0)) def weight(self, x): if x == 0.0: return 1.0 elif x > self.radius: return 0.0 x *= math.pi xr = x / self.radius return (math.sin(x) / x) * (0.42 + 0.5*math.cos(xr) + 0.08*math.cos(2*xr)) # noqa # Generate kernels texture (16 x 1024) filters = [Bilinear(), Hanning(), Hamming(), Hermite(), Kaiser(), Quadric(), Bicubic(), CatRom(), Mitchell(), Spline16(), Spline36(), Gaussian(), Bessel(), Sinc(), Lanczos(), Blackman()] n = 1024 K = np.zeros((16, n)) for i, f in enumerate(filters): K[i] = f.kernel(n) bias = K.min() scale = K.max()-K.min() K = (K-bias)/scale np.save("spatial-filters.npy", K.astype(np.float32)) print("// ------------------------------------") print("// Automatically generated, do not edit") print("// ------------------------------------") print("") print("const float kernel_bias = %f;" % bias) print("const float kernel_scale = %f;" % scale) print("const float kernel_size = %f;" % n) print("const vec4 bits = vec4(1.0, 1.0/256.0, 1.0/(256.0*256.0), 1.0/(256.0*256.0*256.0));") # noqa print("uniform sampler2D u_kernel;") print("") code = 'float\n' code += 'unpack_unit(vec4 rgba)\n' code += '{\n' code += '\t// return rgba.r; // uncomment this for r32f debugging\n' code += '\treturn dot(rgba, bits);\n' code += '}\n' print(code.expandtabs(4)) code = 'float\n' code += 'unpack_ieee(vec4 rgba)\n' code += '{\n' code += '\t// return rgba.r; // uncomment this for r32f debugging\n' code += '\trgba.rgba = rgba.abgr * 255.;\n' code += '\tfloat sign = 1.0 - step(128.0,rgba[0])*2.0;\n' code += '\tfloat exponent = 2.0 * mod(rgba[0],128.0) + ' \ 'step(128.0,rgba[1]) - 127.0;\n' code += '\tfloat mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 + ' \ 'rgba[3] + float(0x800000);\n' code += '\treturn sign * exp2(exponent) * (mantissa * exp2(-23.));\n' code += '}\n' print(code.expandtabs(4)) code = 'float\n' code += 'unpack_interpolate(sampler2D kernel, vec2 uv)\n' code += '{\n' code += '\t// return texture2D(kernel, uv).r; ' \ '//uncomment this for r32f debug without interpolation\n' code += '\tfloat kpixel = 1. / kernel_size;\n' code += '\tfloat u = uv.x / kpixel;\n' code += '\tfloat v = uv.y;\n' code += '\tfloat uf = fract(u);\n' code += '\tu = (u - uf) * kpixel;\n' code += '\n' code += '\tfloat d0 = unpack_unit(texture2D(kernel, vec2(u, v)));\n' code += '\tfloat d1 = unpack_unit(texture2D(kernel, vec2(u + 1. * kpixel, v)));\n' # noqa code += '\treturn mix(d0, d1, uf);\n' code += '}\n' print(code.expandtabs(4)) F = SpatialFilter(1.0) print(F.filter_code()) F = SpatialFilter(2.0) print(F.filter_code()) F = SpatialFilter(3.0) print(F.filter_code()) F = SpatialFilter(4.0) print(F.filter_code()) # Generate filter functions # Special case for nearest print("""vec4 Nearest(sampler2D texture, vec2 shape, vec2 uv)""") print("""{ return texture2D(texture,uv); }\n""") for i, f in enumerate(filters): print(f.call_code((i+0.5)/16.0))
bsd-3-clause
5,926,030,809,813,368,000
27.564067
125
0.511044
false
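The vispy record above precomputes each filter's weights into a one-dimensional lookup table (uploaded as a texture) so the GLSL code never evaluates the weight function per pixel, and applies the separable 2D filter as two 1D passes with the same kernel. A rough CPU-side sketch of the lookup-table half of that idea, assuming NumPy and the radius-1 Hermite weights; sizes and helper names here are illustrative only, not taken from the record:

import numpy as np

def hermite_weight(x):
    # w(x) = (2*x - 3)*x**2 + 1 for 0 <= x <= 1
    return (2.0 * x - 3.0) * x * x + 1.0

LUT_SIZE = 512
LUT = hermite_weight(np.linspace(0.0, 1.0, LUT_SIZE))  # precomputed 1D kernel table

def lookup(d):
    # nearest-entry lookup; the shader version interpolates between entries
    return LUT[int(round(abs(d) * (LUT_SIZE - 1)))]

def interpolate_1d(samples, x):
    # radius-1 reconstruction at fractional position x; a separable 2D
    # filter would run this same pass along each axis in turn
    i = int(np.floor(x))
    f = x - i
    return samples[i] * lookup(f) + samples[i + 1] * lookup(1.0 - f)

signal = np.array([0.0, 1.0, 0.0, 2.0])
print(interpolate_1d(signal, 1.25))  # weighted mix of signal[1] and signal[2]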
michaelaye/spexpy
guide_movie.py
1
1229
import numpy as np
import matplotlib
matplotlib.use("Agg")  # render off-screen; no display needed
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
import pyfits
import glob
import os
from scipy.ndimage import median_filter

os.chdir('/raid1/maye/irtf/140918/guidedog/scan1')


def read_scan1(fname):
    # slit-scan frames: keep only the rows of interest (248:394)
    data = pyfits.getdata(fname, 0)
    return data[248:394, :]


def read_guide(fname):
    # guide-camera frames are used whole
    return pyfits.getdata(fname, 0)


fitsfiles = glob.glob('sylvester*.fits')
fitsfiles.sort()

FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='IRTF Scan 1', artist='Matplotlib',
                comment='Scan 1, rows 248:394')
writer = FFMpegWriter(fps=5, metadata=metadata)

fig = plt.figure(figsize=(10, 10))
handle = plt.imshow(np.zeros((512, 512)), cmap='gray',
                    interpolation='nearest')
plt.axis('off')
#plt.xlim(-5, 5)
#plt.ylim(-5, 5)

tomovie = fitsfiles

with writer.saving(fig, "/u/paige/maye/guide_movie.mp4", 100):
    for fitsfile in tomovie:
        print(fitsfile)
        nr = fitsfile.split('-')[1][:5]  # frame number taken from the filename
        handle.get_axes().set_title(nr)
        data = read_guide(fitsfile)
        cleaned = median_filter(data, size=3)  # suppress hot pixels
        handle.set_data(cleaned)
        handle.autoscale()
        writer.grab_frame()
bsd-3-clause
-6,363,818,092,243,812,000
26.311111
62
0.676973
false
plotly/octogrid
setup.py
1
1811
from os.path import dirname, join

from octogrid import __version__
from setuptools import setup


def read(filename):
    """ Read content from utility files """
    return open(join(dirname(__file__), filename)).read()


setup(
    name='octogrid',
    version=__version__,
    author='Pravendra Singh',
    author_email='hackpravj@gmail.com',
    description=('GitHub following network visualizer for Humans'),
    license = 'MIT',
    keywords = 'github network graph plotly plot chart visualization',
    url = 'https://github.com/pravj/octogrid',
    packages=['octogrid', 'octogrid.parser', 'octogrid.generator', 'octogrid.auth', 'octogrid.builder', 'octogrid.publisher', 'octogrid.store', 'octogrid.utils'],
    install_requires=[
        'colorlover>=0.2.1',
        'decorator>=4.0.6',
        'docopt>=0.6.2',
        'github3.py>=0.9.4',
        'ipython>=4.0.3',
        'ipython-genutils>=0.1.0',
        'path.py>=8.1.2',
        'pexpect>=4.0.1',
        'pickleshare>=0.6',
        'plotly>=1.9.5',
        'ptyprocess>=0.5',
        'python-igraph>=0.7.1.post6',
        'pytz>=2015.7',
        'requests>=2.9.1',
        'simplegeneric>=0.8.1',
        'six>=1.10.0',
        'traitlets>=4.1.0',
        'uritemplate.py>=0.3.0',
        'wheel>=0.24.0'
    ],
    long_description=read('README.rst'),
    entry_points={
        'console_scripts': ['octogrid = octogrid.octogrid:main']
    },
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7'
    ]
)
mit
-7,434,494,033,731,529,000
29.694915
162
0.575373
false
valiantljk/graph-partition
drawing/nx_pylab.py
1
29867
""" ********** Matplotlib ********** Draw networks with matplotlib. See Also -------- matplotlib: http://matplotlib.org/ pygraphviz: http://pygraphviz.github.io/ """ # Copyright (C) 2004-2012 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import networkx as nx from networkx.drawing.layout import shell_layout,\ circular_layout,spectral_layout,spring_layout,random_layout __author__ = """Aric Hagberg (hagberg@lanl.gov)""" __all__ = ['draw', 'draw_networkx', 'draw_networkx_nodes', 'draw_networkx_edges', 'draw_networkx_labels', 'draw_networkx_edge_labels', 'draw_circular', 'draw_random', 'draw_spectral', 'draw_spring', 'draw_shell', 'draw_graphviz'] def draw(G, pos=None, ax=None, hold=None, **kwds): """Draw the graph G with Matplotlib. Draw the graph as a simple representation with no node labels or edge labels and using the full Matplotlib figure area and no axis labels by default. See draw_networkx() for more full-featured drawing that allows title, axis labels etc. Parameters ---------- G : graph A networkx graph pos : dictionary, optional A dictionary with nodes as keys and positions as values. If not specified a spring layout positioning will be computed. See networkx.layout for functions that compute node positions. ax : Matplotlib Axes object, optional Draw the graph in specified Matplotlib axes. hold : bool, optional Set the Matplotlib hold state. If True subsequent draw commands will be added to the current axes. **kwds : optional keywords See networkx.draw_networkx() for a description of optional keywords. Examples -------- >>> G=nx.dodecahedral_graph() >>> nx.draw(G) >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout See Also -------- draw_networkx() draw_networkx_nodes() draw_networkx_edges() draw_networkx_labels() draw_networkx_edge_labels() Notes ----- This function has the same name as pylab.draw and pyplot.draw so beware when using >>> from networkx import * since you might overwrite the pylab.draw function. With pyplot use >>> import matplotlib.pyplot as plt >>> import networkx as nx >>> G=nx.dodecahedral_graph() >>> nx.draw(G) # networkx draw() >>> plt.draw() # pyplot draw() Also see the NetworkX drawing examples at http://networkx.github.io/documentation/latest/gallery.html """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if ax is None: cf = plt.gcf() else: cf = ax.get_figure() cf.set_facecolor('w') if ax is None: if cf._axstack() is None: ax = cf.add_axes((0, 0, 1, 1)) else: ax = cf.gca() if 'with_labels' not in kwds: kwds['with_labels'] = 'labels' in kwds b = plt.ishold() # allow callers to override the hold state by passing hold=True|False h = kwds.pop('hold', None) if h is not None: plt.hold(h) try: draw_networkx(G, pos=pos, ax=ax, **kwds) ax.set_axis_off() plt.draw_if_interactive() except: plt.hold(b) raise plt.hold(b) return def draw_networkx(G, pos=None, with_labels=True, **kwds): """Draw the graph G using Matplotlib. Draw the graph with Matplotlib with options for node positions, labeling, titles, and many other drawing features. See draw() for simple drawing without labels or axes. Parameters ---------- G : graph A networkx graph pos : dictionary, optional A dictionary with nodes as keys and positions as values. If not specified a spring layout positioning will be computed. 
See networkx.layout for functions that compute node positions. with_labels : bool, optional (default=True) Set to True to draw labels on the nodes. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. nodelist : list, optional (default G.nodes()) Draw only specified nodes edgelist : list, optional (default=G.edges()) Draw only specified edges node_size : scalar or array, optional (default=300) Size of nodes. If an array is specified it must be the same length as nodelist. node_color : color string, or array of floats, (default='r') Node color. Can be a single color format string, or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped to colors using the cmap and vmin,vmax parameters. See matplotlib.scatter for more details. node_shape : string, optional (default='o') The shape of the node. Specification is as matplotlib.scatter marker, one of 'so^>v<dph8'. alpha : float, optional (default=1.0) The node and edge transparency cmap : Matplotlib colormap, optional (default=None) Colormap for mapping intensities of nodes vmin,vmax : float, optional (default=None) Minimum and maximum for node colormap scaling linewidths : [None | scalar | sequence] Line width of symbol border (default =1.0) width : float, optional (default=1.0) Line width of edges edge_color : color string, or array of floats (default='r') Edge color. Can be a single color format string, or a sequence of colors with the same length as edgelist. If numeric values are specified they will be mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters. edge_cmap : Matplotlib colormap, optional (default=None) Colormap for mapping intensities of edges edge_vmin,edge_vmax : floats, optional (default=None) Minimum and maximum for edge colormap scaling style : string, optional (default='solid') Edge line style (solid|dashed|dotted,dashdot) labels : dictionary, optional (default=None) Node labels in a dictionary keyed by node of text labels font_size : int, optional (default=12) Font size for text labels font_color : string, optional (default='k' black) Font color string font_weight : string, optional (default='normal') Font weight font_family : string, optional (default='sans-serif') Font family label : string, optional Label for graph legend Examples -------- >>> G=nx.dodecahedral_graph() >>> nx.draw(G) >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout >>> import matplotlib.pyplot as plt >>> limits=plt.axis('off') # turn of axis Also see the NetworkX drawing examples at http://networkx.github.io/documentation/latest/gallery.html See Also -------- draw() draw_networkx_nodes() draw_networkx_edges() draw_networkx_labels() draw_networkx_edge_labels() """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if pos is None: pos = nx.drawing.spring_layout(G) # default to spring layout node_collection = draw_networkx_nodes(G, pos, **kwds) edge_collection = draw_networkx_edges(G, pos, **kwds) if with_labels: draw_networkx_labels(G, pos, **kwds) plt.draw_if_interactive() def draw_networkx_nodes(G, pos, nodelist=None, node_size=300, node_color='r', node_shape='o', alpha=1.0, cmap=None, vmin=None, vmax=None, ax=None, linewidths=None, label=None, **kwds): """Draw the nodes of the graph G. This draws only the nodes of the graph G. 
Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. Positions should be sequences of length 2. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. nodelist : list, optional Draw only specified nodes (default G.nodes()) node_size : scalar or array Size of nodes (default=300). If an array is specified it must be the same length as nodelist. node_color : color string, or array of floats Node color. Can be a single color format string (default='r'), or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped to colors using the cmap and vmin,vmax parameters. See matplotlib.scatter for more details. node_shape : string The shape of the node. Specification is as matplotlib.scatter marker, one of 'so^>v<dph8' (default='o'). alpha : float The node transparency (default=1.0) cmap : Matplotlib colormap Colormap for mapping intensities of nodes (default=None) vmin,vmax : floats Minimum and maximum for node colormap scaling (default=None) linewidths : [None | scalar | sequence] Line width of symbol border (default =1.0) label : [None| string] Label for legend Returns ------- matplotlib.collections.PathCollection `PathCollection` of the nodes. Examples -------- >>> G=nx.dodecahedral_graph() >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G)) Also see the NetworkX drawing examples at http://networkx.github.io/documentation/latest/gallery.html See Also -------- draw() draw_networkx() draw_networkx_edges() draw_networkx_labels() draw_networkx_edge_labels() """ try: import matplotlib.pyplot as plt import numpy except ImportError: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if ax is None: ax = plt.gca() if nodelist is None: nodelist = G.nodes() if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing return None try: xy = numpy.asarray([pos[v] for v in nodelist]) except KeyError as e: raise nx.NetworkXError('Node %s has no position.'%e) except ValueError: raise nx.NetworkXError('Bad value in node positions.') node_collection = ax.scatter(xy[:, 0], xy[:, 1], s=node_size, c=node_color, marker=node_shape, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, linewidths=linewidths, label=label) node_collection.set_zorder(2) return node_collection def draw_networkx_edges(G, pos, edgelist=None, width=1.0, edge_color='k', style='solid', alpha=1.0, edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None, arrows=True, label=None, **kwds): """Draw the edges of the graph G. This draws only the edges of the graph G. Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. Positions should be sequences of length 2. edgelist : collection of edge tuples Draw only specified edges(default=G.edges()) width : float, or array of floats Line width of edges (default=1.0) edge_color : color string, or array of floats Edge color. Can be a single color format string (default='r'), or a sequence of colors with the same length as edgelist. If numeric values are specified they will be mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters. 
    style : string
       Edge line style (default='solid') (solid|dashed|dotted,dashdot)

    alpha : float
       The edge transparency (default=1.0)

    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)

    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)

    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.

    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.

    label : [None| string]
       Label for legend

    Returns
    -------
    matplotlib.collection.LineCollection
        `LineCollection` of the edges

    Notes
    -----
    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.
    Yes, it is ugly but drawing proper arrows with Matplotlib this
    way is tricky.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))

    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        from matplotlib.colors import colorConverter, Colormap
        from matplotlib.collections import LineCollection
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()

    if edgelist is None:
        edgelist = G.edges()

    if not edgelist or len(edgelist) == 0:  # no edges!
        return None

    # set edge positions
    edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])

    if not cb.iterable(width):
        lw = (width,)
    else:
        lw = width

    if not cb.is_string_like(edge_color) \
            and cb.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        if numpy.alltrue([cb.is_string_like(c) for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif numpy.alltrue([not cb.is_string_like(c) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
                              for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        if cb.is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')

    edge_collection = LineCollection(edge_pos,
                                     colors=edge_colors,
                                     linewidths=lw,
                                     antialiaseds=(1,),
                                     linestyle=style,
                                     transOffset = ax.transData,
                                     )

    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection.  It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009).  We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now.  Only set it
    # globally if provided as a scalar.
if cb.is_numlike(alpha): edge_collection.set_alpha(alpha) if edge_colors is None: if edge_cmap is not None: assert(isinstance(edge_cmap, Colormap)) edge_collection.set_array(numpy.asarray(edge_color)) edge_collection.set_cmap(edge_cmap) if edge_vmin is not None or edge_vmax is not None: edge_collection.set_clim(edge_vmin, edge_vmax) else: edge_collection.autoscale() arrow_collection = None if G.is_directed() and arrows: # a directed graph hack # draw thick line segments at head end of edge # waiting for someone else to implement arrows that will work arrow_colors = edge_colors a_pos = [] p = 1.0-0.25 # make head segment 25 percent of edge length for src, dst in edge_pos: x1, y1 = src x2, y2 = dst dx = x2-x1 # x offset dy = y2-y1 # y offset d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge if d == 0: # source and target at same position continue if dx == 0: # vertical edge xa = x2 ya = dy*p+y1 if dy == 0: # horizontal edge ya = y2 xa = dx*p+x1 else: theta = numpy.arctan2(dy, dx) xa = p*d*numpy.cos(theta)+x1 ya = p*d*numpy.sin(theta)+y1 a_pos.append(((xa, ya), (x2, y2))) arrow_collection = LineCollection(a_pos, colors=arrow_colors, linewidths=[4*ww for ww in lw], antialiaseds=(1,), transOffset = ax.transData, ) arrow_collection.set_zorder(1) # edges go behind nodes arrow_collection.set_label(label) ax.add_collection(arrow_collection) # update view minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0])) maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0])) miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1])) maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1])) w = maxx-minx h = maxy-miny padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) ax.update_datalim(corners) ax.autoscale_view() # if arrow_collection: return edge_collection def draw_networkx_labels(G, pos, labels=None, font_size=12, font_color='k', font_family='sans-serif', font_weight='normal', alpha=1.0, bbox=None, ax=None, **kwds): """Draw node labels on the graph G. Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. Positions should be sequences of length 2. labels : dictionary, optional (default=None) Node labels in a dictionary keyed by node of text labels font_size : int Font size for text labels (default=12) font_color : string Font color string (default='k' black) font_family : string Font family (default='sans-serif') font_weight : string Font weight (default='normal') alpha : float The text transparency (default=1.0) ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. 
Returns ------- dict `dict` of labels keyed on the nodes Examples -------- >>> G=nx.dodecahedral_graph() >>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G)) Also see the NetworkX drawing examples at http://networkx.github.io/documentation/latest/gallery.html See Also -------- draw() draw_networkx() draw_networkx_nodes() draw_networkx_edges() draw_networkx_edge_labels() """ try: import matplotlib.pyplot as plt import matplotlib.cbook as cb except ImportError: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if ax is None: ax = plt.gca() if labels is None: labels = dict((n, n) for n in G.nodes()) # set optional alignment horizontalalignment = kwds.get('horizontalalignment', 'center') verticalalignment = kwds.get('verticalalignment', 'center') text_items = {} # there is no text collection so we'll fake one for n, label in labels.items(): (x, y) = pos[n] if not cb.is_string_like(label): label = str(label) # this will cause "1" and 1 to be labeled the same t = ax.text(x, y, label, size=font_size, color=font_color, family=font_family, weight=font_weight, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, transform=ax.transData, bbox=bbox, clip_on=True, ) text_items[n] = t return text_items def draw_networkx_edge_labels(G, pos, edge_labels=None, label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal', alpha=1.0, bbox=None, ax=None, rotate=True, **kwds): """Draw edge labels. Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. Positions should be sequences of length 2. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. alpha : float The text transparency (default=1.0) edge_labels : dictionary Edge labels in a dictionary keyed by edge two-tuple of text labels (default=None). Only labels for the keys in the dictionary are drawn. label_pos : float Position of edge label along edge (0=head, 0.5=center, 1=tail) font_size : int Font size for text labels (default=12) font_color : string Font color string (default='k' black) font_weight : string Font weight (default='normal') font_family : string Font family (default='sans-serif') bbox : Matplotlib bbox Specify text box shape and colors. 
clip_on : bool Turn on clipping at axis boundaries (default=True) Returns ------- dict `dict` of labels keyed on the edges Examples -------- >>> G=nx.dodecahedral_graph() >>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G)) Also see the NetworkX drawing examples at http://networkx.github.io/documentation/latest/gallery.html See Also -------- draw() draw_networkx() draw_networkx_nodes() draw_networkx_edges() draw_networkx_labels() """ try: import matplotlib.pyplot as plt import matplotlib.cbook as cb import numpy except ImportError: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if ax is None: ax = plt.gca() if edge_labels is None: labels = dict(((u, v), d) for u, v, d in G.edges(data=True)) else: labels = edge_labels text_items = {} for (n1, n2), label in labels.items(): (x1, y1) = pos[n1] (x2, y2) = pos[n2] (x, y) = (x1 * label_pos + x2 * (1.0 - label_pos), y1 * label_pos + y2 * (1.0 - label_pos)) if rotate: angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees # make label orientation "right-side-up" if angle > 90: angle -= 180 if angle < - 90: angle += 180 # transform data coordinate angle to screen coordinate angle xy = numpy.array((x, y)) trans_angle = ax.transData.transform_angles(numpy.array((angle,)), xy.reshape((1, 2)))[0] else: trans_angle = 0.0 # use default box of white with white border if bbox is None: bbox = dict(boxstyle='round', ec=(1.0, 1.0, 1.0), fc=(1.0, 1.0, 1.0), ) if not cb.is_string_like(label): label = str(label) # this will cause "1" and 1 to be labeled the same # set optional alignment horizontalalignment = kwds.get('horizontalalignment', 'center') verticalalignment = kwds.get('verticalalignment', 'center') t = ax.text(x, y, label, size=font_size, color=font_color, family=font_family, weight=font_weight, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, rotation=trans_angle, transform=ax.transData, bbox=bbox, zorder=1, clip_on=True, ) text_items[(n1, n2)] = t return text_items def draw_circular(G, **kwargs): """Draw the graph G with a circular layout. Parameters ---------- G : graph A networkx graph **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the pos parameter which is not used by this function. """ draw(G, circular_layout(G), **kwargs) def draw_random(G, **kwargs): """Draw the graph G with a random layout. Parameters ---------- G : graph A networkx graph **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the pos parameter which is not used by this function. """ draw(G, random_layout(G), **kwargs) def draw_spectral(G, **kwargs): """Draw the graph G with a spectral layout. Parameters ---------- G : graph A networkx graph **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the pos parameter which is not used by this function. """ draw(G, spectral_layout(G), **kwargs) def draw_spring(G, **kwargs): """Draw the graph G with a spring layout. Parameters ---------- G : graph A networkx graph **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the pos parameter which is not used by this function. """ draw(G, spring_layout(G), **kwargs) def draw_shell(G, **kwargs): """Draw networkx graph with shell layout. 
Parameters ---------- G : graph A networkx graph **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the pos parameter which is not used by this function. """ nlist = kwargs.get('nlist', None) if nlist is not None: del(kwargs['nlist']) draw(G, shell_layout(G, nlist=nlist), **kwargs) def draw_graphviz(G, prog="neato", **kwargs): """Draw networkx graph with graphviz layout. Parameters ---------- G : graph A networkx graph prog : string, optional Name of Graphviz layout program **kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords. """ pos = nx.drawing.graphviz_layout(G, prog) draw(G, pos, **kwargs) def draw_nx(G, pos, **kwds): """For backward compatibility; use draw or draw_networkx.""" draw(G, pos, **kwds) # fixture for nose tests def setup_module(module): from nose import SkipTest try: import matplotlib as mpl mpl.use('PS', warn=False) import matplotlib.pyplot as plt except: raise SkipTest("matplotlib not available")
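# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# A minimal sketch showing how the drawing helpers defined above compose; it
# assumes a standard networkx + matplotlib install, and the particular graph
# and layout used here (petersen_graph, spring_layout) are example choices.
if __name__ == '__main__':
    import networkx as nx
    import matplotlib.pyplot as plt
    G = nx.petersen_graph()
    pos = nx.spring_layout(G)
    # draw nodes, edges and labels separately to control each collection
    draw_networkx_nodes(G, pos, node_color='c', node_size=200)
    draw_networkx_edges(G, pos, edge_color='k', width=1.5)
    draw_networkx_labels(G, pos, font_size=8)
    plt.axis('off')
    plt.show()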
gpl-2.0
8,915,198,073,067,984,000
29.199191
124
0.572639
false
AlchemicalChest/Gaussian-Process-with-Stochastic-Variational-Inference
GPSVI/test/test20newsgroups.py
1
1417
# -*- coding: utf-8 -*- """ Created on Thu Apr 30 11:24:55 2015 @author: Ziang """ import time import numpy as np import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets from sklearn.cross_validation import train_test_split from sklearn.decomposition import TruncatedSVD from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from GPSVI.core.GPClassifier import GPClassifier np.random.seed(0) data = datasets.fetch_20newsgroups_vectorized() xTr, xTe, yTr, yTe = train_test_split(data.data, data.target, test_size=0.80) svd = TruncatedSVD(algorithm='randomized', n_components=3, tol=0.0) svd.fit(xTr) x = svd.transform(xTr) fig = plt.figure('Show data') ax = fig.add_subplot(111, projection='3d') ax.scatter(x[:,0], x[:,1], x[:,2], c=yTr, cmap=matplotlib.cm.rainbow) t0 = time.time() clf_lr = LogisticRegression(C=2.0) clf_lr.fit(xTr, yTr) lr_score = clf_lr.score(xTe, yTe) lr_t = time.time()-t0 t0 = time.time() clf_svc = SVC() clf_svc.fit(xTr, yTr) svc_score = clf_svc.score(xTe, yTe) svc_t = time.time()-t0 t0 = time.time() clf_gp = GPClassifier(xTr, yTr, \ alpha=0.05, max_iter=100, num_inducing_points=1500, \ kernel_type='rbf', kernel_args={'gamma':2.0}, \ learning_rate=0.01, verbose=2) clf_gp.fit() gp_score = clf_gp.score(xTe, yTe) gp_t = time.time()-t0
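# Editor's sketch (not in the original script): report the accuracy/time
# comparison computed above. Assumes the three classifiers above finished
# fitting; only variables already defined in this script are used.
print('LogisticRegression: accuracy=%.4f, time=%.1fs' % (lr_score, lr_t))
print('SVC:                accuracy=%.4f, time=%.1fs' % (svc_score, svc_t))
print('GPClassifier (SVI): accuracy=%.4f, time=%.1fs' % (gp_score, gp_t))
plt.show()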
mit
-1,960,622,932,607,665,700
25.259259
77
0.690191
false
xray/xray
xarray/plot/plot.py
1
33300
""" Use this module directly: import xarray.plot as xplt Or use the methods on a DataArray or Dataset: DataArray.plot._____ Dataset.plot._____ """ import functools import numpy as np import pandas as pd from .facetgrid import _easy_facetgrid from .utils import ( _add_colorbar, _assert_valid_xy, _ensure_plottable, _infer_interval_breaks, _infer_xy_labels, _process_cmap_cbar_kwargs, _rescale_imshow_rgb, _resolve_intervals_1dplot, _resolve_intervals_2dplot, _update_axes, get_axis, import_matplotlib_pyplot, label_from_attrs, ) def _infer_line_data(darray, x, y, hue): ndims = len(darray.dims) if x is not None and y is not None: raise ValueError("Cannot specify both x and y kwargs for line plots.") if x is not None: _assert_valid_xy(darray, x, "x") if y is not None: _assert_valid_xy(darray, y, "y") if ndims == 1: huename = None hueplt = None huelabel = "" if x is not None: xplt = darray[x] yplt = darray elif y is not None: xplt = darray yplt = darray[y] else: # Both x & y are None dim = darray.dims[0] xplt = darray[dim] yplt = darray else: if x is None and y is None and hue is None: raise ValueError("For 2D inputs, please specify either hue, x or y.") if y is None: xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue) xplt = darray[xname] if xplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] yplt = darray.transpose(otherdim, huename, transpose_coords=False) xplt = xplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (xdim,) = darray[xname].dims (huedim,) = darray[huename].dims yplt = darray.transpose(xdim, huedim) else: yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue) yplt = darray[yname] if yplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] xplt = darray.transpose(otherdim, huename, transpose_coords=False) yplt = yplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (ydim,) = darray[yname].dims (huedim,) = darray[huename].dims xplt = darray.transpose(ydim, huedim) huelabel = label_from_attrs(darray[huename]) hueplt = darray[huename] xlabel = label_from_attrs(xplt) ylabel = label_from_attrs(yplt) return xplt, yplt, hueplt, xlabel, ylabel, huelabel def plot( darray, row=None, col=None, col_wrap=None, ax=None, hue=None, rtol=0.01, subplot_kws=None, **kwargs, ): """ Default plot of DataArray using matplotlib.pyplot. Calls xarray plotting function based on the dimensions of darray.squeeze() =============== =========================== Dimensions Plotting function --------------- --------------------------- 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Parameters ---------- darray : DataArray row : str, optional If passed, make row faceted plots on this dimension name col : str, optional If passed, make column faceted plots on this dimension name hue : str, optional If passed, make faceted line plots with hue on this dimension name col_wrap : int, optional Use together with ``col`` to wrap faceted plots ax : matplotlib.axes.Axes, optional If None, uses the current axis. Not applicable when using facets. 
rtol : float, optional Relative tolerance used to determine if the indexes are uniformly spaced. Usually a small positive number. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. **kwargs : optional Additional keyword arguments to matplotlib """ darray = darray.squeeze().compute() plot_dims = set(darray.dims) plot_dims.discard(row) plot_dims.discard(col) plot_dims.discard(hue) ndims = len(plot_dims) error_msg = ( "Only 1d and 2d plots are supported for facets in xarray. " "See the package `Seaborn` for more options." ) if ndims in [1, 2]: if row or col: kwargs["subplot_kws"] = subplot_kws kwargs["row"] = row kwargs["col"] = col kwargs["col_wrap"] = col_wrap if ndims == 1: plotfunc = line kwargs["hue"] = hue elif ndims == 2: if hue: plotfunc = line kwargs["hue"] = hue else: plotfunc = pcolormesh kwargs["subplot_kws"] = subplot_kws else: if row or col or hue: raise ValueError(error_msg) plotfunc = hist kwargs["ax"] = ax return plotfunc(darray, **kwargs) # This function signature should not change so that it can use # matplotlib format strings def line( darray, *args, row=None, col=None, figsize=None, aspect=None, size=None, ax=None, hue=None, x=None, y=None, xincrease=None, yincrease=None, xscale=None, yscale=None, xticks=None, yticks=None, xlim=None, ylim=None, add_legend=True, _labels=True, **kwargs, ): """ Line plot of DataArray index against values Wraps :func:`matplotlib:matplotlib.pyplot.plot` Parameters ---------- darray : DataArray Must be 1 dimensional figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : scalar, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axis on which to plot this figure. By default, use the current axis. Mutually exclusive with ``size`` and ``figsize``. hue : string, optional Dimension or coordinate for which you want multiple lines plotted. If plotting against a 2D coordinate, ``hue`` must be a dimension. x, y : string, optional Dimension, coordinate or MultiIndex level for x, y axis. Only one of these may be specified. The other coordinate plots values from the DataArray on which this plot method is called. xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional Specifies scaling for the x- and y-axes respectively xticks, yticks : Specify tick locations for x- and y-axes xlim, ylim : Specify x- and y-axes limits xincrease : None, True, or False, optional Should the values on the x axes be increasing from left to right? if None, use the default for the matplotlib function. yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. add_legend : bool, optional Add legend with y axis coordinates (2D inputs only). *args, **kwargs : optional Additional arguments to matplotlib.pyplot.plot """ # Handle facetgrids first if row or col: allargs = locals().copy() allargs.update(allargs.pop("kwargs")) allargs.pop("darray") return _easy_facetgrid(darray, line, kind="line", **allargs) ndims = len(darray.dims) if ndims > 2: raise ValueError( "Line plots are for 1- or 2-dimensional DataArrays. 
" "Passed DataArray has {ndims} " "dimensions".format(ndims=ndims) ) # The allargs dict passed to _easy_facetgrid above contains args if args == (): args = kwargs.pop("args", ()) else: assert "args" not in kwargs ax = get_axis(figsize, size, aspect, ax) xplt, yplt, hueplt, xlabel, ylabel, hue_label = _infer_line_data(darray, x, y, hue) # Remove pd.Intervals if contained in xplt.values and/or yplt.values. xplt_val, yplt_val, xlabel, ylabel, kwargs = _resolve_intervals_1dplot( xplt.values, yplt.values, xlabel, ylabel, kwargs ) _ensure_plottable(xplt_val, yplt_val) primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs) if _labels: if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.set_title(darray._title_for_slice()) if darray.ndim == 2 and add_legend: ax.legend(handles=primitive, labels=list(hueplt.values), title=hue_label) # Rotate dates on xlabels # Do this without calling autofmt_xdate so that x-axes ticks # on other subplots (if any) are not deleted. # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots if np.issubdtype(xplt.dtype, np.datetime64): for xlabels in ax.get_xticklabels(): xlabels.set_rotation(30) xlabels.set_ha("right") _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return primitive def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs): """ Step plot of DataArray index against values Similar to :func:`matplotlib:matplotlib.pyplot.step` Parameters ---------- where : {"pre", "post", "mid"}, default: "pre" Define where the steps should be placed: - "pre": The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. - "post": The y value is continued constantly to the right from every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - "mid": Steps occur half-way between the *x* positions. Note that this parameter is ignored if one coordinate consists of :py:func:`pandas.Interval` values, e.g. as a result of :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. *args, **kwargs : optional Additional arguments following :py:func:`xarray.plot.line` """ if where not in {"pre", "post", "mid"}: raise ValueError("'where' argument to step must be " "'pre', 'post' or 'mid'") if ds is not None: if drawstyle is None: drawstyle = ds else: raise TypeError("ds and drawstyle are mutually exclusive") if drawstyle is None: drawstyle = "" drawstyle = "steps-" + where + drawstyle return line(darray, *args, drawstyle=drawstyle, **kwargs) def hist( darray, figsize=None, size=None, aspect=None, ax=None, xincrease=None, yincrease=None, xscale=None, yscale=None, xticks=None, yticks=None, xlim=None, ylim=None, **kwargs, ): """ Histogram of DataArray Wraps :func:`matplotlib:matplotlib.pyplot.hist` Plots N dimensional arrays by first flattening the array. Parameters ---------- darray : DataArray Can be any dimension figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : scalar, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. ax : matplotlib.axes.Axes, optional Axis on which to plot this figure. By default, use the current axis. 
Mutually exclusive with ``size`` and ``figsize``. **kwargs : optional Additional keyword arguments to matplotlib.pyplot.hist """ ax = get_axis(figsize, size, aspect, ax) no_nan = np.ravel(darray.values) no_nan = no_nan[pd.notnull(no_nan)] primitive = ax.hist(no_nan, **kwargs) ax.set_title("Histogram") ax.set_xlabel(label_from_attrs(darray)) _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return primitive # MUST run before any 2d plotting functions are defined since # _plot2d decorator adds them as methods here. class _PlotMethods: """ Enables use of xarray.plot functions as attributes on a DataArray. For example, DataArray.plot.imshow """ __slots__ = ("_da",) def __init__(self, darray): self._da = darray def __call__(self, **kwargs): return plot(self._da, **kwargs) # we can't use functools.wraps here since that also modifies the name / qualname __doc__ = __call__.__doc__ = plot.__doc__ __call__.__wrapped__ = plot # type: ignore __call__.__annotations__ = plot.__annotations__ @functools.wraps(hist) def hist(self, ax=None, **kwargs): return hist(self._da, ax=ax, **kwargs) @functools.wraps(line) def line(self, *args, **kwargs): return line(self._da, *args, **kwargs) @functools.wraps(step) def step(self, *args, **kwargs): return step(self._da, *args, **kwargs) def _plot2d(plotfunc): """ Decorator for common 2d plotting logic Also adds the 2d plot method to class _PlotMethods """ commondoc = """ Parameters ---------- darray : DataArray Must be 2 dimensional, unless creating faceted plots x : string, optional Coordinate for x axis. If None use darray.dims[1] y : string, optional Coordinate for y axis. If None use darray.dims[0] figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : scalar, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axis on which to plot this figure. By default, use the current axis. Mutually exclusive with ``size`` and ``figsize``. row : string, optional If passed, make row faceted plots on this dimension name col : string, optional If passed, make column faceted plots on this dimension name col_wrap : int, optional Use together with ``col`` to wrap faceted plots xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional Specifies scaling for the x- and y-axes respectively xticks, yticks : Specify tick locations for x- and y-axes xlim, ylim : Specify x- and y-axes limits xincrease : None, True, or False, optional Should the values on the x axes be increasing from left to right? if None, use the default for the matplotlib function. yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. add_colorbar : bool, optional Adds colorbar to axis add_labels : bool, optional Use xarray metadata to label axes norm : ``matplotlib.colors.Normalize`` instance, optional If the ``norm`` has vmin or vmax specified, the corresponding kwarg must be None. vmin, vmax : floats, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting one of these values will fix the other by symmetry around ``center``. 
Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. cmap : matplotlib colormap name or object, optional The mapping from data values to color space. If not provided, this will be either be ``viridis`` (if the function infers a sequential dataset) or ``RdBu_r`` (if the function infers a diverging dataset). When `Seaborn` is installed, ``cmap`` may also be a `seaborn` color palette. If ``cmap`` is seaborn color palette and the plot type is not ``contour`` or ``contourf``, ``levels`` must also be specified. colors : discrete colors to plot, optional A single color or a list of colors. If the plot type is not ``contour`` or ``contourf``, the ``levels`` argument is required. center : float, optional The value at which to center the colormap. Passing this value implies use of a diverging colormap. Setting it to ``False`` prevents use of a diverging colormap. robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. extend : {"neither", "both", "min", "max"}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, extend is inferred from vmin, vmax and the data limits. levels : int or list-like object, optional Split the colormap (cmap) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. infer_intervals : bool, optional Only applies to pcolormesh. If True, the coordinate intervals are passed to pcolormesh. If False, the original coordinates are used (this can be useful for certain map projections). The default is to always infer intervals, unless the mesh is irregular and plotted on a map projection. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only used for 2D and FacetGrid plots. cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar. cbar_kwargs : dict, optional Dictionary of keyword arguments to pass to the colorbar. **kwargs : optional Additional arguments to wrapped matplotlib function Returns ------- artist : The same type of primitive artist that the wrapped matplotlib function returns """ # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps(plotfunc) def newplotfunc( darray, x=None, y=None, figsize=None, size=None, aspect=None, ax=None, row=None, col=None, col_wrap=None, xincrease=True, yincrease=True, add_colorbar=None, add_labels=True, vmin=None, vmax=None, cmap=None, center=None, robust=False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws=None, cbar_ax=None, cbar_kwargs=None, xscale=None, yscale=None, xticks=None, yticks=None, xlim=None, ylim=None, norm=None, **kwargs, ): # All 2d plots in xarray share this function signature. # Method signature below should be consistent. # Decide on a default for the colorbar before facetgrids if add_colorbar is None: add_colorbar = plotfunc.__name__ != "contour" imshow_rgb = plotfunc.__name__ == "imshow" and darray.ndim == ( 3 + (row is not None) + (col is not None) ) if imshow_rgb: # Don't add a colorbar when showing an image with explicit colors add_colorbar = False # Matplotlib does not support normalising RGB data, so do it here. # See eg. 
https://github.com/matplotlib/matplotlib/pull/10220 if robust or vmax is not None or vmin is not None: darray = _rescale_imshow_rgb(darray, vmin, vmax, robust) vmin, vmax, robust = None, None, False # Handle facetgrids first if row or col: allargs = locals().copy() del allargs["darray"] del allargs["imshow_rgb"] allargs.update(allargs.pop("kwargs")) # Need the decorated plotting function allargs["plotfunc"] = globals()[plotfunc.__name__] return _easy_facetgrid(darray, kind="dataarray", **allargs) plt = import_matplotlib_pyplot() rgb = kwargs.pop("rgb", None) if rgb is not None and plotfunc.__name__ != "imshow": raise ValueError('The "rgb" keyword is only valid for imshow()') elif rgb is not None and not imshow_rgb: raise ValueError( 'The "rgb" keyword is only valid for imshow()' "with a three-dimensional array (per facet)" ) xlab, ylab = _infer_xy_labels( darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb ) # better to pass the ndarrays directly to plotting functions xval = darray[xlab].values yval = darray[ylab].values # check if we need to broadcast one dimension if xval.ndim < yval.ndim: dims = darray[ylab].dims if xval.shape[0] == yval.shape[0]: xval = np.broadcast_to(xval[:, np.newaxis], yval.shape) else: xval = np.broadcast_to(xval[np.newaxis, :], yval.shape) elif yval.ndim < xval.ndim: dims = darray[xlab].dims if yval.shape[0] == xval.shape[0]: yval = np.broadcast_to(yval[:, np.newaxis], xval.shape) else: yval = np.broadcast_to(yval[np.newaxis, :], xval.shape) elif xval.ndim == 2: dims = darray[xlab].dims else: dims = (darray[ylab].dims[0], darray[xlab].dims[0]) # May need to transpose for correct x, y labels # xlab may be the name of a coord, we have to check for dim names if imshow_rgb: # For RGB[A] images, matplotlib requires the color dimension # to be last. In Xarray the order should be unimportant, so # we transpose to (y, x, color) to make this work. yx_dims = (ylab, xlab) dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims) if dims != darray.dims: darray = darray.transpose(*dims, transpose_coords=True) # Pass the data as a masked ndarray too zval = darray.to_masked_array(copy=False) # Replace pd.Intervals if contained in xval or yval. xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__) yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__) _ensure_plottable(xplt, yplt, zval) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, zval.data, **locals(), _is_facetgrid=kwargs.pop("_is_facetgrid", False), ) if "contour" in plotfunc.__name__: # extend is a keyword argument only for contour and contourf, but # passing it to the colorbar is sufficient for imshow and # pcolormesh kwargs["extend"] = cmap_params["extend"] kwargs["levels"] = cmap_params["levels"] # if colors == a single color, matplotlib draws dashed negative # contours. 
we lose this feature if we pass cmap and not colors if isinstance(colors, str): cmap_params["cmap"] = None kwargs["colors"] = colors if "pcolormesh" == plotfunc.__name__: kwargs["infer_intervals"] = infer_intervals if "imshow" == plotfunc.__name__ and isinstance(aspect, str): # forbid usage of mpl strings raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray") if subplot_kws is None: subplot_kws = dict() ax = get_axis(figsize, size, aspect, ax, **subplot_kws) primitive = plotfunc( xplt, yplt, zval, ax=ax, cmap=cmap_params["cmap"], vmin=cmap_params["vmin"], vmax=cmap_params["vmax"], norm=cmap_params["norm"], **kwargs, ) # Label the plot with metadata if add_labels: ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra)) ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra)) ax.set_title(darray._title_for_slice()) if add_colorbar: if add_labels and "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(darray) cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params) elif cbar_ax is not None or cbar_kwargs: # inform the user about keywords which aren't used raise ValueError( "cbar_ax and cbar_kwargs can't be used with add_colorbar=False." ) # origin kwarg overrides yincrease if "origin" in kwargs: yincrease = None _update_axes( ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim ) # Rotate dates on xlabels # Do this without calling autofmt_xdate so that x-axes ticks # on other subplots (if any) are not deleted. # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots if np.issubdtype(xplt.dtype, np.datetime64): for xlabels in ax.get_xticklabels(): xlabels.set_rotation(30) xlabels.set_ha("right") return primitive # For use as DataArray.plot.plotmethod @functools.wraps(newplotfunc) def plotmethod( _PlotMethods_obj, x=None, y=None, figsize=None, size=None, aspect=None, ax=None, row=None, col=None, col_wrap=None, xincrease=True, yincrease=True, add_colorbar=None, add_labels=True, vmin=None, vmax=None, cmap=None, colors=None, center=None, robust=False, extend=None, levels=None, infer_intervals=None, subplot_kws=None, cbar_ax=None, cbar_kwargs=None, xscale=None, yscale=None, xticks=None, yticks=None, xlim=None, ylim=None, norm=None, **kwargs, ): """ The method should have the same signature as the function. This just makes the method work on Plotmethods objects, and passes all the other arguments straight through. """ allargs = locals() allargs["darray"] = _PlotMethods_obj._da allargs.update(kwargs) for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]: del allargs[arg] return newplotfunc(**allargs) # Add to class _PlotMethods setattr(_PlotMethods, plotmethod.__name__, plotmethod) return newplotfunc @_plot2d def imshow(x, y, z, ax, **kwargs): """ Image plot of 2d DataArray using matplotlib.pyplot Wraps :func:`matplotlib:matplotlib.pyplot.imshow` While other plot methods require the DataArray to be strictly two-dimensional, ``imshow`` also accepts a 3D array where some dimension can be interpreted as RGB or RGBA color channels and allows this dimension to be specified via the kwarg ``rgb=``. Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA data, by applying a single scaling factor and offset to all bands. Passing ``robust=True`` infers ``vmin`` and ``vmax`` :ref:`in the usual way <robust-plotting>`. .. note:: This function needs uniformly spaced coordinates to properly label the axes. Call DataArray.plot() to check. 
The pixels are centered on the coordinates values. Ie, if the coordinate value is 3.2 then the pixels for those coordinates will be centered on 3.2. """ if x.ndim != 1 or y.ndim != 1: raise ValueError( "imshow requires 1D coordinates, try using " "pcolormesh or contour(f)" ) # Centering the pixels- Assumes uniform spacing try: xstep = (x[1] - x[0]) / 2.0 except IndexError: # Arbitrary default value, similar to matplotlib behaviour xstep = 0.1 try: ystep = (y[1] - y[0]) / 2.0 except IndexError: ystep = 0.1 left, right = x[0] - xstep, x[-1] + xstep bottom, top = y[-1] + ystep, y[0] - ystep defaults = {"origin": "upper", "interpolation": "nearest"} if not hasattr(ax, "projection"): # not for cartopy geoaxes defaults["aspect"] = "auto" # Allow user to override these defaults defaults.update(kwargs) if defaults["origin"] == "upper": defaults["extent"] = [left, right, bottom, top] else: defaults["extent"] = [left, right, top, bottom] if z.ndim == 3: # matplotlib imshow uses black for missing data, but Xarray makes # missing data transparent. We therefore add an alpha channel if # there isn't one, and set it to transparent where data is masked. if z.shape[-1] == 3: alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype) if np.issubdtype(z.dtype, np.integer): alpha *= 255 z = np.ma.concatenate((z, alpha), axis=2) else: z = z.copy() z[np.any(z.mask, axis=-1), -1] = 0 primitive = ax.imshow(z, **defaults) return primitive @_plot2d def contour(x, y, z, ax, **kwargs): """ Contour plot of 2d DataArray Wraps :func:`matplotlib:matplotlib.pyplot.contour` """ primitive = ax.contour(x, y, z, **kwargs) return primitive @_plot2d def contourf(x, y, z, ax, **kwargs): """ Filled contour plot of 2d DataArray Wraps :func:`matplotlib:matplotlib.pyplot.contourf` """ primitive = ax.contourf(x, y, z, **kwargs) return primitive @_plot2d def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs): """ Pseudocolor plot of 2d DataArray Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh` """ # decide on a default for infer_intervals (GH781) x = np.asarray(x) if infer_intervals is None: if hasattr(ax, "projection"): if len(x.shape) == 1: infer_intervals = True else: infer_intervals = False else: infer_intervals = True if infer_intervals and ( (np.shape(x)[0] == np.shape(z)[1]) or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])) ): if len(x.shape) == 1: x = _infer_interval_breaks(x, check_monotonic=True) else: # we have to infer the intervals on both axes x = _infer_interval_breaks(x, axis=1) x = _infer_interval_breaks(x, axis=0) if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]): if len(y.shape) == 1: y = _infer_interval_breaks(y, check_monotonic=True) else: # we have to infer the intervals on both axes y = _infer_interval_breaks(y, axis=1) y = _infer_interval_breaks(y, axis=0) primitive = ax.pcolormesh(x, y, z, **kwargs) # by default, pcolormesh picks "round" values for bounds # this results in ugly looking plots with lots of surrounding whitespace if not hasattr(ax, "projection") and x.ndim == 1 and y.ndim == 1: # not a cartopy geoaxis ax.set_xlim(x[0], x[-1]) ax.set_ylim(y[0], y[-1]) return primitive
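# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# A minimal sketch of the accessors defined above as they are exposed on a
# DataArray; assumes a working xarray + matplotlib environment, and the toy
# data below is purely illustrative.
if __name__ == "__main__":
    import xarray as xr

    da = xr.DataArray(
        np.random.rand(4, 5),
        dims=("y", "x"),
        coords={"x": np.arange(5), "y": np.arange(4)},
        name="example",
    )
    da.plot.pcolormesh()  # 2D data dispatches to pcolormesh via _plot2d
    da.isel(y=0).plot.line(marker="o")  # 1D slice dispatches to line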
apache-2.0
1,388,698,512,246,744,800
33.08393
106
0.593393
false
ECP-CANDLE/Benchmarks
Pilot3/P3B4/p3b4_baseline_keras2.py
1
4652
from __future__ import print_function import numpy as np import os, sys, gzip import time import keras from tf_mthcan import hcan import argparse import p3b4 as bmk import candle def initialize_parameters(default_model = 'p3b4_default_model.txt' ): # Build benchmark object p3b3Bmk = bmk.BenchmarkP3B3(bmk.file_path, default_model, 'keras', prog='p3b4_baseline', desc='Hierarchical Convolutional Attention Networks for data extraction from clinical reports - Pilot 3 Benchmark 4') # Initialize parameters gParameters = candle.finalize_parameters(p3b3Bmk) #bmk.logger.info('Params: {}'.format(gParameters)) return gParameters def fetch_data(gParameters): """ Downloads and decompresses the data if not locally available. Since the training data depends on the model definition it is not loaded, instead the local path where the raw data resides is returned """ path = gParameters['data_url'] fpath = candle.fetch_file(path + gParameters['train_data'], 'Pilot3', untar=True) return fpath def run(gParameters): #print( gParameters ) fpath = fetch_data(gParameters) # Get default parameters for initialization and optimizer functions kerasDefaults = candle.keras_default_config() learning_rate = gParameters[ 'learning_rate' ] batch_size = gParameters[ 'batch_size' ] epochs = gParameters[ 'epochs' ] dropout = gParameters[ 'dropout' ] embed_train = gParameters[ 'embed_train' ] optimizer = gParameters[ 'optimizer' ] if optimizer == 0: optimizer = 'adam' elif optimizer == 1: optimizer = 'adadelta' elif optimizer == 2: optimizer = 'sgd' elif optimizer == 3: optimizer = 'rmsprop' wv_len = gParameters[ 'wv_len' ] attention_size = gParameters[ 'attention_size' ] train_x = np.load( fpath + '/train_X.npy' ) train_y = np.load( fpath + '/train_Y.npy' ) test_x = np.load( fpath + '/test_X.npy' ) test_y = np.load( fpath + '/test_Y.npy' ) num_classes = [] for task in range( len( train_y[ 0, : ] ) ): cat = np.unique( train_y[ :, task ] ) num_classes.append( len( cat ) ) train_y[ :, task ] = [ np.where( cat == x )[ 0 ][ 0 ] for x in train_y[ :, task ] ] test_y[ :, task ] = [ np.where( cat == x )[ 0 ][ 0 ] for x in test_y[ :, task ] ] num_tasks = len( num_classes ) max_vocab = np.max( train_x ) max_vocab2 = np.max( test_x ) if max_vocab2 > max_vocab: max_vocab = max_vocab2 vocab_size = max_vocab + 1 vocab = np.random.rand( vocab_size, wv_len ) train_samples = train_x.shape[ 0 ] test_samples = test_x.shape[ 0 ] max_lines = 50 max_words = 30 train_x = train_x.reshape( ( train_x.shape[ 0 ], max_lines, max_words ) ) test_x = test_x.reshape( ( test_x.shape[ 0 ], max_lines, max_words ) ) #optional masking min_lines = 30 min_words = 5 mask = [] for i in range(train_samples+test_samples): doc_mask = np.ones((1,max_lines,max_words)) num_lines = np.random.randint(min_lines,max_lines) for j in range(num_lines): num_words = np.random.randint(min_words,max_words) doc_mask[0,j,:num_words] = 0 mask.append(doc_mask) mask = np.concatenate(mask,0) # train model model = hcan( vocab, num_classes, max_lines, max_words, attention_size= attention_size, dropout_rate = dropout, lr = learning_rate, optimizer= optimizer, embed_train = embed_train ) ret = model.train( train_x, [ np.array( train_y[ :, 0 ] ), np.array( train_y[ :, 1 ] ), np.array( train_y[ :, 2 ] ), np.array( train_y[ :, 3 ] ) ], batch_size= batch_size, epochs= epochs, validation_data= [ test_x, [ np.array( test_y[ :, 0 ] ), np.array( test_y[ :, 1 ] ), np.array( test_y[ :, 2 ] ), np.array( test_y[ :, 3 ] ) ] #[ np.array( test_y[ :, 0 ] ), np.array( test_y[ :, 1 ] ), np.array( test_y[ :, 2 ] ), 
np.array( test_y[ :, 3 ] ) ] ] ) return ret def main(): gParameters = initialize_parameters() avg_loss = run(gParameters) print( "Return: ", avg_loss.history[ 'val_loss' ][-1] ) if __name__ == '__main__': main() # try: # K.clear_session() # except AttributeError: # theano does not have this function # pass
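# Editor's sketch (not part of the benchmark): a quick smoke test that
# overrides a couple of hyperparameters before calling run(). It assumes the
# CANDLE default model file and training data are reachable; the parameter
# names ('epochs', 'batch_size') are the ones read inside run() above.
def smoke_test():
    params = initialize_parameters()
    params['epochs'] = 1        # hypothetical override for a fast check
    params['batch_size'] = 16   # hypothetical override
    history = run(params)
    print("Smoke-test val_loss:", history.history['val_loss'][-1])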
mit
-1,971,844,952,446,747,100
26.204678
143
0.567068
false
karla3jo/menpo-old
menpo/io/mesh/base.py
1
13479
import abc
from collections import namedtuple
import commands
import os.path as path
import tempfile

from menpo.io.base import Importer, find_alternative_files, \
    map_filepath_to_importer
from menpo.io.mesh.assimp import AIImporter
from menpo.io.exceptions import MeshImportError
from menpo.shape import TexturedTriMesh, TriMesh
from vrml.vrml97.parser import buildParser as buildVRML97Parser
import vrml.vrml97.basenodes as basenodes
from vrml.node import NullNode
import numpy as np

from menpo.shape.mesh import ColouredTriMesh

# TODO: Disconnect with AssimpImporter
# This formalises the return type of a mesh importer (before building)
# However, at the moment there is a disconnect between this and the
# Assimp type, and at some point they should become the same object
MeshInfo = namedtuple('MeshInfo', ['points', 'trilist', 'tcoords',
                                   'colour_per_vertex'])


def process_with_meshlabserver(file_path, output_dir=None, script_path=None,
                               output_filetype=None, export_flags=None,
                               meshlab_command='meshlabserver'):
    r"""
    Interface to `meshlabserver` to perform preprocessing on meshes before
    import. Returns a path to the result of the meshlabserver call, ready for
    import as usual. **Requires Meshlab to be installed**.

    Parameters
    ----------
    file_path : string
        Absolute filepath to the mesh
    script_path : string, optional
        If specified this script will be run on the input mesh.

        Default: ``None``
    output_dir : string, optional
        The output directory for the processed mesh.

        Default: The user's tmp directory.
    output_filetype : string, optional
        The output filetype desired from meshlabserver. Takes the form of an
        extension, e.g. ``obj``.

        Default: The same as the input mesh
    export_flags : string, optional
        Flags passed to the ``-om`` parameter. Allows for choosing
        what aspects of the model will be exported (normals,
        texture coords etc)
    meshlab_command : string, optional
        The meshlabserver executable to run.

        Default: 'meshlabserver'

    Returns
    -------
    output_path : string
        The absolute filepath to the processed mesh.
    """
    if output_dir is None:
        output_dir = tempfile.gettempdir()
    filename = path.split(file_path)[-1]
    if output_filetype is not None:
        file_root = path.splitext(filename)[0]
        output_filename = file_root + '.' + output_filetype
    else:
        output_filename = filename
    output_path = path.join(output_dir, output_filename)
    command = (meshlab_command + ' -i ' + file_path + ' -o ' +
               output_path)
    if script_path is not None:
        command += ' -s ' + script_path
    if export_flags is not None:
        command += ' -om ' + export_flags
    commands.getoutput(command)
    return output_path


class MeshImporter(Importer):
    r"""
    Abstract base class for importing meshes. Searches in the directory
    specified by filepath for landmarks and textures with the same basename
    as the mesh. If found, they are automatically attached. If a texture is
    found then a :class:`menpo.shape.mesh.textured.TexturedTriMesh` is built,
    else a :class:`menpo.shape.mesh.base.Trimesh` is built.

    Parameters
    ----------
    filepath : string
        Absolute filepath of the mesh.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, filepath):
        super(MeshImporter, self).__init__(filepath)
        self.meshes = []
        self.attempted_texture_search = False
        self.relative_texture_path = None
        self.texture_importer = None

    def _build_texture_importer(self):
        r"""
        Search for a texture in the same directory as the mesh. If it exists,
        create an importer for it.
""" if self.texture_path is None or not path.exists(self.texture_path): self.texture_importer = None else: # This import is here to avoid circular dependencies from menpo.io.extensions import image_types self.texture_importer = map_filepath_to_importer(self.texture_path, image_types) def _search_for_texture(self): r""" Tries to find a texture with the same name as the mesh. Returns -------- relative_texture_path : string The relative path to the texture or ``None`` if one can't be found """ # Stop searching every single time we access the property self.attempted_texture_search = True # This import is here to avoid circular dependencies from menpo.io.extensions import image_types try: return find_alternative_files('texture', self.filepath, image_types) except ImportError: return None @property def texture_path(self): """ Get the absolute path to the texture. Returns None if one can't be found. Makes it's best effort to find an appropriate texture by searching for textures with the same name as the mesh. Will only search for the path the first time ``texture_path`` is invoked. Sets the ``self.relative_texture_path`` attribute. Returns ------- texture_path : string Absolute filepath to the texture """ # Try find a texture path if we can if (self.relative_texture_path is None and not self.attempted_texture_search): self.relative_texture_path = self._search_for_texture() try: return path.join(self.folder, self.relative_texture_path) except AttributeError: return None @abc.abstractmethod def _parse_format(self): r""" Abstract method that handles actually building a mesh. This involves reading the mesh from disk and doing any necessary parsing. Should set the ``self.meshes`` attribute. Each mesh in ``self.meshes`` is expected to be an object with attributes: ======== ========================== name type ======== ========================== points double ndarray trilist int ndarray tcoords double ndarray (optional) ======== ========================== May also set the ``self.relative_texture_path`` if it is specified by the format. """ pass def build(self): r""" Overrides the :meth:`build <menpo.io.base.Importer.build>` method. Parse the format as defined by :meth:`_parse_format` and then search for valid textures and landmarks that may have been defined by the format. Build the appropriate type of mesh defined by parsing the format. May or may not be textured. Returns ------- meshes : list of :class:`menpo.shape.mesh.textured.TexturedTriMesh` or :class:`menpo.shape.mesh.base.Trimesh` If more than one mesh, returns a list of meshes. If only one mesh, returns the single mesh. """ # self._parse_format() self._build_texture_importer() meshes = [] for mesh in self.meshes: if self.texture_importer is not None: new_mesh = TexturedTriMesh(mesh.points.astype(np.float64), mesh.trilist, mesh.tcoords, self.texture_importer.build()) elif mesh.colour_per_vertex is not None: new_mesh = ColouredTriMesh(mesh.points, mesh.trilist, mesh.colour_per_vertex) else: new_mesh = TriMesh(mesh.points, mesh.trilist) meshes.append(new_mesh) if len(meshes) == 1: return meshes[0] else: return meshes class AssimpImporter(AIImporter, MeshImporter): """ Uses assimp to import meshes. The assimp importing is wrapped via cython, Parameters ---------- filepath : string Absolute filepath of the mesh. """ def __init__(self, filepath): super(AssimpImporter, self).__init__(filepath) def _parse_format(self): r""" Use assimp to build the mesh. Also, get the relative texture path. 
""" self.build_scene() # Properties should have different names because of multiple # inheritance self.relative_texture_path = self.assimp_texture_path class WRLImporter(MeshImporter): """ Allows importing VRML meshes. Uses a fork of PyVRML97 to do (hopefully) more robust parsing of VRML files. It should be noted that, unfortunately, this is a lot slower than the C++-based assimp importer. VRML allows non-triangular polygons, whilst our importer pipeline doesn't. Therefore, any non-triangular polygons are dropped. VRML also allows separate texture coordinate indices, which we do not support. To have a better formed mesh, try exporting the WRL as OBJ from Meshlab. Parameters ---------- filepath : string Absolute filepath of the mesh. """ def __init__(self, filepath): # Setup class before super class call super(WRLImporter, self).__init__(filepath) def _parse_format(self): r""" Use pyVRML to parse the file and build a mesh object. A single mesh per file is assumed. Raises ------ MeshImportError If no transform or shape is found in the scenegraph """ with open(self.filepath) as f: self.text = f.read() parser = buildVRML97Parser() vrml_tuple = parser.parse(self.text) # I assume these tuples are always built in this order scenegraph = vrml_tuple[1][1] shape_container = None # Let's check if for child in scenegraph.children: if type(child) in [basenodes.Transform, basenodes.Group]: # Only fetch the first container (unknown what do do with more # than one container at this time) shape_container = child break if shape_container is None: raise MeshImportError('Unable to find shape container in ' 'scenegraph') shape = None for child in shape_container.children: if type(child) is basenodes.Shape: # Only fetch the first shape (unknown what do do with more # than one shape at this time) shape = child break if shape is None: raise MeshImportError('Unable to find shape in container') mesh_points = shape.geometry.coord.point mesh_trilist = self._filter_non_triangular_polygons( shape.geometry.coordIndex) if type(shape.geometry.texCoord) is NullNode: # Colour per-vertex mesh_colour_per_vertex = shape.geometry.color.color mesh_tcoords = None else: # Texture coordinates mesh_colour_per_vertex = None mesh_tcoords = shape.geometry.texCoord.point # See if we have a separate texture index, if not just create an # empty array try: tex_trilist = self._filter_non_triangular_polygons( shape.geometry.texCoordIndex) except AttributeError: tex_trilist = np.array([-1]) # Fix texture coordinates - we can only have one index so we choose # to use the triangle index if np.max(tex_trilist) > np.max(mesh_trilist): new_tcoords = np.zeros([mesh_points.shape[0], 2]) new_tcoords[mesh_trilist] = mesh_tcoords[tex_trilist] mesh_tcoords = new_tcoords # Get the texture path - it's fine not to have one defined try: self.relative_texture_path = shape.appearance.texture.url[0] except (AttributeError, IndexError): self.relative_texture_path = None # Assumes a single mesh per file self.mesh = MeshInfo(mesh_points, mesh_trilist, mesh_tcoords, mesh_colour_per_vertex) self.meshes = [self.mesh] def _filter_non_triangular_polygons(self, coord_list): # VRML allows arbitrary polygon coordinates, whilst we only support # triangles. 
They are delimited by -1, so we split on them and filter # out non-triangle polygons index_list = coord_list index_list = np.split(index_list, np.where(index_list == -1)[0]) # The first polygon is missing the -1 at the beginning # Have to cast to int32 because that's the default, but on 64bit # machines a single number defaults to int64 np.insert(index_list[0], 0, np.array([-1], dtype=np.int32)) # Filter out those coordinates that are not triangles index_list = [i for i in index_list if len(i[1:]) == 3] # Convert to 2D array index_list = np.array(index_list) # Slice of -1 delimiters return index_list[:, 1:]
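# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# A minimal sketch of the import pipeline defined above; the mesh path is a
# hypothetical example and the call assumes the optional vrml dependency is
# installed. build() returns a TexturedTriMesh, ColouredTriMesh or TriMesh
# depending on what is found alongside the file.
if __name__ == '__main__':
    mesh_path = '/path/to/example_mesh.wrl'  # hypothetical file
    importer = WRLImporter(mesh_path)
    mesh = importer.build()
    print(mesh)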
bsd-3-clause
-1,672,173,169,141,588,000
35.928767
117
0.602196
false
phockett/ePSproc
epsproc/efield/efields.py
1
41523
""" ePSproc Efield class """ import numpy as np from scipy import constants as scipy_constants import pprint import xarray as xr # Additional imports for plotting import matplotlib.pyplot as plt import holoviews as hv from holoviews import opts # Use dictionary to create field defns intially? Gets confusing otherwise. # Should be able to iterate/unpack values here for different cases. # Minimal defn. with (sigma, wavelength) or (bandwidth, wavelength), in working units. # Edef = {'sigma': 15, # 'l0': 800 # } class Efield(): ''' Basic class for handling E-field generation, data and related functions. Currently set for field creation based on Gaussian defined in time-domain. Defaults to a single (E,t) point if no parameters are supplied - this can be used with :py:func:`epsproc.geomFunc.EPR` Starting to look a bit top-heavy... passing directly to Xarray might save some structural overhead here? Define Gaussian field in the time-domain, with amplitude $E_0$, width $\sigma$ and carrier frequency $\omega_0$: .. math:: E(\omega_0,t)=E_{0}e^{-(t^2/2\sigma^{2})}e^{i\omega_0 t} Carrier frequency in rad/s: .. math:: \omega_0 = 2\pi \Omega_0 FWHM and Gaussian width (see https://en.wikipedia.org/wiki/Gaussian_function for full definitions): .. math:: FWHM = 2\sigma\sqrt{2ln2} Parameters ---------- If no parameters are passed, return single point with E0 = 1 For a time-dependent field, pass at least sigma. E0 : float, optional, default = None Set (relative) field strength. If None, set E0 = 1 sigma : float, optional, default = None Set Gaussian width :math:\sigma, where :math:FWHM=2\sigma\sqrt{2ln2} t : array, optional, default = None Set time axis for E-field. If None, defaults to np.arange(-(5*sigma), 5*sigma, self.dt), and self.dt = 0.01*sigma dt : float, optional, default = None Set time-step. Default will set from t-axis, or self.dt = 0.01*sigma if sigma only passed. Pass here to override. l0 : float, optional, default = None Central wavelength in wavelength working units. If supplied this will be used to set f0 and w0. f0, w0 : float, optional, default = None Carrier frequency in freq. units, and :math:\omega_0 in (rad/s). If only f0 supplied, w0 will be calculated. If f0 and w0 supplied, w0 will be used directly. If None, this will be ignored. units : dictionary, optional, default = None Set units. If not passed, default dict will be created with units (THz, nm, fs) set. TODO: - best way to handle unit conversions? Did some of this for FROG code already? Yes, see python\test_codes\E_fields_redux_231118_class.py - Consider other structures here, dataclass or namedtuple? https://stackoverflow.com/questions/354883/how-do-i-return-multiple-values-from-a-function - Xarray conversion? Use Xarray directly in class? - Bug with FFT/iFFT code? If shift=True and ishift=True then get extra freq components on iFFT - must be bug in axis assignment/conversion? OK if shift = False. ''' def __init__(self, Edef = None, units = None, verbose = True): # , E0 = None, sigma = None, A = 1, t = None, dt = None, l0 = None, f0 = None, w0 = None, units = None): #*** Define master list of parameters & init as empty or with defaults # If no parameters are passed, this can be used to create a defn template self.Edef = {'Pulse' : {'type' : 'Gaussian', # Eventually should generalise this to allow for other pulse types. 'domain' : 't', # Set a domain label here to switch between (t,E) domain pulse defn. - MAY WANT TO MOVE THIS TO SEPARATE LIST FN? 
'dfft' : 'f', # Set conjugate domain (for FFT) 'sigma' : None, 'E0' : 1, # Set magnitude of field 'CEP':0, # Set CEP, only used if pulse carrier freq is also defined 'A' : 1, # Set A for Gaussian - should just set as E0? 'FWHM' : None, 'p' : 0 # Set polarization state. For single values {-1,0,+1} this will just label output. For 'XY' calc. spherical terms. # 'origin' : 0 # Set origin for domain }, 'Freq' : {'l' : None, # Set defns. for carrier, (wavelength, freq, ang. freq). Assumed to be in working units. Also use to index units later. 'f' : None, 'w' : None, # 'CEP':0, # Set CEP, only used if pulse carrier freq is also defined - SET IN PULSE for now, due to use of assignment loop for Freq terms. # 'Ehv' : None # Should add here, but need to fix assignment routine first! Now set by fConv() method. }, 'Ehv' : {'Ef' : None, # Defined energy domain to hold field converted to E(hv) units 'axis' : None }, 't' : {'Ef' : None, 'axis' : None, # Set generic labels here to allow for (t,f) axis shared methods. 'delta' : None }, 'f' : {'Ef' : None, 'axis' : None, 'delta' : None }, # 'EField' : {'Et' : None, # 'Ef' : None # }, # 'Spectrogram' : {'gate' : None, # 'data' : None # }, 'Units' : { # 'f':{'value':1e12, 'unit':'THz'}, # Set this as a derived unit, 1/t, below, otherwise may get discrepancies in unit conversion. 'l':{'value':1e-9, 'unit':'nm'}, 't':{'value':1e-15, 'unit':'fs'}, 'Ehv':{'value':1, 'unit':'eV'} }, # Global FFT settings # May have issues here with shift and ishift - always need the latter...? # Bug with FFT/iFFT code? If shift=True and ishift=True then get extra freq components on iFFT - must be bug in axis assignment/conversion? OK if shift = False. 'FFT' : {'shift':False, 'ishift':True, 'pad':True, 'positiveHalf':False, 'phaseMaskFlag':False, 'thres':1e-3} } self.Emod = {'N':0, 0:{}} # Set empty dict to hold modified fields (propagated, shaped etc.). Use N to hold next field to update value. 
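        # Illustrative note (assumed example values, not from the original module):
        # the Edef argument passed in should mirror the nested master dictionary above,
        # e.g. a minimal time-domain Gaussian definition is
        #     Edef = {'Pulse': {'sigma': 15.0}, 'Freq': {'l': 800.0}}
        # The flat form sketched in the module header ({'sigma': 15, 'l0': 800}) would be
        # skipped by the assignment loop below with a "not recognised" message.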
# Assign any passed values # Skip if no Edef is passed (will just set blank dict) if Edef is not None: for key in Edef: if key in self.Edef: for subkey in Edef[key]: if subkey in self.Edef[key]: # print(f'Setting Item {subkey} in dict {key} to {Edef[key][subkey]}') self.Edef[key][subkey] = Edef[key][subkey] else: print(f'Item {subkey} in dict {key} not recognised.') else: print(f'Key {key} not recognised.') #*** Set working units for conversions # Set f units self.Edef['Units']['f'] = {'value': np.round(1/self.Edef['Units']['t']['value']), 'unit': f"1/{self.Edef['Units']['t']['unit']}"} # Derived unit # Set as Hz in specific cases if self.Edef['Units']['f']['value'] == 1e15: self.Edef['Units']['f']['unit'] = 'PHz' if self.Edef['Units']['f']['value'] == 1e12: self.Edef['Units']['f']['unit'] = 'THz' # Set units for E - NOW SET DERIVED UNITS via fConv() method # self.Edef['Units']['E'] = self.Edef['Units']['f'].copy() # Set c in working units self.Edef['Units']['c'] = {'value': scipy_constants.c * self.Edef['Units']['t']['value']/self.Edef['Units']['l']['value'], 'unit': f"{self.Edef['Units']['l']['unit']}/{self.Edef['Units']['t']['unit']}"} # self.Edef['Units']['c']['value'] = scipy_constants.c * self.Edef['Units']['t']['value'] /self.Edef['Units']['l']['value'] #**** Calculate field and derived properties if set self.setEf() # Set description string - use """ format string, or ""\ multiline domain = self.Edef['Pulse']['domain'] self.Estring = f"""{self.Edef['Pulse']['type']} pulse: $\sigma$={self.Edef['Pulse']['sigma']} {self.Edef['Units'][domain]['unit']}, FWHM={self.Edef['Pulse']['FWHM']:.3f} {self.Edef['Units'][domain]['unit']}, l0={self.Edef['Freq']['l']:.3f} (dl={self.Edef['Pulse']['dl']:.3f}) {self.Edef['Units']['l']['unit']}, f0={self.Edef['Freq']['f']:.3f} (df={self.Edef['Pulse']['dw']:.3f}) {self.Edef['Units']['f']['unit']} (bandwidths for Gaussian transform limited pulses)""" # Print summary details if verbose: self.printDef() def printDef(self): print('Pulse properties dictionary set:') pp = pprint.PrettyPrinter(indent=4) # Print defns. for key in ['Pulse', 'Freq', 'Units', 'FFT']: print(f'{key} settings:') pp.pprint(self.Edef[key]) # Print domain details for key in [self.Edef['Pulse']['domain'], self.Edef['Pulse']['dfft']]: if self.Edef[key]['Ef'] is not None: print(f'{key} domain settings:') print(f"Points = {len(self.Edef[key]['Ef'])}, delta = {self.Edef[key]['delta']}") else: print(f"{key} domain not set") # Print field details for key in ['EField', 'Spectrogram']: pass def setEf(self): """ Calculate pulse properties and E-fields based on Edef. """ # Set pulse in time-domain if self.Edef['Pulse']['domain'] == 't': # Set pulse FWHM or sigma if defined. # If both values are preset, only sigma is used. if (self.Edef['Pulse']['sigma'] is not None) and (self.Edef['Pulse']['type'] == 'Gaussian'): self.FWHMGau() elif(self.Edef['Pulse']['FWHM'] is not None) and (self.Edef['Pulse']['type'] == 'Gaussian'): self.sigmaGau() # Set t axis if required if (self.Edef['t']['axis'] is None) and (self.Edef['Pulse']['sigma'] is not None): if self.Edef['t']['delta'] is None: self.Edef['t']['delta'] = 1e-3*self.Edef['Pulse']['sigma'] # Default step size for t axis, relative to sigma. May get large however! 
self.Edef['t']['axis'] = np.arange(-(5*self.Edef['Pulse']['sigma']), 5*self.Edef['Pulse']['sigma'], self.Edef['t']['delta']) # No pulse in this case, just set to single point (t=0) elif (self.Edef['t']['axis'] is None) and (self.Edef['Pulse']['sigma'] is None): self.Edef['t']['axis'] = [0] # If t-axis is passed, set dt from this - assumes linear axis # Use length here to allow for list or np.array types if (len(self.Edef['t']['axis']) > 1) and (self.Edef['t']['delta'] is None): self.Edef['t']['delta'] = np.abs(self.Edef['t']['axis'][1]-self.Edef['t']['axis'][0]) # Check and set carrier freq if set if any(self.Edef['Freq'].values()): # Check which value is set, and set missing values. # Neater/slicker way to do this? Recursively? for key in self.Edef['Freq']: if self.Edef['Freq'][key] is not None: refKey = key refValue = self.Edef['Freq'][key] # print(refValue) if refKey is 'w': refValue *= self.Edef['Units']['f']['value']/2*np.pi # TODO: set for Ehv case. # if refKey is 'Ehv': # refValue *= self.Edef['Units']['f']['value']/2*np.pi # else: # refValue *= self.Edef['Units'][refKey]['value'] # Convert to real units - not required if c in working units for key in self.Edef['Freq']: if key not in [refKey, 'w']: # self.Edef['Freq'][key] = (scipy_constants.c/refValue)/self.Edef['Units'][key]['value'] # Convert between wavelength and freq., and set to working units self.Edef['Freq'][key] = self.Edef['Units']['c']['value']/refValue # With c in working units if refKey is not 'w': # Set w0 in working units (rad/[unit t]) # self.Edef['Freq']['w'] = 2*np.pi * self.Edef['Freq']['f'] * self.Edef['Units']['f']['value'] * self.Edef['Units']['t']['value'] self.Edef['Freq']['w'] = 2*np.pi * self.Edef['Freq']['f'] self.ECalc() # Set defined field self.EFFT() # Set FFT field # Set fields in Emod too - may eventually replace above with this? for key in [self.Edef['Pulse']['domain'], self.Edef['Pulse']['dfft']]: self.Emod[self.Emod['N']][key] = self.Edef[key] self.Emod['N'] += 1 #***************** Basic generators # Define Gaussian pulse in time domain, if carrier freq. is not defined calc. envelope only # Sigma = Gaussian width, FWHM = 2 # Define Gaussian def Gau(self): # g = np.exp(-0.5*self.Edef['Pulse']['A']*(self.Edef['t']['axis']**2)/(self.Edef['Pulse']['sigma']**2)) # g = np.exp(-0.5*self.Edef['Pulse']['A']*((self.Edef[self.Edef['Pulse']['domain']]['axis']/self.Edef['Pulse']['sigma'])**2)) g = np.exp(-0.5*self.Edef['Pulse']['A']*(self.Edef[self.Edef['Pulse']['domain']]['axis']**2)/(self.Edef['Pulse']['sigma']**2)) return g # Define FWHM from sigma, for a Gaussian pulse def FWHMGau(self): self.Edef['Pulse']['FWHM'] = 2*np.sqrt(2*np.log(2))*self.Edef['Pulse']['sigma'] self.Edef['Pulse']['dw'] = 0.44/self.Edef['Pulse']['sigma'] # Spectral width for a Gaussian pulse, working units, sigma*tUnit to give in Hz # This is a bit of a fudge - need to decide where to put dw, and E domain defn. 
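        # Worked numbers (illustrative, assuming the default fs working units):
        # for sigma = 15 fs a transform-limited Gaussian gives
        # FWHM = 2*sqrt(2*ln2)*15 ~= 35.3 fs and dw = 0.44/15 ~= 0.0293 fs^-1,
        # i.e. roughly 29.3 THz of spectral bandwidth.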
if self.Edef['Freq']['l'] is not None: self.Edef['Pulse']['dl'] = (self.Edef['Pulse']['dw']*self.Edef['Freq']['l']**2)/self.Edef['Units']['c']['value'] # self.Edef['Pulse']['dl'] = ((self.Edef['Pulse']['dw']*self.Edef['Freq']['l']**2)/scipy_constants.c) # Spectral width in wavelength units # print(f"For sigma={self.Edef['Pulse']['sigma']}: FWHM={self.Edef['Pulse']['FWHM']:.3f}, spectral width (transform limit)={self.Edef['Pulse']['dw']:.3f}") # Set field based on defined pulse parameters def ECalc(self): domain = self.Edef['Pulse']['domain'] self.Edef[domain]['Ef'] = self.Edef['Pulse']['E0'] * self.Gau() if (self.Edef['Freq']['w'] is not None) and (domain == 't'): # Only valid for t-domain defn. # print('OK') # print(self.Edef['Freq']['w']) # print(self.Edef[domain]['axis']) # print(np.exp(1.0j*self.Edef['Freq']['w']*self.Edef[domain]['axis'])) # self.Edef[domain]['Ef'] = self.Edef[domain]['Ef'] * np.exp(1.0j*self.Edef['Freq']['w']*self.Edef[domain]['axis']/self.Edef['Units'][domain]['value']) # self.Edef[domain]['Ef'] = self.Edef[domain]['Ef'] * np.exp(1.0j*self.Edef['Freq']['w']*self.Edef[domain]['axis']*self.Edef['Units'][domain]['value']) self.Edef[domain]['Ef'] = self.Edef[domain]['Ef'] * np.exp(1.0j*(self.Edef['Freq']['w']*self.Edef[domain]['axis'] - self.Edef['Pulse']['CEP'])) #********************* FT functions # Set other domain field as FFT # Define spectral domain pulse as FFT(E(t)). Return normalised freq. axis if t and dt are defined. def EFFT(self): #, pad = True, positiveHalf = True, phaseMaskFlag = False, thres = 1e-3): # Now set in dict. domain = self.Edef['Pulse']['domain'] dfft = self.Edef['Pulse']['dfft'] # Use zero-padding for higher resolution FFT result? if self.Edef['FFT']['pad'] is True: n = np.int(2**(np.ceil(np.log2(self.Edef[domain]['Ef'].shape[0]))+3)) nAxis = n else: n = None # Default value to pass to np.fft for no padding nAxis = self.Edef[domain]['axis'].shape[-1] Ebar = np.fft.fft(self.Edef[domain]['Ef'], n=n) # No fft shift, axis [0 .... +ve .... -ve], see https://numpy.org/doc/stable/reference/generated/numpy.fft.fftfreq.html?highlight=fft axis = np.fft.fftfreq(nAxis, d=self.Edef[domain]['delta']) if self.Edef['FFT']['shift']: Ebar = np.fft.fftshift(Ebar) # Apply fft shift to move 0 to centre of range axis = np.fft.fftshift(axis) # Set for full or half FT if self.Edef['FFT']['positiveHalf']: if self.Edef['FFT']['shift']: inds = np.arange(np.int(Ebar.shape[0]/2), Ebar.shape[0]) # Set for half the range, starting from centre else: inds = np.arange(0, np.int(Ebar.shape[0]/2)) # Set for half the range, starting at 0 self.Edef[dfft]['Ef'] = Ebar[inds] self.Edef[dfft]['axis'] = axis[inds] self.Edef[dfft]['delta'] = axis[1]-axis[0] else: self.Edef[dfft]['Ef'] = Ebar self.Edef[dfft]['axis'] = axis self.Edef[dfft]['delta'] = axis[1]-axis[0] if self.Edef['FFT']['phaseMaskFlag']: self.phaseMask(domain = dfft) # # Function for checking for modified fields & sorting # def checkEmod(self): # # # Loop over domains # # for domain in [self.Edef['Pulse']['domain'], self.Edef['Pulse']['dfft']]: # # # Check for existing mod fields # # if domain in self.Emod.keys(): # # N = list(self.Emod[domain].keys()) # # # Should change formatting here, may want domain_N to allow for multiple cases, and looping. # # if (domain + '_mod') in self.Edef.keys(): # # N = list(self.Edef[domain + '_mod'].keys()) # # else: # # self.Edef[domain + '_mod'] = {} # # N = 0 # # Check if entries exist # if self.Emod: # N = self.Emod.N # else: # N = 0 # # Set # Calculate iFFT(E). 
# This, sort of, assumes that the field is defined as E(w), and the spectral phase is modified. # But could be used for other cases. # Emod is used if passed, otherwise uses self.Edef[dfft]['Ef'] # 27/04/20 Modified to use self.Emod for fields. # In this case, send either Emod to use this directly, or Ninput to use self.Emod[Ninput]. # Set comment = '' to pass notes on field generation. def EiFFT(self, Emod = None, Ninput = None, comment = ''): #, f=None, pad=False): domain = self.Edef['Pulse']['domain'] dfft = self.Edef['Pulse']['dfft'] Nlast = self.Emod['N'] - 1 # Most recent field index, use this as input if nothing else specified # Set field based on input - this is currently a bit ugly! # if (Emod is None) and (Ninput is None): # if dfft in self.Emod[Nlast]: # Emod = self.Emod[Nlast][dfft]['Ef'] # Default to most recent field if it exists # EmodAxis = self.Emod[Nlast][dfft]['axis'] # else: # Emod = self.Edef[dfft]['Ef'] # Revert to original field defn. if not supplied # EmodAxis = self.Edef[dfft]['axis'] # elif (Emod is None) and (Ninput is not None): # Set specific field from Emod # Emod = self.Emod[Ninput][dfft]['Ef'] # EmodAxis = self.Emod[Ninput][dfft]['axis'] # Rewrite for updated Emod dict. N = Nlast # Set default if Ninput is not None: N = Ninput if Emod is None: Emod = self.Emod[N][dfft]['Ef'] # Default to most recent field if it exists EmodAxis = self.Emod[N][dfft]['axis'] if 'comment' in self.Emod[N][dfft].keys(): comment += self.Emod[N][dfft]['comment'] # Propagate comment # Transform back to time-domain, and center # Eifft = np.fft.ifftshift(np.fft.ifft(Emod)) # With shift Eifft = np.fft.ifft(Emod) # Without shift axis = np.fft.fftfreq(Eifft.shape[-1], d=self.Edef[dfft]['delta']) if self.Edef['FFT']['ishift']: Eifft = np.fft.ifftshift(Eifft) # Apply fft shift if set axis = np.fft.ifftshift(axis) # Set for full or half FT - if used with iFFT shift and a pulse center at 0, this will slice result. inds = np.arange(0, Eifft.shape[0]) # Set for full range if self.Edef['FFT']['positiveHalf']: Eifft /= 2.0 # Correct amplitudes, get x2 otherwise if positiveHalf is True. if self.Edef['FFT']['ishift']: inds = np.arange(np.int(Eifft.shape[0]/2), Eifft.shape[0]) # Set for half the range, starting from centre else: inds = np.arange(0, np.int(Eifft.shape[0]/2)) # Set for half the range, starting at 0 # self.Edef[domain]['Ef'] = np.c_[self.Edef[domain]['Ef'], Eifft] # Basic stacking OK if pad=False and positiveHalf=False, otherwise axis lengths different # if self.Edef['FFT']['positiveHalf']: # inds = np.int(Eifft.shape[0]/2) # Set for half the range # # Set as new field, or as new dict? # # Should change formatting here, may want domain_N to allow for multiple cases, and looping. # if (domain + '_mod') in self.Edef.keys(): # N = list(self.Edef[domain + '_mod'].keys()) # else: # self.Edef[domain + '_mod'] = {} # N = 0 # Set pair of fields in output # Always set to new output, or check for pair...? if domain in self.Emod[Nlast]: # If conjugate domain is already set, create a new field pair... Noutput = Nlast + 1 self.Emod[Noutput] = {} self.Emod[Noutput][dfft] = {'Ef':Emod, 'axis':EmodAxis, 'domain':domain } else: Noutput = Nlast # ...otherwise use input index. self.Emod[Noutput][domain] = {'Ef':Eifft[inds], 'axis':axis[inds], 'domain':domain, 'comment':comment # Add passed comment here, may also want to autogenerate note on field source? 
} #if f: # t = np.fft.ifftfreq(f.shape[-1]) #else: # t = None # TODO def finst(self): """Calculate f(t) as instantaneous freq.""" print('Not impemented') #*************** Phase modification fns. # TODO: add more sophisticated fns, see, e.g., froglib.phasemanipulations, for removing linear and phase offsets. # Mask phase away from pulse? def phaseMask(self, domain = None, thres = None): """Mask pulse phase away from main features. Should convert to use masked arrays for reversibility, or just return a mask (currently overwrites Ef).""" if thres is None: thres = self.Edef['FFT']['thres'] # Use global thres if not passed. thres = thres * np.abs(self.Edef[domain]['Ef']).max() self.Edef[domain]['Ef'][np.abs(self.Edef[domain]['Ef']) < thres] = 0 # Basic mask to zero away from features # self.Edef[domain]['Ef'][np.abs(self.Edef[domain]['Ef']) < thres] = np.nan # Set as NaNs - will remove points from plots, but also throws errors with np.angle() def setPhase(self, phaseVec = None, domain = None): """Set imaginary field terms (set phase) for specified domain & recalculated FFT.""" # Example: set quadratic phase # Set quadratic phase (corresponds to linear chirp), phi=A*f^2 # A = 0.1/fUnit # Ewq = np.abs(E2w)*np.exp(1.0j*(A*(f-(f0/tUnit)))**2) # Transform back to time-domain # E2tmod = np.fft.ifftshift(np.fft.ifft(Ewq)) pass # TODO: impement, and add original & modified field placeholders. Or set as new rows in Ef array? def removePhase(self, domain = None): """Remove imaginary field terms (reset phase).""" if domain is None: for item in ['t', 'f']: self.Edef[item]['Ef'] = np.abs(self.Edef[item]['Ef']) else: self.Edef[domain]['Ef'] = np.abs(self.Edef[domain]['Ef']) def chirp(self, A, resetPhase=False, comment = None): """ Add quadratic phase (chirp) in spectral domain. Requires an E-field object, and chirp parameter A. .. :math: phi=A*(f-f0)^2 TODO: check Diels for nomenclature here. """ domain = 'f' Nlast = self.Emod['N'] # Last output slot, use N-1 as input if nothing else specified # Set field - checks no longer required as now Emod[0] set at init (in setEf()). # try: # Ew = self.Emod[Nlast-1][domain]['Ef'] # EwAxis = self.Emod[Nlast-1][domain]['axis'] # except KeyError: # Ew = self.Edef[domain]['Ef'] # EwAxis = self.Edef[domain]['axis'] Ew = self.Emod[Nlast-1][domain]['Ef'] EwAxis = self.Emod[Nlast-1][domain]['axis'] # Remove existing phase - this is only the linear term if starting from a Gaussian pulse if resetPhase: Ew = np.abs(Ew) # Add chirp # Ewq = np.abs(Ew)*np.exp(1.0j*(A*(self.Edef[domain]['axis']-self.Edef['Freq']['f']))**2) # Ewq = Ew*np.exp(1.0j*A*(self.Edef[domain]['axis']-self.Edef['Freq']['f'])**2) Ewq = Ew*np.exp(1.0j*A*(EwAxis-self.Edef['Freq']['f'])**2) # Set as new field, or as new dict? # Should change formatting here, may want domain_N to allow for multiple cases, and looping. # Or nest this? Or concatenate? (-- Only if domain axes are identical.) 
# if (domain + '_mod') in self.Edef.keys(): # N = list(self.Edef[domain + '_mod'].keys()) # else: # self.Edef[domain + '_mod'] = {} # N = 0 # self.Edef[domain + '_mod'][N] = {'Ef':Ewq, # 'axis':self.Edef[domain]['axis'], # 'domain':domain # } self.Emod[Nlast] = {} self.Emod[Nlast][domain] = {'Ef':Ewq, 'axis':EwAxis, 'domain':domain, 'comment': f'$E_{{{Nlast-1}}}$, phase {domain} chirped, A={A}', 'A':A } self.Emod['N'] += 1 # Update indexer # self.EiFFT(Emod = Ewq) # Set tmod pulse via ifft self.EiFFT() # Set tmod pulse via ifft #***************** Spectrograms # Basic (Frog) spectrograms - code adapted from Froglib, https://github.com/xmhk/froglib # For code implementing various Frog methods, see "E_fields_redux_231118_class.py" - to be implemented here as frog() method def calcSpectrogram(self, signal = None, gate = None, fType = 'blind', N = None, verbose = False): domain = 't' # Set signal and gate fields. If not passed, use most recently set fields. if N is None: N = self.Emod['N'] - 1 if signal is None: signal = self.Emod[N][domain]['Ef'] elif type(signal) is np.ndarray: pass # elif type(signal) is ####### May want to allow for passing of pulse defn. dictionary here? gateObj = None if gate is None: gate = signal # Default to signal, or to short gaussian...? elif type(gate) is float: # Take a passed value as a Gaussian width...? pass elif type(gate) is dict: # Generate gate pulse as new Ef object gateObj = Efield(gate, verbose = verbose) gate = gateObj.Edef[domain]['Ef'] # elif type(signal) is np.ndarray: # pass # TODO: # - Error checking, currently needs square array. # - Downsampling for cases with large FFT axis - just select ROI around features (see Frog code?) # - Methods, see "E_fields_redux_231118_class.py" for more frog types. # Following code in froglib... nn = len(signal) n2 = int(nn / 2) # (1) Outer product of two fields (time-domain), blind Frog case # ap = np.outer(signal, gate) # (1) Outer product of two fields (time-domain), depending on type of Frog # NOTE field ordering matters for X-Frog definitions. # Set options using dictonary (see https://simonwillison.net/2004/May/7/switch/ and https://stackoverflow.com/questions/60208/replacements-for-switch-statement-in-python) ap = { 'SHG': lambda f1,f2: np.outer(f1, f2) + np.outer(f2, f1), # SHG case, symmetric in time 'blind': lambda f1,f2: np.outer(f1, f2), # Blind case, just two fields 'SD': lambda f1,f2: np.outer(f1**2,f2.conjugate()), # SD classic 'SDr': lambda f1,f2: np.outer(f2**2,f1.conjugate()), # SD classic - field ordering reversed, matters in X-Frog case # 'SDb': lambda f1,f2: np.outer(f1,f2.conjugate()**2), 'SD1': lambda f1,f2: np.outer(f1, f1.conjugate()*f2), # Various options for alternative SD cases, depending on non-linearity and field ordering 'SD2': lambda f1,f2: np.outer(f1*f2,f2), 'SD3': lambda f1,f2: np.outer(f1,f1*f2.conjugate()), 'PG': lambda f1,f2: np.outer(f1, np.abs(f2)**2), # PG classic - results match Trebino for cubic case (flipped relative to SD) 'PGr': lambda f1,f2: np.outer(f2, np.abs(f1)**2), # PG classic - field ordering reversed, matters in X-Frog case 'PG1': lambda f1,f2: np.outer(np.abs(f1)**2, f2), # PG classic - this defn. identical to SD case. 
'TG1': lambda f1,f2: np.outer(f1, f2**2) # TG options # 'TG2': lambda f1,f2: np.outer(f1, f2**2) }[fType](signal, gate) # (2) Defined empty arrays to hold results m1 = np.zeros(np.shape(ap), dtype=np.complex128) m2 = np.zeros(np.shape(ap), dtype=np.complex128) # (3) Loop over input and roll - effectively sets tau for each row for i in range(n2 - 1, -n2, -1): m1[i + n2, :] = np.roll(ap[i + n2, :], -i) m1 = np.transpose(m1) # (4) Roll and FFT to set a freq. axis for i in range(nn): m2[i, :] = np.roll(np.fft.fft(np.roll(m1[i, :], +n2)), -n2) # time-freq m2 = np.transpose(m2) # freq - time m2 = m2 / np.max(np.max(np.abs(m2))) # return m2 # Set outputs - should just set in Emod....? self.Spectrogram = {'siganl':signal, 'gate':gate, 'gateObj':gateObj, 'data':m2, 'N':N } #***************** Derived domains/unit conversion # Set other domains via copy & rescale - would be neater just to store multiple axes, but duplicate for now. def fConv(self): """Convert freq. domain axis to lenght & energy units. and set fields.""" domain = 'f' for N in np.arange(0, self.Emod['N']): # Set wavelength scale self.Emod[N]['l'] = self.Emod[N][domain].copy() # Without .copy() this will just be a pointer. self.Emod[N]['l']['axis'] = self.Edef['Units']['c']['value']/self.Emod[N]['f']['axis'] # Set energy scale self.Emod[N]['Ehv'] = self.Emod[N][domain].copy() self.Emod[N]['Ehv']['axis'] = (scipy_constants.h/scipy_constants.e) * self.Emod[N]['f']['axis']/self.Edef['Units']['t']['value'] #***************** Conversion... # Convert a single set of fields to an Xarray Dataset # With spectrogram + looping over domains - assumes axis sizes are concomittant I think def toXarrayDS(self, N = None): if N is None: N = self.Emod[N] -1 # Default to last set field set ds = xr.Dataset() # Init empty dataset, then loop over fields for domain in self.Emod[N].keys(): domainName = f'E{domain}' # Set data name - can't be same as coord names in this case if domain in [self.Edef['Pulse']['domain'], self.Edef['Pulse']['dfft']]: # Key dims, set as unlinked ds[domainName] = ((domain), self.Emod[N][domain]['Ef']) ds.coords[domain] = self.Emod[N][domain]['axis'] else: ds[domainName] = ((domain), self.Emod[N][domain]['Ef']) ds.coords[domain] = ((self.Edef['Pulse']['dfft']), self.Emod[N][domain]['axis']) # For derived dims, set as linked to dfft dim - should set this more cleanly elsewhere...? # Set units (will be used for plotting) ds[domain].attrs['units'] = self.Edef['Units'][domain]['unit'] ds[domainName].attrs['units'] = 'arb' # Assign spectrogram if hasattr(self, 'Spectrogram'): ds['spectrogram'] = ((self.Edef['Pulse']['dfft'], self.Edef['Pulse']['domain']), np.abs(self.Spectrogram['data'])) self.Emod[N]['ds'] = ds #***************** Plotting # Basic plotting # TODO: check plotTypes vs. defns. in ePSproc # TODO: modify to plot sets of fields, either stacked or single plot. # 28/04/20: Changed to plot from Emod[N] dicts # TODO: sort out axis limits - should pass to override or set in class. Also change to, e.g. Holoviews, for more interaction... 
def plot(self, plotType = 'phaseUW', Nplot = None, domainList = None, thres = 1e-2): '''Basic plot with Matplotlib''' # plt.plot(self.t, self.Et) if Nplot is None: Nplot = np.arange(0, self.Emod['N']) # Default plots for domain + dfft if domainList is None: domainList = [self.Edef['Pulse']['domain'], self.Edef['Pulse']['dfft']] for domain in domainList: # Set up figure - do this *before* looping over N (fields) fig, ax1 = plt.subplots() if plotType == 'phaseUW': ax2 = ax1.twinx() # lText = [] # Plot selected fields for N in Nplot: lString = f'$E_{{{N}}}$ ' # Plot according to type if plotType == 'complex': # Plot real + imag field components ax1.plot(self.Emod[N][domain]['axis'], self.Emod[N][domain]['Ef'].real, '-', label = lString + 'Re') ax1.plot(self.Emod[N][domain]['axis'], self.Emod[N][domain]['Ef'].imag, '-', label = lString + 'Im') ax1.plot(self.Emod[N][domain]['axis'], np.abs(self.Emod[N][domain]['Ef']), '--', label = f'|{lString}|') # plt.legend(['Re', 'Im', 'Abs']) # lText.extend([f'{N} Re', f'{N} Im', f'{N} Abs']) elif plotType == 'field': # Plot real-valued field (E + E*) ax1.plot(self.Emod[N][domain]['axis'], 0.5*(self.Emod[N][domain]['Ef'] + self.Emod[N][domain]['Ef'].conj()), '-', label = f'{lString}+{lString}*') elif plotType == 'abs': # Plot envelope only, |E| ax1.plot(self.Emod[N][domain]['axis'], np.abs(self.Emod[N][domain]['Ef']), '-', label = f'|{lString}|') elif plotType == 'phase': # Plot magnitude + phase ax1.plot(self.Emod[N][domain]['axis'], np.abs(self.Emod[N][domain]['Ef']),'-', label = f'|{lString}|') ax1.plot(self.Emod[N][domain]['axis'], (np.angle(self.Emod[N][domain]['Ef'])), '--', label = lString + 'Phase') # lText.extend([f'{N} Abs', f'{N} Phase']) # lText.extend((f'{N} Abs', f'{N} Phase')) # ax1.legend(['Abs', 'Phase']) elif plotType == 'phaseUW': # Plot magnitude + phase, unwrapped # Single axis # plt.plot(self.Edef[domain]['axis'], np.abs(self.Edef[domain]['Ef']), self.Edef[domain]['axis'], np.unwrap(np.angle(self.Edef[domain]['Ef']))) # Test secondary_y - not working # plt.plot(self.Edef[domain]['axis'], np.abs(self.Edef[domain]['Ef'])) # plt.plot(self.Edef[domain]['axis'], np.unwrap(np.angle(self.Edef[domain]['Ef'])), secondary_y=True) # Full ax addressing ax1.plot(self.Emod[N][domain]['axis'], np.abs(self.Emod[N][domain]['Ef']), '-', label = f'|{lString}|') ax2.plot(self.Emod[N][domain]['axis'], np.unwrap(np.angle(self.Emod[N][domain]['Ef'])), '--', label = lString + 'Phase') # plt.legend(['Abs', 'Phase (unwrapped)']) # lText.extend([f'{N} Abs', f'{N} Phase']) # lText.extend((f'{N} Abs', f'{N} Phase')) if plotType != 'phaseUW': plt.ylabel('Amplitude') plt.xlabel(self.Edef['Units'][domain]['unit']) else: ax1.set_ylabel('Amplitude') ax2.set_ylabel('Phase (unwrapped)') ax1.set_xlabel(self.Edef['Units'][domain]['unit']) # plt.xlim((-2.66, 2.66)) # Set for two cycles at 800nm # plt.xlim(-0.5, 0.5) # Set some sensible limits (FFT scales will be large) if domain == 'f': # self.Edef['Pulse']['dfft']: # Hmmm, should be able to do this generically over all domains? With origin + width? # TODO: move origins + widths to domain-specific containers. plt.xlim(0.8*self.Edef['Freq']['f'], 1.2*self.Edef['Freq']['f']) elif domain == 't': scale = [-2.5, 2.5] plt.xlim(scale[0]*self.Edef['Pulse']['FWHM'], scale[1]*self.Edef['Pulse']['FWHM']) else: # Estimate from feature... 
peak = np.abs(self.Emod[N][domain]['Ef']).max() mask = np.abs(self.Emod[N][domain]['Ef']) > peak*thres scale = [self.Emod[N][domain]['axis'][mask].min(), self.Emod[N][domain]['axis'][mask].max()] plt.xlim(scale[0], scale[1]) # Set legend from array or list # plt.legend(Nplot) # plt.legend(lText) # Set legends from labels, per axis object if plotType == 'phaseUW': ax1.legend(loc='upper left') ax2.legend(loc='upper right') else: ax1.legend(loc='upper left') # This sometimes defaults to middle, so set explicitly plt.title(self.Estring + f'\nplotType = {plotType}') plt.show() def plotSpectrogram(self): """ VERY basic spectrogram plotter from old code - for quick testing only. TODO: fix axes for with/withou fft shift. BETTER: use plotSpectrogramHV() instead, this uses full axes as set. """ # Testing - set vars as per old code for brevity S = self.Spectrogram['data'] N = self.Emod['N'] - 1 t = self.Emod[N]['t']['axis'] f = self.Emod[N]['f']['axis'] plt.figure() plt.imshow(np.abs(S)**2, extent = [t[0],t[-1],f[np.int(S.shape[0]/2)-1],f[np.int(S.shape[0]/2)]], aspect='auto') plt.ylim((1.5*(f0/tUnit),2.5*(f0/tUnit))) plt.ylabel('$\Omega$ /THz') plt.xlabel('t /fs') plt.title('Spectrogram') plt.show() def plotSpectrogramHV(self, N = None): # May need to set this directly in notebook? hv.extension('bokeh') # Set N if N is None: N = self.Emod['N'] -1 # Check ds exists if not 'ds' in self.Emod[N].keys(): self.toXarrayDS(N = N) hv_ds = hv.Dataset(self.Emod[N]['ds']['spectrogram']) # hv_ds self.spec = hv_ds.to(hv.Image, kdims=['t','f']) self.spec.opts(width=700, height=700)
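

#***************** Example usage
# Minimal usage sketch (illustrative, not part of the original module's own examples).
# Assumes the default working units set above (fs, nm) and a NumPy version in which
# np.int is still available, since the FFT helpers above rely on it. The parameter
# values (sigma = 15 fs, l0 = 800 nm, chirp A = 0.01) are assumed for illustration only.
if __name__ == '__main__':
    # 15 fs Gaussian pulse with an 800 nm carrier, defined in the time domain.
    Epulse = Efield(Edef={'Pulse': {'sigma': 15.0}, 'Freq': {'l': 800.0}})

    # Add quadratic spectral phase (linear chirp), phi = A*(f - f0)**2, then
    # compare envelopes of the original and chirped fields in both domains.
    Epulse.chirp(A=0.01)
    Epulse.plot(plotType='abs')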
gpl-3.0
7,585,800,800,355,117,000
43.552575
219
0.525155
false
kamcpp/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/operator_pd_diag_test.py
19
3260
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc

import numpy as np
import six
import tensorflow as tf

from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_test_util


@six.add_metaclass(abc.ABCMeta)
class OperatorPDDiagBaseTest(object):

  def setUp(self):
    self._rng = np.random.RandomState(42)

  def _random_pd_diag(self, diag_shape):
    return self._rng.rand(*diag_shape) + 0.1

  @abc.abstractmethod
  def _diag_to_matrix(self, diag):
    pass

  @abc.abstractproperty
  def operator_class(self):
    # Return the operator class that this tests.
    pass

  def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
    # Create a diagonal matrix explicitly.
    # Create an OperatorPDSqrtDiag using the same diagonal.
    # The operator should have the same behavior.
    #
    batch_shape = list(batch_shape)
    diag_shape = batch_shape + [k]

    # The diag is the square root.
    diag = self._random_pd_diag(diag_shape).astype(dtype)
    mat = self._diag_to_matrix(diag).astype(dtype)
    operator = self.operator_class(diag)

    return operator, mat

  def testNonPositiveDefiniteMatrixRaises(self):
    # Singular matrix with one positive eigenvalue and one zero eigenvalue.
    with self.test_session():
      diag = [1.0, 0.0]
      operator = operator_pd_diag.OperatorPDSqrtDiag(diag)
      with self.assertRaisesOpError("assert_positive"):
        operator.to_dense().eval()

  def testNonPositiveDefiniteMatrixDoesNotRaiseIfNotVerifyPd(self):
    # Singular matrix with one positive eigenvalue and one zero eigenvalue.
    with self.test_session():
      diag = [1.0, 0.0]
      operator = operator_pd_diag.OperatorPDSqrtDiag(diag, verify_pd=False)
      operator.to_dense().eval()  # Should not raise


class OperatorPDDiagTest(
    OperatorPDDiagBaseTest, operator_test_util.OperatorPDDerivedClassTest):
  """Most tests done in the base classes."""

  def _diag_to_matrix(self, diag):
    return tf.matrix_diag(diag).eval()

  @property
  def operator_class(self):
    return operator_pd_diag.OperatorPDDiag


class OperatorPDSqrtDiagTest(
    OperatorPDDiagBaseTest, operator_test_util.OperatorPDDerivedClassTest):
  """Most tests done in the base classes."""

  def _diag_to_matrix(self, diag):
    return tf.matrix_diag(diag**2).eval()

  @property
  def operator_class(self):
    return operator_pd_diag.OperatorPDSqrtDiag


if __name__ == "__main__":
  tf.test.main()
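
# Illustrative check (a sketch, not part of the test suite): the two concrete test
# classes above encode that OperatorPDSqrtDiag built from `diag` should densify to
# tf.matrix_diag(diag**2). Under a TF1-era graph session (assumed here; the tests
# themselves use self.test_session()) that could be verified as:
#
#   with tf.Session():
#       diag = np.array([1.0, 2.0])
#       operator = operator_pd_diag.OperatorPDSqrtDiag(diag)
#       np.testing.assert_allclose(
#           tf.matrix_diag(diag**2).eval(), operator.to_dense().eval())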
apache-2.0
6,146,617,093,454,137,000
30.650485
80
0.708589
false
hmenke/espresso
src/python/object_in_fluid/oif_classes.py
1
67795
# Copyright (C) 2010-2018 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import espressomd import numpy as np from .oif_utils import * from espressomd.interactions import OifLocalForces from espressomd.interactions import OifGlobalForces from espressomd.interactions import OifOutDirection class FixedPoint(object): """ Represents mesh points, not connected to any ESPResSo particle. """ def __init__(self, pos, id): if not isinstance(id, int): raise TypeError("Id must be integer.") if not ((len(pos) == 3) and isinstance(pos[0], float) and isinstance(pos[1], float) and isinstance(pos[2], float)): raise TypeError("Pos must be a list of three floats.") self.x = pos[0] self.y = pos[1] self.z = pos[2] self.id = id def get_pos(self): return [self.x, self.y, self.z] def get_id(self): return self.id class PartPoint(object): """ Represents mesh points, connected to ESPResSo particle. """ def __init__(self, part, id, part_id): # part is physical ESPResSo particle corresponding to that particular point if not (isinstance(part, espressomd.particle_data.ParticleHandle) and isinstance(id, int) and isinstance(part_id, int)): raise TypeError("Arguments to PartPoint are incorrect.") self.part = part self.part_id = part_id # because in adding bonds to the particles in OifCell # one needs to know the global id of the particle. self.id = id def get_pos(self): return self.part.pos def get_vel(self): return self.part.v def get_mass(self): return self.part.mass def get_type(self): return self.part.type def get_force(self): return self.part.f def set_pos(self, pos): self.part.pos = pos def set_vel(self, vel): self.part.v = vel def set_force(self, force): self.part.ext_force = force def kill_motion(self): self.part.fix = [1, 1, 1] def unkill_motion(self): self.part.unfix() class Edge(object): """ Represents edges in a mesh. """ def __init__(self, A, B): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))): TypeError("Arguments to Edge must be FixedPoint or PartPoint.") self.A = A self.B = B def length(self): return vec_distance(self.A.get_pos(), self.B.get_pos()) class Triangle(object): """ Represents triangles in a mesh. """ def __init__(self, A, B, C): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))) and (isinstance(C, PartPoint) or (isinstance(C, FixedPoint))): TypeError("Arguments to Triangle must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C def area(self): area = area_triangle( self.A.get_pos(), self.B.get_pos(), self.C.get_pos()) return area class Angle(object): """ Represents angles in a mesh. 
""" def __init__(self, A, B, C, D): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) \ and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))) \ and (isinstance(C, PartPoint) or (isinstance(C, FixedPoint))) \ and (isinstance(D, PartPoint) or (isinstance(D, FixedPoint))): TypeError("Arguments to Angle must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C self.D = D def size(self): angle_size = angle_btw_triangles( self.A.get_pos(), self.B.get_pos(), self.C.get_pos(), self.D.get_pos()) return angle_size class ThreeNeighbors(object): """ Represents three best spatially distributed neighbors of a point in a mesh. """ def __init__(self, A, B, C): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) \ and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))) \ and (isinstance(C, PartPoint) or (isinstance(C, FixedPoint))): TypeError( "Arguments to ThreeNeighbors must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C def outer_normal(self): outer_normal = get_triangle_normal( self.A.get_pos(), self.B.get_pos(), self.C.get_pos()) return outer_normal class Mesh(object): """ Represents a triangular mesh. """ def __init__( self, nodes_file=None, triangles_file=None, system=None, resize=(1.0, 1.0, 1.0), particle_type=-1, particle_mass=1.0, normal=False, check_orientation=True): if (system is None) or (not isinstance(system, espressomd.System)): raise Exception( "Mesh: No system provided or wrong type given. Quitting.") self.system = system self.normal = normal self.nodes_file = nodes_file self.triangles_file = triangles_file self.points = [] self.edges = [] self.triangles = [] self.angles = [] self.neighbors = [] self.ids_extremal_points = [0, 0, 0, 0, 0, 0, 0] if not ((nodes_file is None) or (triangles_file is None)): if not (isinstance(nodes_file, str) and isinstance(triangles_file, str)): raise TypeError("Mesh: Filenames must be strings.") if not ((len(resize) == 3) and isinstance(resize[0], float) and isinstance(resize[1], float) and isinstance(resize[2], float)): raise TypeError("Mesh: Pos must be a list of three floats.") if not isinstance(particle_type, int): raise TypeError("Mesh: particle_type must be integer.") if not isinstance(particle_mass, float): raise TypeError("Mesh: particle_mass must be float.") if not isinstance(normal, bool): raise TypeError("Mesh: normal must be bool.") if not isinstance(check_orientation, bool): raise TypeError("Mesh: check_orientation must be bool.") # reading the mesh point positions from file in_file = open(nodes_file, "r") nodes_coord = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: nodes_coord = filter(None, nodes_coord) # here we have list of lines with triplets of # strings for line in nodes_coord: # extracts coordinates from the string line line = np.array([float(x) for x in line.split()]) coords = np.array(resize) * line tmp_fixed_point = FixedPoint(coords, len(self.points)) self.points.append(tmp_fixed_point) # searching for extremal points IDs x_min = large_number x_max = -large_number y_min = large_number y_max = -large_number z_min = large_number z_max = -large_number for tmp_fixed_point in self.points: coords = tmp_fixed_point.get_pos() if coords[0] < x_min: x_min = coords[0] self.ids_extremal_points[0] = tmp_fixed_point.get_id() if coords[0] > x_max: x_max = coords[0] self.ids_extremal_points[1] = tmp_fixed_point.get_id() if coords[1] < y_min: y_min = coords[1] self.ids_extremal_points[2] = tmp_fixed_point.get_id() if 
coords[1] > y_max: y_max = coords[1] self.ids_extremal_points[3] = tmp_fixed_point.get_id() if coords[2] < z_min: z_min = coords[2] self.ids_extremal_points[4] = tmp_fixed_point.get_id() if coords[2] > z_max: z_max = coords[2] self.ids_extremal_points[5] = tmp_fixed_point.get_id() # reading the triangle incidences from file in_file = open(triangles_file, "r") triangles_incid = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: triangles_incid = filter(None, triangles_incid) for line in triangles_incid: # extracts incidences from the string line incid = np.array([int(x) for x in line.split()]) tmp_triangle = Triangle( self.points[incid[0]], self.points[incid[1]], self.points[incid[2]]) self.triangles.append(tmp_triangle) if check_orientation is True: # check whether all triangles in file had the same orientation; # if not, correct the orientation self.check_orientation() # creating list of edge incidences from triangle incidences # using temporary list of edge incidences tmp_edge_incidences = [] for triangle in self.triangles: pa = triangle.A.id pb = triangle.B.id pc = triangle.C.id if ([pa, pb] not in tmp_edge_incidences) and ([pb, pa] not in tmp_edge_incidences): tmp_edge_incidences.append([pa, pb]) if ([pb, pc] not in tmp_edge_incidences) and ([pc, pb] not in tmp_edge_incidences): tmp_edge_incidences.append([pb, pc]) if ([pa, pc] not in tmp_edge_incidences) and ([pc, pa] not in tmp_edge_incidences): tmp_edge_incidences.append([pa, pc]) for tmp_incid in tmp_edge_incidences: tmp_edge = Edge( self.points[tmp_incid[0]], self.points[tmp_incid[1]]) self.edges.append(tmp_edge) # creating list angles (former bending incidences) from triangle # incidences for edge in self.edges: pa = edge.A.id pb = edge.B.id pc = -1 pd = -1 detected = 0 # detected = number of detected triangles with current edge common # Algorithm is as follows: we run over all triangles and check # whether two vertices are those from current edge. If we find such triangle, # we put the ID of the third vertex to pc and we check if the orientation pa, pb, pc is the same as # was in the triangle list (meaning, that we found one of the following three triples # in the triangle list: pa, pb, pc or pb, pc, pa or pc, pa, pb). # If we have the same orientation, we set orient = 1, otherwise orient = -1. # Then we go further looking for the second triangle. # The second triangle should have the opposite orientation. 
# The normal of the first triangle will be P1P2 x P1P3, of the # second triangle will be P2P4 x P2P3 orient = 0 for triangle in self.triangles: # Run over all triangles and determine the two triangles # with the common current edge if (pa == triangle.A.id) and (pb == triangle.B.id): if detected == 0: # if no triangle with such edge was detected before pc = triangle.C.id detected = 1 orient = 1 else: # if this is the second triangle with this edge, # then also quit the for-loop over triangles pd = triangle.C.id break if (pa == triangle.B.id) and (pb == triangle.C.id): if detected == 0: pc = triangle.A.id detected = 1 orient = 1 else: pd = triangle.A.id break if (pa == triangle.C.id) and (pb == triangle.A.id): if detected == 0: pc = triangle.B.id detected = 1 orient = 1 else: pd = triangle.B.id break if (pa == triangle.B.id) and (pb == triangle.A.id): if detected == 0: pc = triangle.C.id detected = 1 orient = -1 else: pd = triangle.C.id break if (pa == triangle.C.id) and (pb == triangle.B.id): if detected == 0: pc = triangle.A.id detected = 1 orient = -1 else: pd = triangle.A.id break if (pa == triangle.A.id) and (pb == triangle.C.id): if detected == 0: pc = triangle.B.id detected = 1 orient = -1 else: pd = triangle.B.id break if orient == 1: tmp = pd pd = pc pc = tmp tmp_angle = Angle( self.points[pc], self.points[pa], self.points[pb], self.points[pd]) self.angles.append(tmp_angle) # creating list of three neighbors for membrane collision if normal is True: for point in self.points: tmp_neighbors = [] # cycle through edges and select those that contain point for edge in self.edges: # take an edge and copy the nodes of the edge to pa, pb if edge.A.id == point.id: tmp_neighbors.append(edge.B) if edge.B.id == point.id: tmp_neighbors.append(edge.A) # create vectors to all neighbors and normalize them tmp_vectors_to_neighbors = [] p_coords = np.array(point.get_pos()) for neighbor in tmp_neighbors: tmp_vector = neighbor.get_pos() - p_coords tmp_length = norm(tmp_vector) if tmp_length < small_epsilon: raise Exception("Mesh: Degenerate edge. 
Quitting.") tmp_vector /= tmp_length tmp_vectors_to_neighbors.append(tmp_vector) # check all triplets of neighbors and select the one that is best spatially distributed # by adding the corresponding three normalized vectors # and selecting the one with smallest resultant vector n_neighbors = len(tmp_neighbors) min_length = large_number best_neighbors = [ tmp_neighbors[0], tmp_neighbors[1], tmp_neighbors[2]] for i in range(0, n_neighbors): for j in range(i + 1, n_neighbors): for k in range(j + 1, n_neighbors): tmp_result_vector = tmp_vectors_to_neighbors[i] + tmp_vectors_to_neighbors[j] + \ tmp_vectors_to_neighbors[k] tmp_result_vector_length = norm( tmp_result_vector) if tmp_result_vector_length < min_length: min_length = tmp_result_vector_length best_neighbors = [ tmp_neighbors[i], tmp_neighbors[j], tmp_neighbors[k]] # find one triangle that contains this point and compute # its normal vector for triangle in self.triangles: if triangle.A.id == point.id or triangle.B.id == point.id or triangle.C.id == point.id: tmp_normal_triangle = get_triangle_normal( triangle.A.get_pos(), triangle.B.get_pos(), triangle.C.get_pos()) break # properly orient selected neighbors and save them to the # list of neighbors tmp_normal_neighbors = get_triangle_normal( best_neighbors[ 0].get_pos(), best_neighbors[1].get_pos(), best_neighbors[2].get_pos()) tmp_length_normal_triangle = norm(tmp_normal_triangle) tmp_length_normal_neighbors = norm(tmp_normal_neighbors) tmp_product = np.dot(tmp_normal_triangle, tmp_normal_neighbors) / \ (tmp_length_normal_triangle * tmp_length_normal_neighbors) tmp_angle = np.arccos(tmp_product) if tmp_angle > np.pi / 2.0: selected_neighbors = ThreeNeighbors( best_neighbors[0], best_neighbors[1], best_neighbors[2]) else: selected_neighbors = ThreeNeighbors( best_neighbors[0], best_neighbors[2], best_neighbors[1]) self.neighbors.append(selected_neighbors) else: for point in self.points: selected_neighbors = ThreeNeighbors(point, point, point) self.neighbors.append(selected_neighbors) def copy(self, origin=None, particle_type=-1, particle_mass=1.0, rotate=None): mesh = Mesh(system=self.system) mesh.ids_extremal_points = self.ids_extremal_points rotation = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) if rotate is not None: # variables for rotation ca = np.cos(rotate[0]) sa = np.sin(rotate[0]) cb = np.cos(rotate[1]) sb = np.sin(rotate[1]) cc = np.cos(rotate[2]) sc = np.sin(rotate[2]) rotation = np.array( [[cb * cc, sa * sb * cc - ca * sc, sc * sa + cc * sb * ca], [cb * sc, ca * cc + sa * sb * sc, sc * sb * ca - cc * sa], [-sb, cb * sa, ca * cb]]) for point in self.points: # PartPoints are created tmp_pos = point.get_pos() tmp_rotate_pos = np.array(point.get_pos()) # rotation of nodes if rotate is not None: tmp_pos = rotation.dot(tmp_rotate_pos) tmp_pos = [discard_epsilon(tmp_pos[0]), discard_epsilon( tmp_pos[1]), discard_epsilon(tmp_pos[2])] if origin is not None: tmp_pos += np.array(origin) new_part_id = len(self.system.part) # to remember the global id of the ESPResSo # particle self.system.part.add( pos=tmp_pos, type=particle_type, mass=particle_mass, mol_id=particle_type) new_part = self.system.part[new_part_id] new_part_point = PartPoint(new_part, len(mesh.points), new_part_id) mesh.points.append(new_part_point) for edge in self.edges: new_edge = Edge(mesh.points[edge.A.id], mesh.points[edge.B.id]) mesh.edges.append(new_edge) for triangle in self.triangles: new_triangle = Triangle( mesh.points[triangle.A.id], mesh.points[triangle.B.id], mesh.points[triangle.C.id]) 
mesh.triangles.append(new_triangle) for angle in self.angles: new_angle = Angle( mesh.points[angle.A.id], mesh.points[ angle.B.id], mesh.points[angle.C.id], mesh.points[angle.D.id]) mesh.angles.append(new_angle) for neighbors in self.neighbors: new_neighbors = ThreeNeighbors( mesh.points[neighbors.A.id], mesh.points[neighbors.B.id], mesh.points[neighbors.C.id]) mesh.neighbors.append(new_neighbors) return mesh def check_orientation(self): tmp_triangle_list = [] tmp_triangle_list_ok = [] t_ok = None corrected_triangle = None for triangle in self.triangles: tmp_triangle_list.append(triangle) # move the first triangle to the checked and corrected list tmp_triangle_list_ok.append(tmp_triangle_list[0]) tmp_triangle_list.pop(0) while tmp_triangle_list: i = 0 while i < len(tmp_triangle_list): tmp_triangle = tmp_triangle_list[i] for correct_triangle in tmp_triangle_list_ok: # check if triangles have a common edge, if so, check # orientation are_neighbors = True if tmp_triangle.A.id == correct_triangle.A.id: if tmp_triangle.B.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 124 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 142 else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 134 else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 143 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.A.id == correct_triangle.B.id: if tmp_triangle.B.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 412 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 214 else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 413 else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 314 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.A.id == correct_triangle.C.id: if tmp_triangle.B.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 241 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 421 else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 341 else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 431 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.B.id == correct_triangle.A.id: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 234 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 243 else: are_neighbors = False else: if tmp_triangle.B.id == correct_triangle.B.id: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 423 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 324 else: are_neighbors = False else: if tmp_triangle.B.id 
== correct_triangle.C.id: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 342 corrected_triangle = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 432 else: are_neighbors = False else: are_neighbors = False if are_neighbors: # move the tmp_triangle to the checked and corrected # list if t_ok: tmp_triangle_list_ok.append(tmp_triangle) else: tmp_triangle_list_ok.append(corrected_triangle) tmp_triangle_list.pop(i) break i += 1 # replace triangles with checked triangles i = 0 for tmp_triangle in tmp_triangle_list_ok: self.triangles[i] = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) i += 1 # all triangles now have the same orientation, check if it is correct tmp_volume = self.volume() if tmp_volume < 0: # opposite orientation, flip all triangles i = 0 for tmp_triangle in self.triangles: self.triangles[i] = Triangle( tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) i += 1 return 0 def surface(self): surface = 0.0 for triangle in self.triangles: surface += triangle.area() return surface def volume(self): volume = 0.0 for triangle in self.triangles: tmp_normal = get_triangle_normal( triangle.A.get_pos(), triangle.B.get_pos(), triangle.C.get_pos()) tmp_normal_length = norm(tmp_normal) tmp_sum_z_coords = 1.0 / 3.0 * \ (triangle.A.get_pos()[2] + triangle.B.get_pos()[ 2] + triangle.C.get_pos()[2]) volume -= triangle.area() * tmp_normal[ 2] / tmp_normal_length * tmp_sum_z_coords return volume def get_n_nodes(self): return len(self.points) def get_n_triangles(self): return len(self.triangles) def get_n_edges(self): return len(self.edges) def output_mesh_triangles(self, triangles_file=None): # this is useful after the mesh correction # output of mesh nodes can be done from OifCell (this is because their # position may change) if triangles_file is None: raise Exception( "OifMesh: No file_name provided for triangles. Quitting.") output_file = open(triangles_file, "w") for t in self.triangles: output_file.write( str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() return 0 def mirror(self, mirror_x=0, mirror_y=0, mirror_z=0, out_file_name=""): if out_file_name == "": raise Exception( "Cell.Mirror: output meshnodes file for new mesh is missing. Quitting.") if (mirror_x != 0 and mirror_x != 1) or (mirror_y != 0 and mirror_y != 1) or (mirror_z != 0 and mirror_z != 1): raise Exception( "Mesh.Mirror: for mirroring only values 0 or 1 are accepted. 1 indicates that the corresponding coordinate will be flipped. Exiting.") if mirror_x + mirror_y + mirror_z > 1: raise Exception( "Mesh.Mirror: flipping allowed only for one axis. Exiting.") if mirror_x + mirror_y + mirror_z == 1: out_file = open(out_file_name, "w") for p in self.points: coor = p.get_pos() if mirror_x == 1: coor[0] *= -1.0 if mirror_y == 1: coor[1] *= -1.0 if mirror_z == 1: coor[2] *= -1.0 out_file.write(custom_str(coor[0]) + " " + custom_str( coor[1]) + " " + custom_str(coor[2]) + "\n") out_file.close() return 0 class OifCellType(object): # analogous to oif_template """ Represents a template for creating elastic objects. """ def __init__( self, nodes_file="", triangles_file="", system=None, resize=(1.0, 1.0, 1.0), ks=0.0, kslin=0.0, kb=0.0, kal=0.0, kag=0.0, kv=0.0, kvisc=0.0, normal=False, check_orientation=True): if (system is None) or (not isinstance(system, espressomd.System)): raise Exception( "OifCellType: No system provided or wrong type. 
Quitting.") if (nodes_file == "") or (triangles_file == ""): raise Exception( "OifCellType: One of nodesfile or trianglesfile is missing. Quitting.") if not (isinstance(nodes_file, str) and isinstance(triangles_file, str)): raise TypeError("OifCellType: Filenames must be strings.") if not ((len(resize) == 3) and isinstance(resize[0], float) and isinstance(resize[1], float) and isinstance(resize[2], float)): raise TypeError( "OifCellType: Resize must be a list of three floats.") if not (isinstance(ks, float) and isinstance(ks, float) and isinstance(kb, float) and isinstance(kal, float) and isinstance(kag, float) and isinstance(kv, float) and isinstance(kvisc, float)): raise TypeError("OifCellType: Elastic parameters must be floats.") if not isinstance(normal, bool): raise TypeError("OifCellType: normal must be bool.") if not isinstance(check_orientation, bool): raise TypeError("OifCellType: check_orientation must be bool.") if (ks != 0.0) and (kslin != 0.0): raise Exception( "OifCellType: Cannot use linear and nonlinear stretching at the same time. Quitting.") self.system = system self.mesh = Mesh( nodes_file=nodes_file, triangles_file=triangles_file, system=system, resize=resize, normal=normal, check_orientation=check_orientation) self.local_force_interactions = [] self.resize = resize self.ks = ks self.kslin = kslin self.kb = kb self.kal = kal self.kag = kag self.kv = kv self.kvisc = kvisc self.normal = normal if (ks != 0.0) or (kslin != 0.0) or (kb != 0.0) or (kal != 0.0): for angle in self.mesh.angles: r0 = vec_distance(angle.B.get_pos(), angle.C.get_pos()) phi = angle_btw_triangles( angle.A.get_pos(), angle.B.get_pos(), angle.C.get_pos(), angle.D.get_pos()) area1 = area_triangle( angle.A.get_pos(), angle.B.get_pos(), angle.C.get_pos()) area2 = area_triangle( angle.D.get_pos(), angle.B.get_pos(), angle.C.get_pos()) tmp_local_force_inter = OifLocalForces( r0=r0, ks=ks, kslin=kslin, phi0=phi, kb=kb, A01=area1, A02=area2, kal=kal, kvisc=kvisc) self.local_force_interactions.append( [tmp_local_force_inter, [angle.A, angle.B, angle.C, angle.D]]) self.system.bonded_inter.add(tmp_local_force_inter) if (kag != 0.0) or (kv != 0.0): surface = self.mesh.surface() volume = self.mesh.volume() self.global_force_interaction = OifGlobalForces( A0_g=surface, ka_g=kag, V0=volume, kv=kv) self.system.bonded_inter.add(self.global_force_interaction) def print_info(self): print("\nThe following OifCellType was created: ") print("\t nodes_file: " + self.mesh.nodes_file) print("\t triangles_file: " + self.mesh.triangles_file) print("\t n_nodes: " + str(self.mesh.get_n_nodes())) print("\t n_triangles: " + str(self.mesh.get_n_triangles())) print("\t n_edges: " + str(self.mesh.get_n_edges())) print("\t ks: " + custom_str(self.ks)) print("\t kslin: " + custom_str(self.kslin)) print("\t kb: " + custom_str(self.kb)) print("\t kal: " + custom_str(self.kal)) print("\t kag: " + custom_str(self.kag)) print("\t kv: " + custom_str(self.kv)) print("\t kvisc: " + custom_str(self.kvisc)) print("\t normal: " + str(self.normal)) print("\t resize: " + str(self.resize)) print(" ") class OifCell(object): """ Represents a concrete elastic object. """ def __init__(self, cell_type=None, origin=None, particle_type=None, particle_mass=1.0, rotate=None): if (cell_type is None) or (not isinstance(cell_type, OifCellType)): raise Exception( "OifCell: No cellType provided or wrong type. 
Quitting.") if (origin is None) or \ (not ((len(origin) == 3) and isinstance(origin[0], float) and isinstance(origin[1], float) and isinstance(origin[2], float))): raise TypeError("Origin must be tuple.") if (particle_type is None) or (not isinstance(particle_type, int)): raise Exception( "OifCell: No particle_type specified or wrong type. Quitting.") if not isinstance(particle_mass, float): raise Exception("OifCell: particle mass must be float.") if (rotate is not None) and not ((len(rotate) == 3) and isinstance(rotate[0], float) and isinstance(rotate[1], float) and isinstance(rotate[2], float)): raise TypeError("Rotate must be list of three floats.") self.cell_type = cell_type self.cell_type.system.max_oif_objects = self.cell_type.system.max_oif_objects + 1 self.mesh = cell_type.mesh.copy( origin=origin, particle_type=particle_type, particle_mass=particle_mass, rotate=rotate) self.particle_mass = particle_mass self.particle_type = particle_type self.origin = origin self.rotate = rotate for inter in self.cell_type.local_force_interactions: esp_inter = inter[0] points = inter[1] n_points = len(points) if n_points == 2: p0 = self.mesh.points[ points[0].id] # Getting PartPoints from id's of FixedPoints p1 = self.mesh.points[points[1].id] p0.part.add_bond((esp_inter, p1.part_id)) if n_points == 3: p0 = self.mesh.points[points[0].id] p1 = self.mesh.points[points[1].id] p2 = self.mesh.points[points[2].id] p0.part.add_bond((esp_inter, p1.part_id, p2.part_id)) if n_points == 4: p0 = self.mesh.points[points[0].id] p1 = self.mesh.points[points[1].id] p2 = self.mesh.points[points[2].id] p3 = self.mesh.points[points[3].id] p1.part.add_bond( (esp_inter, p0.part_id, p2.part_id, p3.part_id)) if (self.cell_type.kag != 0.0) or (self.cell_type.kv != 0.0): for triangle in self.mesh.triangles: triangle.A.part.add_bond( (self.cell_type.global_force_interaction, triangle.B.part_id, triangle.C.part_id)) # setting the out_direction interaction for membrane collision if self.cell_type.mesh.normal is True: tmp_out_direction_interaction = OifOutDirection() # this interaction could be just one for all objects, but here it # is created multiple times self.cell_type.system.bonded_inter.add( tmp_out_direction_interaction) for p in self.mesh.points: p.part.add_bond( (tmp_out_direction_interaction, self.mesh.neighbors[ p.id].A.part_id, self.mesh.neighbors[p.id].B.part_id, self.mesh.neighbors[p.id].C.part_id)) def get_origin(self): center = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: center += p.get_pos() return center / len(self.mesh.points) def set_origin(self, new_origin=(0.0, 0.0, 0.0)): old_origin = self.get_origin() for p in self.mesh.points: new_position = p.get_pos() - old_origin + new_origin p.set_pos(new_position) def get_approx_origin(self): approx_center = np.array([0.0, 0.0, 0.0]) for id in self.mesh.ids_extremal_points: approx_center += self.mesh.points[id].get_pos() return approx_center / len(self.mesh.ids_extremal_points) def get_origin_folded(self): origin = self.get_origin() return np.mod(origin, self.cell_type.system.box_l) def get_velocity(self): velocity = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: velocity += p.get_vel() return velocity / len(self.mesh.points) def set_velocity(self, new_velocity=(0.0, 0.0, 0.0)): for p in self.mesh.points: p.set_vel(new_velocity) def pos_bounds(self): x_min = large_number x_max = -large_number y_min = large_number y_max = -large_number z_min = large_number z_max = -large_number for p in self.mesh.points: coords = p.get_pos() if coords[0] < x_min: 
x_min = coords[0] if coords[0] > x_max: x_max = coords[0] if coords[1] < y_min: y_min = coords[1] if coords[1] > y_max: y_max = coords[1] if coords[2] < z_min: z_min = coords[2] if coords[2] > z_max: z_max = coords[2] return [x_min, x_max, y_min, y_max, z_min, z_max] def surface(self): return self.mesh.surface() def volume(self): return self.mesh.volume() def diameter(self): max_distance = 0.0 n_points = len(self.mesh.points) for i in range(0, n_points): for j in range(i + 1, n_points): p1 = self.mesh.points[i].get_pos() p2 = self.mesh.points[j].get_pos() tmp_dist = vec_distance(p1, p2) if tmp_dist > max_distance: max_distance = tmp_dist return max_distance def get_n_nodes(self): return self.mesh.get_n_nodes() def set_force(self, new_force=(0.0, 0.0, 0.0)): for p in self.mesh.points: p.set_force(new_force) # this is not implemented # def kill_motion(self): # for p in self.mesh.points: # p.kill_motion() # this is not implemented # def unkill_motion(self): # for p in self.mesh.points: # p.unkill_motion() def output_vtk_pos(self, file_name=None): if file_name is None: raise Exception( "OifCell: No file_name provided for vtk output. Quitting") n_points = len(self.mesh.points) n_triangles = len(self.mesh.triangles) output_file = open(file_name, "w") output_file.write("# vtk DataFile Version 3.0\n") output_file.write("Data\n") output_file.write("ASCII\n") output_file.write("DATASET POLYDATA\n") output_file.write("POINTS " + str(n_points) + " float\n") for p in self.mesh.points: coords = p.get_pos() output_file.write(custom_str(coords[0]) + " " + custom_str( coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.write("TRIANGLE_STRIPS " + str( n_triangles) + " " + str(4 * n_triangles) + "\n") for t in self.mesh.triangles: output_file.write( "3 " + str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() def output_vtk_pos_folded(self, file_name=None): if file_name is None: raise Exception( "OifCell: No file_name provided for vtk output. Quitting.") n_points = len(self.mesh.points) n_triangles = len(self.mesh.triangles) # get coordinates of the origin center = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: center += p.get_pos() center /= len(self.mesh.points) center_folded = np.floor(center / self.cell_type.system.box_l) # this gives how many times the origin is folded in all three # directions output_file = open(file_name, "w") output_file.write("# vtk DataFile Version 3.0\n") output_file.write("Data\n") output_file.write("ASCII\n") output_file.write("DATASET POLYDATA\n") output_file.write("POINTS " + str(n_points) + " float\n") for p in self.mesh.points: coords = p.get_pos() - center_folded * self.cell_type.system.box_l output_file.write(custom_str(coords[0]) + " " + custom_str( coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.write("TRIANGLE_STRIPS " + str( n_triangles) + " " + str(4 * n_triangles) + "\n") for t in self.mesh.triangles: output_file.write( "3 " + str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() def append_point_data_to_vtk(self, file_name=None, data_name=None, data=None, first_append=None): if file_name is None: raise Exception( "OifCell: append_point_data_to_vtk: No file_name provided. Quitting.") if data is None: raise Exception( "OifCell: append_point_data_to_vtk: No data provided. Quitting.") if data_name is None: raise Exception( "OifCell: append_point_data_to_vtk: No data_name provided. 
Quitting.") if first_append is None: raise Exception("OifCell: append_point_data_to_vtk: Need to know whether this is the first data list to be " "appended for this file. Quitting.") n_points = self.get_n_nodes() if (len(data) != n_points): raise Exception( "OifCell: append_point_data_to_vtk: Number of data points does not match number of mesh points. Quitting.") output_file = open(file_name, "a") if first_append is True: output_file.write("POINT_DATA " + str(n_points) + "\n") output_file.write("SCALARS " + data_name + " float 1\n") output_file.write("LOOKUP_TABLE default\n") for p in self.mesh.points: output_file.write(str(data[p.id]) + "\n") output_file.close() def output_raw_data(self, file_name=None, data=None): if file_name is None: raise Exception( "OifCell: output_raw_data: No file_name provided. Quitting.") if data is None: raise Exception( "OifCell: output_raw_data: No data provided. Quitting.") n_points = self.get_n_nodes() if (len(data) != n_points): raise Exception( "OifCell: output_raw_data: Number of data points does not match number of mesh points. Quitting.") output_file = open(file_name, "w") for p in self.mesh.points: output_file.write(" ".join(map(str, data[p.id])) + "\n") output_file.close() def output_mesh_points(self, file_name=None): if file_name is None: raise Exception( "OifCell: No file_name provided for mesh nodes output. Quitting.") output_file = open(file_name, "w") center = self.get_origin() for p in self.mesh.points: coords = p.get_pos() - center output_file.write(custom_str(coords[0]) + " " + custom_str( coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.close() def set_mesh_points(self, file_name=None): if file_name is None: raise Exception( "OifCell: No file_name provided for set_mesh_points. Quitting.") center = self.get_origin() n_points = self.get_n_nodes() in_file = open(file_name, "r") nodes_coord = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: nodes_coord = filter(None, nodes_coord) # here we have list of lines with triplets of # strings if len(nodes_coord) != n_points: raise Exception("OifCell: Mesh nodes not set to new positions: " "number of lines in the file does not equal number of Cell nodes. 
Quitting.") else: i = 0 for line in nodes_coord: # extracts coordinates from the string line line = line.split() new_position = np.array(line).astype(np.float) + center self.mesh.points[i].set_pos(new_position) i += 1 def print_info(self): print("\nThe following OifCell was created: ") print("\t particle_mass: " + custom_str(self.particle_mass)) print("\t particle_type: " + str(self.particle_type)) print("\t rotate: " + str(self.rotate)) print("\t origin: " + str(self.origin[0]) + " " + str( self.origin[1]) + " " + str(self.origin[2])) def elastic_forces( self, el_forces=(0, 0, 0, 0, 0, 0), f_metric=(0, 0, 0, 0, 0, 0), vtk_file=None, raw_data_file=None): # the order of parameters in elastic_forces and in f_metric is as follows (ks, kb, kal, kag, kv, total) # vtk_file means that a vtk file for visualisation of elastic forces will be written # raw_data_file means that just the elastic forces will be written into # the output file stretching_forces_list = [] bending_forces_list = [] local_area_forces_list = [] global_area_forces_list = [] volume_forces_list = [] elastic_forces_list = [] stretching_forces_norms_list = [] bending_forces_norms_list = [] local_area_forces_norms_list = [] global_area_forces_norms_list = [] volume_forces_norms_list = [] elastic_forces_norms_list = [] ks_f_metric = 0.0 kb_f_metric = 0.0 kal_f_metric = 0.0 kag_f_metric = 0.0 kv_f_metric = 0.0 total_f_metric = 0.0 for i in range(0, 6): if (el_forces[i] != 0) and (el_forces[i] != 1): raise Exception("OifCell: elastic_forces: Incorrect argument. el_forces has to be a sixtuple of 0s and 1s, " "specifying which elastic forces will be calculated. The order in the sixtuple is (ks, kb, " "kal, kag, kv, total).") for i in range(0, 6): if (f_metric[i] != 0) and (f_metric[i] != 1): raise Exception("OifCell: elastic_forces: Incorrect argument. f_metric has to be a sixtuple of 0s and 1s, " "specifying which f_metric will be calculated. 
The order in the sixtuple is (ks, kb, kal, " "kag, kv, total)") # calculation of stretching forces and f_metric if (el_forces[0] == 1) or (el_forces[5] == 1) or (f_metric[0] == 1) or (f_metric[5] == 1): # initialize list stretching_forces_list = [] for p in self.mesh.points: stretching_forces_list.append([0.0, 0.0, 0.0]) # calculation uses edges, but results are stored for nodes for e in self.mesh.edges: a_current_pos = e.A.get_pos() b_current_pos = e.B.get_pos() a_orig_pos = self.cell_type.mesh.points[e.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[e.B.id].get_pos() current_dist = e.length() orig_dist = vec_distance(a_orig_pos, b_orig_pos) tmp_stretching_force = oif_calc_stretching_force( self.cell_type.ks, a_current_pos, b_current_pos, orig_dist, current_dist) stretching_forces_list[e.A.id] += tmp_stretching_force stretching_forces_list[e.B.id] -= tmp_stretching_force # calculation of stretching f_metric, if needed if f_metric[0] == 1: ks_f_metric = 0.0 for p in self.mesh.points: ks_f_metric += norm(stretching_forces_list[p.id]) # calculation of bending forces and f_metric if (el_forces[1] == 1) or (el_forces[5] == 1) or (f_metric[1] == 1) or (f_metric[5] == 1): # initialize list bending_forces_list = [] for p in self.mesh.points: bending_forces_list.append([0.0, 0.0, 0.0]) # calculation uses bending incidences, but results are stored for # nodes for angle in self.mesh.angles: a_current_pos = angle.A.get_pos() b_current_pos = angle.B.get_pos() c_current_pos = angle.C.get_pos() d_current_pos = angle.D.get_pos() a_orig_pos = self.cell_type.mesh.points[angle.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[angle.B.id].get_pos() c_orig_pos = self.cell_type.mesh.points[angle.C.id].get_pos() d_orig_pos = self.cell_type.mesh.points[angle.D.id].get_pos() current_angle = angle.size() orig_angle = angle_btw_triangles( a_orig_pos, b_orig_pos, c_orig_pos, d_orig_pos) tmp_bending_forces = oif_calc_bending_force( self.cell_type.kb, a_current_pos, b_current_pos, c_current_pos, d_current_pos, orig_angle, current_angle) tmp_bending_force1 = np.array( [tmp_bending_forces[0], tmp_bending_forces[1], tmp_bending_forces[2]]) tmp_bending_force2 = np.array( [tmp_bending_forces[3], tmp_bending_forces[4], tmp_bending_forces[5]]) bending_forces_list[angle.A.id] += tmp_bending_force1 bending_forces_list[angle.B.id] -= 0.5 * \ tmp_bending_force1 + 0.5 * tmp_bending_force2 bending_forces_list[angle.C.id] -= 0.5 * \ tmp_bending_force1 + 0.5 * tmp_bending_force2 bending_forces_list[angle.D.id] += tmp_bending_force2 # calculation of bending f_metric, if needed if f_metric[1] == 1: kb_f_metric = 0.0 for p in self.mesh.points: kb_f_metric += norm(bending_forces_list[p.id]) # calculation of local area forces and f_metric if (el_forces[2] == 1) or (el_forces[5] == 1) or (f_metric[2] == 1) or (f_metric[5] == 1): # initialize list local_area_forces_list = [] for p in self.mesh.points: local_area_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() a_orig_pos = self.cell_type.mesh.points[t.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[t.B.id].get_pos() c_orig_pos = self.cell_type.mesh.points[t.C.id].get_pos() current_area = t.area() orig_area = area_triangle(a_orig_pos, b_orig_pos, c_orig_pos) tmp_local_area_forces = oif_calc_local_area_force( self.cell_type.kal, a_current_pos, b_current_pos, c_current_pos, orig_area, current_area) 
local_area_forces_list[t.A.id] += np.array( [tmp_local_area_forces[0], tmp_local_area_forces[1], tmp_local_area_forces[2]]) local_area_forces_list[t.B.id] += np.array( [tmp_local_area_forces[3], tmp_local_area_forces[4], tmp_local_area_forces[5]]) local_area_forces_list[t.C.id] += np.array( [tmp_local_area_forces[6], tmp_local_area_forces[7], tmp_local_area_forces[8]]) # calculation of local area f_metric, if needed if f_metric[2] == 1: kal_f_metric = 0.0 for p in self.mesh.points: kal_f_metric += norm(local_area_forces_list[p.id]) # calculation of global area forces and f_metric if (el_forces[3] == 1) or (el_forces[5] == 1) or (f_metric[3] == 1) or (f_metric[5] == 1): # initialize list global_area_forces_list = [] for p in self.mesh.points: global_area_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() current_surface = self.mesh.surface() orig_surface = self.cell_type.mesh.surface() tmp_global_area_forces = oif_calc_global_area_force( self.cell_type.kag, a_current_pos, b_current_pos, c_current_pos, orig_surface, current_surface) global_area_forces_list[t.A.id] += np.array( [tmp_global_area_forces[0], tmp_global_area_forces[1], tmp_global_area_forces[2]]) global_area_forces_list[t.B.id] += np.array( [tmp_global_area_forces[3], tmp_global_area_forces[4], tmp_global_area_forces[5]]) global_area_forces_list[t.C.id] += np.array( [tmp_global_area_forces[6], tmp_global_area_forces[7], tmp_global_area_forces[8]]) # calculation of global area f_metric, if needed if f_metric[3] == 1: kag_f_metric = 0.0 for p in self.mesh.points: kag_f_metric += norm(global_area_forces_list[p.id]) # calculation of volume forces and f_metric if (el_forces[4] == 1) or (el_forces[5] == 1) or (f_metric[4] == 1) or (f_metric[5] == 1): # initialize list volume_forces_list = [] for p in self.mesh.points: volume_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() current_volume = self.mesh.volume() orig_volume = self.cell_type.mesh.volume() tmp_volume_force = oif_calc_volume_force( self.cell_type.kv, a_current_pos, b_current_pos, c_current_pos, orig_volume, current_volume) volume_forces_list[t.A.id] += tmp_volume_force volume_forces_list[t.B.id] += tmp_volume_force volume_forces_list[t.C.id] += tmp_volume_force # calculation of volume f_metric, if needed if f_metric[4] == 1: kv_f_metric = 0.0 for p in self.mesh.points: kv_f_metric += norm(volume_forces_list[p.id]) # calculation of total elastic forces and f_metric if (el_forces[5] == 1) or (f_metric[5] == 1): elastic_forces_list = [] for p in self.mesh.points: total_elastic_forces = stretching_forces_list[p.id] + bending_forces_list[p.id] + \ local_area_forces_list[p.id] + global_area_forces_list[p.id] + \ volume_forces_list[p.id] elastic_forces_list.append(total_elastic_forces) # calculation of total f_metric, if needed if f_metric[5] == 1: total_f_metric = 0.0 for p in self.mesh.points: total_f_metric += norm(elastic_forces_list[p.id]) # calculate norms of resulting forces if (el_forces[0] + el_forces[1] + el_forces[2] + el_forces[3] + el_forces[4] + el_forces[5]) != 0: if el_forces[0] == 1: stretching_forces_norms_list = [] for p in self.mesh.points: stretching_forces_norms_list.append( norm(stretching_forces_list[p.id])) if el_forces[1] == 
1: bending_forces_norms_list = [] for p in self.mesh.points: bending_forces_norms_list.append( norm(bending_forces_list[p.id])) if el_forces[2] == 1: local_area_forces_norms_list = [] for p in self.mesh.points: local_area_forces_norms_list.append( norm(local_area_forces_list[p.id])) if el_forces[3] == 1: global_area_forces_norms_list = [] for p in self.mesh.points: global_area_forces_norms_list.append( norm(global_area_forces_list[p.id])) if el_forces[4] == 1: volume_forces_norms_list = [] for p in self.mesh.points: volume_forces_norms_list.append( norm(volume_forces_list[p.id])) if el_forces[5] == 1: elastic_forces_norms_list = [] for p in self.mesh.points: elastic_forces_norms_list.append( norm(elastic_forces_list[p.id])) # output vtk (folded) if vtk_file is not None: if el_forces == (0, 0, 0, 0, 0, 0): raise Exception("OifCell: elastic_forces: The option elastic_forces was not used. " "Nothing to output to vtk file.") self.output_vtk_pos_folded(vtk_file) first = True if el_forces[0] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="ks_f_metric", data=stretching_forces_norms_list, first_append=first) first = False if el_forces[1] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="kb_f_metric", data=bending_forces_norms_list, first_append=first) first = False if el_forces[2] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="kal_f_metric", data=local_area_forces_norms_list, first_append=first) first = False if el_forces[3] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="kag_f_metric", data=global_area_forces_norms_list, first_append=first) first = False if el_forces[4] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="kav_f_metric", data=volume_forces_norms_list, first_append=first) first = False if el_forces[5] == 1: self.append_point_data_to_vtk( file_name=vtk_file, data_name="total_f_metric", data=elastic_forces_norms_list, first_append=first) first = False # output raw data if raw_data_file is not None: if (el_forces[0] + el_forces[1] + el_forces[2] + el_forces[3] + el_forces[4] + el_forces[5]) != 1: raise Exception("OifCell: elastic_forces: Only one type of elastic forces can be written into one " "raw_data_file. If you need several, please call OifCell.elastic_forces multiple times - " "once per elastic force.") if el_forces[0] == 1: self.output_raw_data( file_name=raw_data_file, data=stretching_forces_list) if el_forces[1] == 1: self.output_raw_data( file_name=raw_data_file, data=bending_forces_list) if el_forces[2] == 1: self.output_raw_data( file_name=raw_data_file, data=local_area_forces_list) if el_forces[3] == 1: self.output_raw_data( file_name=raw_data_file, data=global_area_forces_list) if el_forces[4] == 1: self.output_raw_data( file_name=raw_data_file, data=volume_forces_list) if el_forces[5] == 1: self.output_raw_data( file_name=raw_data_file, data=elastic_forces_list) # return f_metric if f_metric[0] + f_metric[1] + f_metric[2] + f_metric[3] + f_metric[4] + f_metric[5] > 0: results = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] if f_metric[0] == 1: results[0] = ks_f_metric if f_metric[1] == 1: results[1] = kb_f_metric if f_metric[2] == 1: results[2] = kal_f_metric if f_metric[3] == 1: results[3] = kag_f_metric if f_metric[4] == 1: results[4] = kv_f_metric if f_metric[5] == 1: results[5] = total_f_metric return results else: return 0
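if __name__ == "__main__":
    # Minimal usage sketch for the classes above, assuming a working
    # espressomd installation and existing mesh files; the file names, box
    # size and elastic constants below are placeholders, and the exact
    # espressomd.System constructor signature depends on the ESPResSo version.
    import espressomd

    system = espressomd.System(box_l=[20.0, 20.0, 20.0])
    system.time_step = 0.1
    system.cell_system.skin = 0.2

    # template (mesh + elastic parameters) shared by all cells of this type
    cell_type = OifCellType(nodes_file="cell_nodes.dat",
                            triangles_file="cell_triangles.dat",
                            system=system, ks=0.02, kb=0.016, kal=0.02,
                            kag=0.9, kv=0.5, resize=(1.0, 1.0, 1.0))
    # concrete cell instantiated from the template at a given origin
    cell = OifCell(cell_type=cell_type, particle_type=0,
                   origin=[10.0, 10.0, 10.0])
    cell_type.print_info()
    cell.print_info()
    # f_metric order is (ks, kb, kal, kag, kv, total); request only the total
    print(cell.elastic_forces(f_metric=(0, 0, 0, 0, 0, 1)))
    cell.output_vtk_pos_folded(file_name="cell_0.vtk")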
gpl-3.0
-7,041,708,817,716,287,000
46.409091
200
0.496069
false
bcongdon/Data-Science-Projects
election_tweets/tweet_tokenizer.py
1
1301
import re,json import numpy as np import scipy.stats as sp emoticons_str = r""" (?: [:=;] # Eyes [oO\-]? # Nose (optional) [D\)\]\(\]/\\OpP] # Mouth )""" regex_str = [ emoticons_str, r'<[^>]+>', # HTML tags r'(?:@[\w_]+)', # @-mentions r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and ' r'(?:[\w_]+)', # other words r'(?:\S)' # anything else ] tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE) emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE) def tokenize(s): return tokens_re.findall(s) def preprocess(s, lowercase=False): tokens = tokenize(s) if lowercase: tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens] return tokens if __name__ == "__main__": total_tokens = list() with open('output.json','r') as f: for line in f: tweet = json.loads(line) tokens = preprocess(tweet['text']) total_tokens += tokens nptok = np.array(total_tokens) freq = sp.itemfreq(nptok) print freq
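# A quick illustration of what preprocess() returns for a single tweet; the
# sample text below is invented purely for this example:
#
#   >>> preprocess("RT @user: Check http://example.com #Election2016 :)")
#   ['RT', '@user', ':', 'Check', 'http://example.com', '#Election2016', ':)']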
gpl-3.0
-1,501,399,698,604,844,000
26.680851
92
0.500384
false
bikash/h2o-dev
py2/h2o_util.py
1
29314
import subprocess import gzip, shutil, random, time, re, copy import os, zipfile, json, csv import sys, math import errno from h2o_test import verboseprint, dump_json import h2o_print as h2p #************************************************************************ # stuff from ray # list or tuple is okay. so are dicts. strings not okay # or should we strictly check for list,tuple? def list_to_dict(l, key): # assert not isinstance(l, basestring) assert isinstance(l, (list, tuple, dict)) # print 'list_to_dict key: ', key keySplit = key.split("/") result = {} for v in l: # print 'list_to_dict v: ', v k = followPath(v, keySplit) # print 'list_to_dict k: ', k result[k] = v print "list_to_dict created dict with %s entries" % len(result) return result # Assertion-type stuff def make_sure_path_exists(path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise # was d mutable to caller before? suppose not if the local assign is a new object. def followPath(d, path_elems): dCopy = copy.deepcopy(d) for path_elem in path_elems: # print "followPath path_elem:", path_elem if "" != path_elem: idx = -1 if path_elem.endswith("]"): idx = int(path_elem[path_elem.find("[") + 1:path_elem.find("]")]) path_elem = path_elem[:path_elem.find("[")] assert path_elem in dCopy, "Failed to find key: " + path_elem + " in dict: " + dCopy # does this create a new object so the caller is not affected? if -1 == idx: dCopy = dCopy[path_elem] else: dCopy = dCopy[path_elem][idx] return dCopy def assertKeysExist(d, path, keys): path_elems = path.split("/") d = followPath(d, path_elems) for key in keys: assert key in d, "Failed to find key: " + key + " in dict: " + repr(d) def assertKeysExistAndNonNull(d, path, keys): path_elems = path.split("/") d = followPath(d, path_elems) for key in keys: assert key in d, "Failed to find key: " + key + " in dict: " + repr(d) assert d[key] != None, "Value unexpectedly null: " + key + " in dict: " + repr(d) def assertKeysDontExist(d, path, keys): path_elems = path.split("/") d = followPath(d, path_elems) for key in keys: assert key not in d, "Unexpectedly found key: " + key + " in dict: " + repr(d) #************************************************************************ # Return file size. def get_file_size(f): return os.path.getsize(f) # Splits file into chunks of given size and returns an iterator over chunks. def iter_chunked_file(file, chunk_size=2048): return iter(lambda: file.read(chunk_size), '') # operations to get bit patterns for fp # Python internally uses the native endianity and 64-bits for floats # Java floatToBits is the thing to convert fp to long bits # if it's real, use this to convert. All reals should match # long bits = Double.doubleToLongBits(myDouble); # System.out.println(Long.toBinaryString(bits)); import struct # Q is unsigned long long. 8 bytes # d is double float def doubleToUnsignedLongLong(d): s = struct.pack('>d', d) return struct.unpack('>Q', s)[0] # floatToBits(173.3125) # 1127043072 # hex(_) # '0x432d5000' # You can reverse the order of operations to round-trip: def unsignedLongLongToDouble(Q): s = struct.pack('>Q', Q) return struct.unpack('>d', s)[0] # bitsToFloat(0x432d5000) # 173.3125 # takes fp or list of fp and returns same with just two digits of precision # using print rounding def twoDecimals(l): if isinstance(l, (list, tuple)): return ["%.2f" % v for v in l] elif isinstance(l, basestring): return "%s" % l elif l is None: return None else: return "%.2f" % l # a short quick version for relative comparion. 
But it's probably better to use approxEqual below # the subsequent ones might be prefered, especially assertAlmostEqual( # http://en.wikipedia.org/wiki/Relative_difference # http://stackoverflow.com/questions/4028889/floating-point-equality-in-python # def fp_approxEqual(a, b, rel): # c = abs(a-b) / max(abs(a), abs(b)) # print "actual relative diff: %s allowed relative diff: %s" % (c, rel) # return c < rel # Generic "approximately equal" function for any object type, with customisable error tolerance. # When called with float arguments, approxEqual(x, y[, tol[, rel]) compares x and y numerically, # and returns True if y is within either absolute error tol or relative error rel of x, # otherwise return False. # The function defaults to sensible default values for tol and rel. # or any other pair of objects, approxEqual() looks for a method __approxEqual__ and, if found, # calls it with arbitrary optional arguments. # This allows types to define their own concept of "close enough". def _float_approxEqual(x, y, tol=1e-18, rel=1e-7, **kwargs): if tol is rel is None: raise TypeError('cannot specify both absolute and relative errors are None') tests = [] if tol is not None: tests.append(abs(tol)) if rel is not None: tests.append(abs(rel*x)) assert tests return abs(abs(x) - abs(y)) <= max(tests) # from http://code.activestate.com/recipes/577124-approximately-equal/ def approxEqual(x, y, *args, **kwargs): """approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False Return True if x and y are approximately equal, otherwise False. If x and y are floats, return True if y is within either absolute error tol or relative error rel of x. You can disable either the absolute or relative check by passing None as tol or rel (but not both). For any other objects, x and y are checked in that order for a method __approxEqual__, and the result of that is returned as a bool. Any optional arguments are passed to the __approxEqual__ method. __approxEqual__ can return NotImplemented to signal that it doesn't know how to perform that specific comparison, in which case the other object is checked instead. If neither object have the method, or both defer by returning NotImplemented, approxEqual falls back on the same numeric comparison used for floats. >>> almost_equal(1.2345678, 1.2345677) True >>> almost_equal(1.234, 1.235) False """ if not (type(x) is type(y) is float): # Skip checking for __approxEqual__ in the common case of two floats. methodname = '__approxEqual__' # Allow the objects to specify what they consider "approximately equal", # giving precedence to x. If either object has the appropriate method, we # pass on any optional arguments untouched. for a,b in ((x, y), (y, x)): try: method = getattr(a, methodname) except AttributeError: continue else: result = method(b, *args, **kwargs) if result is NotImplemented: print "WARNING: NotImplemented approxEqual for types" continue return bool(result) # If we get here without returning, then neither x nor y knows how to do an # approximate equal comparison (or are both floats). Fall back to a numeric # comparison. return _float_approxEqual(x, y, *args, **kwargs) # note this can take 'tol' and 'rel' parms for the float case # just wraps approxEqual in an assert with a good print message def assertApproxEqual(x, y, msg='', **kwargs): if not approxEqual(x, y, msg=msg, **kwargs): m = msg + '. h2o_util.assertApproxEqual failed comparing %s and %s. %s.' 
% (x, y, kwargs) raise Exception(m) def cleanseInfNan(value): # change the strings returned in h2o json to the IEEE number values translate = { 'NaN': float('NaN'), 'Infinity': float('Inf'), '-Infinity': -float('Inf'), } if str(value) in translate: value = translate[str(value)] return value # use a random or selected fp format from the choices # for testing different fp representations # 'only' can be e, f or g, to restrict the choices # it will wrap the 0-47 until the group (modulo e) def fp_format(val=None, sel=None, only=None): def e0(val): return "%e" % val def e1(val): return "%20e" % val def e2(val): return "%-20e" % val def e3(val): return "%020e" % val def e4(val): return "%+e" % val def e5(val): return "%+20e" % val def e6(val): return "%+-20e" % val def e7(val): return "%+020e" % val def e8(val): return "%.4e" % val def e9(val): return "%20.4e" % val def e10(val): return "%-20.4e" % val def e11(val): return "%020.4e" % val def e12(val): return "%+.4e" % val def e13(val): return "%+20.4e" % val def e14(val): return "%+-20.4e" % val def e15(val): return "%+020.4e" % val def f0(val): return "%f" % val def f1(val): return "%20f" % val def f2(val): return "%-20f" % val def f3(val): return "%020f" % val def f4(val): return "%+f" % val def f5(val): return "%+20f" % val def f6(val): return "%+-20f" % val def f7(val): return "%+020f" % val def f8(val): return "%.4f" % val def f9(val): return "%20.4f" % val def f10(val): return "%-20.4f" % val def f11(val): return "%020.4f" % val def f12(val): return "%+.4f" % val def f13(val): return "%+20.4f" % val def f14(val): return "%+-20.4f" % val def f15(val): return "%+020.4f" % val def g0(val): return "%g" % val def g1(val): return "%20g" % val def g2(val): return "%-20g" % val def g3(val): return "%020g" % val def g4(val): return "%+g" % val def g5(val): return "%+20g" % val def g6(val): return "%+-20g" % val def g7(val): return "%+020g" % val def g8(val): return "%.4g" % val def g9(val): return "%20.4g" % val def g10(val): return "%-20.4g" % val def g11(val): return "%020.4g" % val def g12(val): return "%+.4g" % val def g13(val): return "%+20.4g" % val def g14(val): return "%+-20.4g" % val def g15(val): return "%+020.4g" % val # try a neat way to use a dictionary to case select functions # didn't want to use python advanced string format with variable as format # because they do left/right align outside of that?? caseList=[ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15, ] if not val: return len(caseList) if sel: if sel<0 or sel>=len(caseList): raise Exception("sel out of range in write_syn_dataset:", sel) choice = sel else: # pick one randomly if no sel choice = random.randint(0,len(caseList)-1) # print "Using fp format case", choice SUBGRPS = 3 SUBGRP_SIZE = len(caseList) / SUBGRPS # should be int assert math.floor(SUBGRP_SIZE)==SUBGRP_SIZE, "You got a code problem in h2o_util.fp_format" if only: # make choice modulo 3 (can update if more subgroups are added choice = choice % SUBGRPS assert choice >= 0 and choice < SUBGRP_SIZE # now add a base offset = subgrou size. 
# (assume we keep in sync with the subgroup sizes above) if only=='e': choice += 0 elif only=='f': choice += 16 elif only=='g': choice += 32 else: # if a random choice, we should never get here because it's bounded to length of the list above raise Exception("Bad param combo of only: %s and sel: % in h2o_util.fp_format()" % (only, sel)) f = caseList[choice] return f(val) # http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ # given [2, 3, 5] it returns 0 (the index of the first element) with probability 0.2, # 1 with probability 0.3 and 2 with probability 0.5. # The weights need not sum up to anything in particular, and can actually be # arbitrary Python floating point numbers. # The weights need to cover the whole list? otherwise you don't get the rest of the choises # random_data = [6,7,8] # weights = [2,3,5] # d = random_data[h2o_util.weighted_choice(weights)] def weighted_choice(weights): rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: return i # x = choice_with_probability( [('one',0.25), ('two',0.25), ('three',0.5)] ) # need to sum to 1 or less. check error case if you go negative def choice_with_probability(tupleList): n = random.uniform(0, 1) for item, prob in tupleList: if n < prob: break n = n - prob if n < 0: raise Exception("h2o_util.choice_with_probability() error, prob's sum > 1") return item # pick a random param from a dictionary of lists of params def pickRandParams(paramDict, params): randomGroupSize = random.randint(1,len(paramDict)) for i in range(randomGroupSize): randomKey = random.choice(paramDict.keys()) randomV = paramDict[randomKey] randomValue = random.choice(randomV) params[randomKey] = randomValue # this reads a single col out a csv file into a list, without using numpy # so we can port some jenkins tests without needing numpy def file_read_csv_col(csvPathname, col=0, skipHeader=True, datatype='float', preview=5): # only can skip one header line. numpy provides a number N. could update to that. 
with open(csvPathname, 'rb') as f: reader = csv.reader(f, quoting=csv.QUOTE_NONE) # no extra handling for quotes print "csv read of", csvPathname, "column", col # print "Preview of 1st %s lines:" % preview rowNum = 0 dataList = [] lastRowLength = None try: for row in reader: if skipHeader and rowNum==0: print "Skipping header in this csv" else: NA = False if col > len(row)-1: print "col (zero indexed): %s points past the # entries in this row %s" % (col, row) if lastRowLength and len(row)!=lastRowLength: print "Current row length: %s is different than last row length: %s" % (row, lastRowLength) if col > len(row)-1: colData = None else: colData = row[col] # only print first 5 for seeing # don't print big col cases if rowNum < preview and len(row) <= 10: print colData dataList.append(colData) rowNum += 1 if rowNum%10==0: # print rowNum pass lastRowLength = len(row) except csv.Error, e: sys.exit('file %s, line %d: %s' % (csvPathname, reader.line_num, e)) # now we have a list of strings # change them to float if asked for, or int # elimate empty strings if datatype=='float': D1 = [float(i) for i in dataList if i] if datatype=='int': D1 = [int(i) for i in dataList if i] print "D1 done" return D1 def file_line_count(fname): return sum(1 for line in open(fname)) def file_size_formatted(fname): size = os.path.getsize(fname) print "size:", size for x in ['bytes','KB','MB','GB','TB']: if size < 1024.0: return "%3.1f %s" % (size, x) size /= 1024.0 return "%3.1f %s" % (size, 'TB') # the logfiles are zipped with directory structure # unzip it to the zipdir, throwing away the directory structure. # (so we don't have to know the names of the intermediate directories) def flat_unzip(my_zip, my_dir): resultList = [] with zipfile.ZipFile(my_zip) as zip_file: for member in zip_file.namelist(): filename = os.path.basename(member) # skip directories if not filename: continue # copy file (taken from zipfile's extract) source = zip_file.open(member) target = file(os.path.join(my_dir, filename), "wb") with source, target: shutil.copyfileobj(source, target) # update to have resultList just be the pathname resultList.append(os.path.abspath(target.name)) source.close() target.close() return resultList # gunzip gzfile to outfile def file_gunzip(gzfile, outfile): print "\nGunzip-ing", gzfile, "to", outfile start = time.time() zipped_file = gzip.open(gzfile, 'rb') out_file = open(outfile, 'wb') out_file.writelines(zipped_file) out_file.close() zipped_file.close() print "\nGunzip took", (time.time() - start), "secs" # gzip infile to gzfile def file_gzip(infile, gzfile): print "\nGzip-ing", infile, "to", gzfile start = time.time() in_file = open(infile, 'rb') zipped_file = gzip.open(gzfile, 'wb') zipped_file.writelines(in_file) in_file.close() zipped_file.close() print "\nGzip took", (time.time() - start), "secs" # cat file1 and file2 to outfile def file_cat(file1, file2, outfile): print "\nCat'ing", file1, file2, "to", outfile start = time.time() destination = open(outfile,'wb') shutil.copyfileobj(open(file1,'rb'), destination) shutil.copyfileobj(open(file2,'rb'), destination) destination.close() print "\nCat took", (time.time() - start), "secs" # used in loop, so doing always print def file_append(infile, outfile): verboseprint("\nAppend'ing", infile, "to", outfile) start = time.time() in_file = open(infile,'rb') out_file = open(outfile,'a') out_file.write(in_file.read()) in_file.close() out_file.close() verboseprint("\nAppend took", (time.time() - start), "secs") def file_shuffle(infile, outfile): print 
"\nShuffle'ing", infile, "to", outfile start = time.time() # lines = open(infile).readlines() # random.shuffle(lines) # open(outfile, 'w').writelines(lines) fi = open(infile, 'r') fo = open(outfile, 'w') subprocess.call(["sort", "-R"],stdin=fi, stdout=fo) print "\nShuffle took", (time.time() - start), "secs" fi.close() fo.close() # FIX! This is a hack to deal with parser bug def file_strip_trailing_spaces(csvPathname1, csvPathname2): infile = open(csvPathname1, 'r') outfile = open(csvPathname2,'w') # existing file gets erased for line in infile.readlines(): # remove various lineends and whitespace (leading and trailing) # make it unix linend outfile.write(line.strip(" \n\r") + "\n") infile.close() outfile.close() print "\n" + csvPathname1 + " stripped to " + csvPathname2 # can R deal with comments in a csv? def file_strip_comments(csvPathname1, csvPathname2): infile = open(csvPathname1, 'r') outfile = open(csvPathname2,'w') # existing file gets erased for line in infile.readlines(): if not line.startswith('#'): outfile.write(line) infile.close() outfile.close() print "\n" + csvPathname1 + " w/o comments to " + csvPathname2 def file_spaces_to_comma(csvPathname1, csvPathname2): infile = open(csvPathname1, 'r') outfile = open(csvPathname2,'w') # existing file gets erased for line in infile.readlines(): outfile.write(re.sub(r' +',r',',line)) infile.close() outfile.close() print "\n" + csvPathname1 + " with space(s)->comma to " + csvPathname2 # UPDATE: R seems to be doing some kind of expand_cat on cols with '.' in them for NA # (the umass/princeton) data sets. Change to 0 for now so both H2O and R use them the # same way def file_clean_for_R(csvPathname1, csvPathname2): infile = open(csvPathname1, 'r') outfile = open(csvPathname2,'w') # existing file gets erased for line in infile.readlines(): # 1) remove comments and header??? # ignore lines with NA? cheap hack ,, doesn't work for end cols # if not ',,' in line and not line.startswith('#') and not re.match('[A-Za-z]+',line): if not line.startswith('#') and not re.match('[A-Za-z]+',line): # 2) remove various lineends and whitespace (leading and trailing)..make it unix linend line = line.strip(" \n\r") + "\n" # 3) change spaces to comma (don't worry about spaces in enums..don't have them for now) line = re.sub(r' +',r',',line) # 4) change "." fields to 0 line = re.sub(',\.,',',0,',line) # middle of line line = re.sub('^\.,','0,',line) # beginning of line line = re.sub(',\.$',',0',line) # end of line outfile.write(line) infile.close() outfile.close() print "\n" + csvPathname1 + " cleaned for R to " + csvPathname2 # this might be slightly pessimistic, but should be superset def might_h2o_think_whitespace(token): # we allow $ prefix and % suffix as decorators to numbers? whitespaceRegex = re.compile(r""" \s*$ # begin, white space or empty space, end """, re.VERBOSE) if whitespaceRegex.match(token): return True else: return False # this might be slightly pessimistic, but should be superset def might_h2o_think_number_or_whitespace(token): # this matches white space? makes all white space count as number? specialRegex = re.compile(r""" \s* [\$+-]? # single chars that might be considered numbers. alow spaces in between \s*$ """, re.VERBOSE) # this matches white space? makes all white space count as number? number1Regex = re.compile(r""" [\s\$\%]* # begin, white space or empty space. any number of leading % or $ too [+-]? # plus or minus. maybe h2o matches multiple? ([0-9]*\.[0-9]*)? # decimal point focused. optional whole and fractional digits. 
h2o thinks whole thing optional? ([eE][-+]*[0-9]+)? # optional exponent. A single e matches (incorrectly) apparently repeated +- after the e doesn't matter (\s*\[\% ]*)? # can have zero or more percent. Percent can have a space? [\s\$\%]*$ # white space or empty space, any number of trailing % or $ too. end """, re.VERBOSE) # apparently these get detected as number # +e+++10 # +e---10 # this matches white space? makes all white space count as number? number2Regex = re.compile(r""" [\s\$\%]* # begin, white space or empty space. any number of leading % or $ too [+-]? # plus or minus. maybe h2o matches multiple? ([0-9]+)? # one or more digits. h2o thinks whole thing optional (\.[0-9]*)? # optional decimal point and fractional digits ([eE][-+]*[0-9]+)? # optional exponent. a single e matches (incorrectly) apparently repeated +- after the e doesn't matter (\s*\[\% ]*)? # can have zero or more percent. Percent can have a space? [\s\$\%]*$ # white space or empty space, any number of trailing % or $ too. end """, re.VERBOSE) # can nans have the +-%$ decorators?. allow any case? nanRegex = re.compile(r""" [\s\$\%]* # begin, white space or empty space. any number of leading % or $ too [+-]? # plus or minus [Nn][Aa][Nn]? # nan or na (\s*\[\% ]*)? # can have zero or more percent. Percent can have a space? [\s\$\%]*$ # white space or empty space, any number of trailing % or $ too. end """, re.VERBOSE) if specialRegex.match(token) or number1Regex.match(token) or number2Regex.match(token) or nanRegex.match(token): return True else: return False # from nmb10 at http://djangosnippets.org/snippets/2247/ # Shows difference between two json like python objects. # Shows properties, values from first object that are not in the second. # Examples: # import json # or other json serializer # first = json.loads('{"first_name": "Poligraph", "last_name": "Sharikov",}') # second = json.loads('{"first_name": "Poligraphovich", "pet_name": "Sharik"}') # df = JsonDiff(first, second) # df.difference is ["path: last_name"] # JsonDiff(first, second, vice_versa=True) gives you difference from both objects in the one result. # df.difference is ["path: last_name", "path: pet_name"] # JsonDiff(first, second, with_values=True) gives you difference of the values strings. class JsonDiff(object): def __init__(self, first, second, with_values=False, vice_versa=False): self.difference = [] self.check(first, second, with_values=with_values) if vice_versa: self.check(second, first, with_values=with_values) def check(self, first, second, path='', with_values=False): if second != None: if not isinstance(first, type(second)): message = '%s- %s, %s' % (path, type(first), type(second)) TYPE = None self.save_diff(message, TYPE) if isinstance(first, dict): for key in first: # the first part of path must not have trailing dot. if len(path) == 0: new_path = key else: new_path = "%s.%s" % (path, key) if isinstance(second, dict): if second.has_key(key): sec = second[key] else: # there are key in the first, that is not presented in the second PATH = None self.save_diff(new_path, PATH) # prevent further values checking. sec = None # recursive call self.check(first[key], sec, path=new_path, with_values=with_values) else: # second is not dict. every key from first goes to the difference PATH = None self.save_diff(new_path, PATH) self.check(first[key], second, path=new_path, with_values=with_values) # if object is list, loop over it and check. 
elif isinstance(first, list): for (index, item) in enumerate(first): new_path = "%s[%s]" % (path, index) # try to get the same index from second sec = None if second != None: try: sec = second[index] except (IndexError, KeyError): # goes to difference TYPE = None self.save_diff('%s - %s, %s' % (new_path, type(first), type(second)), TYPE) # recursive call self.check(first[index], sec, path=new_path, with_values=with_values) # not list, not dict. check for equality (only if with_values is True) and return. else: if with_values and second != None: if first != second: self.save_diff('%s - %s | %s' % (path, first, second), 'diff') return def save_diff(self, diff_message, type_): message = '%s: %s' % (type_, diff_message) if diff_message not in self.difference: self.difference.append(message) # per Alex Kotliarov # http://stackoverflow.com/questions/2343535/easiest-way-to-serialize-a-simple-class-object-with-json #This function will produce JSON-formatted string for # an instance of a custom class, # a dictionary that have instances of custom classes as leaves, # a list of instances of custom classes # added depth limiting to original def json_repr(obj, curr_depth=0, max_depth=4): """Represent instance of a class as JSON. Arguments: obj -- any object Return: String that represent JSON-encoded object. """ def serialize(obj, curr_depth): """Recursively walk object's hierarchy. Limit to max_depth""" if curr_depth>max_depth: return if isinstance(obj, (bool, int, long, float, basestring)): return obj elif isinstance(obj, dict): obj = obj.copy() for key in obj: obj[key] = serialize(obj[key], curr_depth+1) return obj elif isinstance(obj, list): return [serialize(item, curr_depth+1) for item in obj] elif isinstance(obj, tuple): return tuple(serialize([item for item in obj], curr_depth+1)) elif hasattr(obj, '__dict__'): return serialize(obj.__dict__, curr_depth+1) else: return repr(obj) # Don't know how to handle, convert to string return (serialize(obj, curr_depth+1)) # b = convert_json(a, 'ascii') # a = json.dumps(serialize(obj)) # c = json.loads(a)
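if __name__ == '__main__':
    # Small smoke test of a few pure-Python helpers defined above, using
    # arbitrary values; it assumes the module's own imports (e.g. h2o_test)
    # resolve in this environment.
    print twoDecimals([1.23456, 7.0])        # ['1.23', '7.00']
    print approxEqual(1.2345678, 1.2345677)  # True (within default rel=1e-7)
    print fp_format(173.3125, sel=8)         # '%.4e' rendering -> 1.7331e+02
    print weighted_choice([2, 3, 5])         # 0, 1 or 2, weighted 0.2/0.3/0.5
    d = JsonDiff({'a': 1, 'b': 2}, {'a': 1}, vice_versa=True)
    print d.difference                       # key 'b' is only in the first dict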
apache-2.0
7,380,870,850,780,625,000
38.613514
130
0.596268
false
JohnGriffiths/dipy
dipy/align/metrics.py
4
46496
""" Metrics for Symmetric Diffeomorphic Registration """ from __future__ import print_function import abc import numpy as np import scipy as sp from scipy import gradient, ndimage from ..utils.six import with_metaclass from . import vector_fields as vfu from . import sumsqdiff as ssd from . import crosscorr as cc from . import expectmax as em from . import floating class SimilarityMetric(with_metaclass(abc.ABCMeta, object)): def __init__(self, dim): r""" Similarity Metric abstract class A similarity metric is in charge of keeping track of the numerical value of the similarity (or distance) between the two given images. It also computes the update field for the forward and inverse displacement fields to be used in a gradient-based optimization algorithm. Note that this metric does not depend on any transformation (affine or non-linear) so it assumes the static and moving images are already warped Parameters ---------- dim : int (either 2 or 3) the dimension of the image domain """ self.dim = dim self.levels_above = None self.levels_below = None self.static_image = None self.static_affine = None self.static_spacing = None self.static_direction = None self.moving_image = None self.moving_affine = None self.moving_spacing = None self.moving_direction = None self.mask0 = False def set_levels_below(self, levels): r"""Informs the metric how many pyramid levels are below the current one Informs this metric the number of pyramid levels below the current one. The metric may change its behavior (e.g. number of inner iterations) accordingly Parameters ---------- levels : int the number of levels below the current Gaussian Pyramid level """ self.levels_below = levels def set_levels_above(self, levels): r"""Informs the metric how many pyramid levels are above the current one Informs this metric the number of pyramid levels above the current one. The metric may change its behavior (e.g. number of inner iterations) accordingly Parameters ---------- levels : int the number of levels above the current Gaussian Pyramid level """ self.levels_above = levels def set_static_image(self, static_image, static_affine, static_spacing, static_direction): r"""Sets the static image being compared against the moving one. Sets the static image. The default behavior (of this abstract class) is simply to assign the reference to an attribute, but generalizations of the metric may need to perform other operations Parameters ---------- static_image : array, shape (R, C) or (S, R, C) the static image """ self.static_image = static_image self.static_affine = static_affine self.static_spacing = static_spacing self.static_direction = static_direction def use_static_image_dynamics(self, original_static_image, transformation): r"""This is called by the optimizer just after setting the static image. This method allows the metric to compute any useful information from knowing how the current static image was generated (as the transformation of an original static image). This method is called by the optimizer just after it sets the static image. Transformation will be an instance of DiffeomorficMap or None if the original_static_image equals self.moving_image. 
Parameters ---------- original_static_image : array, shape (R, C) or (S, R, C) original image from which the current static image was generated transformation : DiffeomorphicMap object the transformation that was applied to original image to generate the current static image """ pass def set_moving_image(self, moving_image, moving_affine, moving_spacing, moving_direction): r"""Sets the moving image being compared against the static one. Sets the moving image. The default behavior (of this abstract class) is simply to assign the reference to an attribute, but generalizations of the metric may need to perform other operations Parameters ---------- moving_image : array, shape (R, C) or (S, R, C) the moving image """ self.moving_image = moving_image self.moving_affine = moving_affine self.moving_spacing = moving_spacing self.moving_direction = moving_direction def use_moving_image_dynamics(self, original_moving_image, transformation): r"""This is called by the optimizer just after setting the moving image This method allows the metric to compute any useful information from knowing how the current static image was generated (as the transformation of an original static image). This method is called by the optimizer just after it sets the static image. Transformation will be an instance of DiffeomorficMap or None if the original_moving_image equals self.moving_image. Parameters ---------- original_moving_image : array, shape (R, C) or (S, R, C) original image from which the current moving image was generated transformation : DiffeomorphicMap object the transformation that was applied to original image to generate the current moving image """ pass @abc.abstractmethod def initialize_iteration(self): r"""Prepares the metric to compute one displacement field iteration. This method will be called before any compute_forward or compute_backward call, this allows the Metric to pre-compute any useful information for speeding up the update computations. This initialization was needed in ANTS because the updates are called once per voxel. In Python this is unpractical, though. """ @abc.abstractmethod def free_iteration(self): r"""Releases the resources no longer needed by the metric This method is called by the RegistrationOptimizer after the required iterations have been computed (forward and / or backward) so that the SimilarityMetric can safely delete any data it computed as part of the initialization """ @abc.abstractmethod def compute_forward(self): r"""Computes one step bringing the reference image towards the static. Computes the forward update field to register the moving image towards the static image in a gradient-based optimization algorithm """ @abc.abstractmethod def compute_backward(self): r"""Computes one step bringing the static image towards the moving. Computes the backward update field to register the static image towards the moving image in a gradient-based optimization algorithm """ @abc.abstractmethod def get_energy(self): r"""Numerical value assigned by this metric to the current image pair Must return the numeric value of the similarity between the given static and moving images """ class CCMetric(SimilarityMetric): def __init__(self, dim, sigma_diff=2.0, radius=4): r"""Normalized Cross-Correlation Similarity metric. 
Parameters ---------- dim : int (either 2 or 3) the dimension of the image domain sigma_diff : the standard deviation of the Gaussian smoothing kernel to be applied to the update field at each iteration radius : int the radius of the squared (cubic) neighborhood at each voxel to be considered to compute the cross correlation """ super(CCMetric, self).__init__(dim) self.sigma_diff = sigma_diff self.radius = radius self._connect_functions() def _connect_functions(self): r"""Assign the methods to be called according to the image dimension Assigns the appropriate functions to be called for precomputing the cross-correlation factors according to the dimension of the input images """ if self.dim == 2: self.precompute_factors = cc.precompute_cc_factors_2d self.compute_forward_step = cc.compute_cc_forward_step_2d self.compute_backward_step = cc.compute_cc_backward_step_2d self.reorient_vector_field = vfu.reorient_vector_field_2d elif self.dim == 3: self.precompute_factors = cc.precompute_cc_factors_3d self.compute_forward_step = cc.compute_cc_forward_step_3d self.compute_backward_step = cc.compute_cc_backward_step_3d self.reorient_vector_field = vfu.reorient_vector_field_3d else: raise ValueError('CC Metric not defined for dim. %d' % (self.dim)) def initialize_iteration(self): r"""Prepares the metric to compute one displacement field iteration. Pre-computes the cross-correlation factors for efficient computation of the gradient of the Cross Correlation w.r.t. the displacement field. It also pre-computes the image gradients in the physical space by re-orienting the gradients in the voxel space using the corresponding affine transformations. """ self.factors = self.precompute_factors(self.static_image, self.moving_image, self.radius) self.factors = np.array(self.factors) self.gradient_moving = np.empty( shape=(self.moving_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(sp.gradient(self.moving_image)): self.gradient_moving[..., i] = grad # Convert moving image's gradient field from voxel to physical space if self.moving_spacing is not None: self.gradient_moving /= self.moving_spacing if self.moving_direction is not None: self.reorient_vector_field(self.gradient_moving, self.moving_direction) self.gradient_static = np.empty( shape=(self.static_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(sp.gradient(self.static_image)): self.gradient_static[..., i] = grad # Convert moving image's gradient field from voxel to physical space if self.static_spacing is not None: self.gradient_static /= self.static_spacing if self.static_direction is not None: self.reorient_vector_field(self.gradient_static, self.static_direction) def free_iteration(self): r"""Frees the resources allocated during initialization """ del self.factors del self.gradient_moving del self.gradient_static def compute_forward(self): r"""Computes one step bringing the moving image towards the static. Computes the update displacement field to be used for registration of the moving image towards the static image """ displacement, self.energy = self.compute_forward_step( self.gradient_static, self.factors, self.radius) displacement = np.array(displacement) for i in range(self.dim): displacement[..., i] = ndimage.filters.gaussian_filter( displacement[..., i], self.sigma_diff) return displacement def compute_backward(self): r"""Computes one step bringing the static image towards the moving. 
Computes the update displacement field to be used for registration of the static image towards the moving image """ displacement, energy = self.compute_backward_step(self.gradient_moving, self.factors, self.radius) displacement = np.array(displacement) for i in range(self.dim): displacement[..., i] = ndimage.filters.gaussian_filter( displacement[..., i], self.sigma_diff) return displacement def get_energy(self): r"""Numerical value assigned by this metric to the current image pair Returns the Cross Correlation (data term) energy computed at the largest iteration """ return self.energy class EMMetric(SimilarityMetric): def __init__(self, dim, smooth=1.0, inner_iter=5, q_levels=256, double_gradient=True, step_type='gauss_newton'): r"""Expectation-Maximization Metric Similarity metric based on the Expectation-Maximization algorithm to handle multi-modal images. The transfer function is modeled as a set of hidden random variables that are estimated at each iteration of the algorithm. Parameters ---------- dim : int (either 2 or 3) the dimension of the image domain smooth : float smoothness parameter, the larger the value the smoother the deformation field inner_iter : int number of iterations to be performed at each level of the multi- resolution Gauss-Seidel optimization algorithm (this is not the number of steps per Gaussian Pyramid level, that parameter must be set for the optimizer, not the metric) q_levels : number of quantization levels (equal to the number of hidden variables in the EM algorithm) double_gradient : boolean if True, the gradient of the expected static image under the moving modality will be added to the gradient of the moving image, similarly, the gradient of the expected moving image under the static modality will be added to the gradient of the static image. step_type : string ('gauss_newton', 'demons') the optimization schedule to be used in the multi-resolution Gauss-Seidel optimization algorithm (not used if Demons Step is selected) """ super(EMMetric, self).__init__(dim) self.smooth = smooth self.inner_iter = inner_iter self.q_levels = q_levels self.use_double_gradient = double_gradient self.step_type = step_type self.static_image_mask = None self.moving_image_mask = None self.staticq_means_field = None self.movingq_means_field = None self.movingq_levels = None self.staticq_levels = None self._connect_functions() def _connect_functions(self): r"""Assign the methods to be called according to the image dimension Assigns the appropriate functions to be called for image quantization, statistics computation and multi-resolution iterations according to the dimension of the input images """ if self.dim == 2: self.quantize = em.quantize_positive_2d self.compute_stats = em.compute_masked_class_stats_2d self.reorient_vector_field = vfu.reorient_vector_field_2d elif self.dim == 3: self.quantize = em.quantize_positive_3d self.compute_stats = em.compute_masked_class_stats_3d self.reorient_vector_field = vfu.reorient_vector_field_3d else: raise ValueError('EM Metric not defined for dim. %d' % (self.dim)) if self.step_type == 'demons': self.compute_step = self.compute_demons_step elif self.step_type == 'gauss_newton': self.compute_step = self.compute_gauss_newton_step else: raise ValueError('Opt. step %s not defined' % (self.step_type)) def initialize_iteration(self): r"""Prepares the metric to compute one displacement field iteration. Pre-computes the transfer functions (hidden random variables) and variances of the estimators. Also pre-computes the gradient of both input images. 
Note that once the images are transformed to the opposite modality, the gradient of the transformed images can be used with the gradient of the corresponding modality in the same fashion as diff-demons does for mono-modality images. If the flag self.use_double_gradient is True these gradients are averaged. """ sampling_mask = self.static_image_mask*self.moving_image_mask self.sampling_mask = sampling_mask staticq, self.staticq_levels, hist = self.quantize(self.static_image, self.q_levels) staticq = np.array(staticq, dtype=np.int32) self.staticq_levels = np.array(self.staticq_levels) staticq_means, staticq_vars = self.compute_stats(sampling_mask, self.moving_image, self.q_levels, staticq) staticq_means[0] = 0 self.staticq_means = np.array(staticq_means) self.staticq_variances = np.array(staticq_vars) self.staticq_sigma_sq_field = self.staticq_variances[staticq] self.staticq_means_field = self.staticq_means[staticq] self.gradient_moving = np.empty( shape=(self.moving_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(sp.gradient(self.moving_image)): self.gradient_moving[..., i] = grad # Convert moving image's gradient field from voxel to physical space if self.moving_spacing is not None: self.gradient_moving /= self.moving_spacing if self.moving_direction is not None: self.reorient_vector_field(self.gradient_moving, self.moving_direction) self.gradient_static = np.empty( shape=(self.static_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(sp.gradient(self.static_image)): self.gradient_static[..., i] = grad # Convert moving image's gradient field from voxel to physical space if self.static_spacing is not None: self.gradient_static /= self.static_spacing if self.static_direction is not None: self.reorient_vector_field(self.gradient_static, self.static_direction) movingq, self.movingq_levels, hist = self.quantize(self.moving_image, self.q_levels) movingq = np.array(movingq, dtype=np.int32) self.movingq_levels = np.array(self.movingq_levels) movingq_means, movingq_variances = self.compute_stats( sampling_mask, self.static_image, self.q_levels, movingq) movingq_means[0] = 0 self.movingq_means = np.array(movingq_means) self.movingq_variances = np.array(movingq_variances) self.movingq_sigma_sq_field = self.movingq_variances[movingq] self.movingq_means_field = self.movingq_means[movingq] if self.use_double_gradient: for i, grad in enumerate(sp.gradient(self.staticq_means_field)): self.gradient_moving[..., i] += grad for i, grad in enumerate(sp.gradient(self.movingq_means_field)): self.gradient_static[..., i] += grad def free_iteration(self): r""" Frees the resources allocated during initialization """ del self.sampling_mask del self.staticq_levels del self.movingq_levels del self.staticq_sigma_sq_field del self.staticq_means_field del self.movingq_sigma_sq_field del self.movingq_means_field del self.gradient_moving del self.gradient_static def compute_forward(self): """Computes one step bringing the reference image towards the static. Computes the forward update field to register the moving image towards the static image in a gradient-based optimization algorithm """ return self.compute_step(True) def compute_backward(self): r"""Computes one step bringing the static image towards the moving. 
Computes the update displacement field to be used for registration of the static image towards the moving image """ return self.compute_step(False) def compute_gauss_newton_step(self, forward_step=True): r"""Computes the Gauss-Newton energy minimization step Computes the Newton step to minimize this energy, i.e., minimizes the linearized energy function with respect to the regularized displacement field (this step does not require post-smoothing, as opposed to the demons step, which does not include regularization). To accelerate convergence we use the multi-grid Gauss-Seidel algorithm proposed by Bruhn and Weickert et al [Bruhn05] Parameters ---------- forward_step : boolean if True, computes the Newton step in the forward direction (warping the moving towards the static image). If False, computes the backward step (warping the static image to the moving image) Returns ------- displacement : array, shape (R, C, 2) or (S, R, C, 3) the Newton step References ---------- [Bruhn05] Andres Bruhn and Joachim Weickert, "Towards ultimate motion estimation: combining highest accuracy with real-time performance", 10th IEEE International Conference on Computer Vision, 2005. ICCV 2005. """ reference_shape = self.static_image.shape if forward_step: gradient = self.gradient_static delta = self.staticq_means_field - self.moving_image sigma_sq_field = self.staticq_sigma_sq_field else: gradient = self.gradient_moving delta = self.movingq_means_field - self.static_image sigma_sq_field = self.movingq_sigma_sq_field displacement = np.zeros(shape=(reference_shape)+(self.dim,), dtype=floating) if self.dim == 2: self.energy = v_cycle_2d(self.levels_below, self.inner_iter, delta, sigma_sq_field, gradient, None, self.smooth, displacement) else: self.energy = v_cycle_3d(self.levels_below, self.inner_iter, delta, sigma_sq_field, gradient, None, self.smooth, displacement) return displacement def compute_demons_step(self, forward_step=True): r"""Demons step for EM metric Parameters ---------- forward_step : boolean if True, computes the Demons step in the forward direction (warping the moving towards the static image). If False, computes the backward step (warping the static image to the moving image) Returns ------- displacement : array, shape (R, C, 2) or (S, R, C, 3) the Demons step """ sigma_reg_2 = np.sum(self.static_spacing**2)/self.dim if forward_step: gradient = self.gradient_static delta_field = self.static_image - self.movingq_means_field sigma_sq_field = self.movingq_sigma_sq_field else: gradient = self.gradient_moving delta_field = self.moving_image - self.staticq_means_field sigma_sq_field = self.staticq_sigma_sq_field if self.dim == 2: step, self.energy = em.compute_em_demons_step_2d(delta_field, sigma_sq_field, gradient, sigma_reg_2, None) else: step, self.energy = em.compute_em_demons_step_3d(delta_field, sigma_sq_field, gradient, sigma_reg_2, None) for i in range(self.dim): step[..., i] = ndimage.filters.gaussian_filter(step[..., i], self.smooth) return step def get_energy(self): r"""The numerical value assigned by this metric to the current image pair Returns the EM (data term) energy computed at the largest iteration """ return self.energy def use_static_image_dynamics(self, original_static_image, transformation): r"""This is called by the optimizer just after setting the static image. 
EMMetric takes advantage of the image dynamics by computing the current static image mask from the originalstaticImage mask (warped by nearest neighbor interpolation) Parameters ---------- original_static_image : array, shape (R, C) or (S, R, C) the original static image from which the current static image was generated, the current static image is the one that was provided via 'set_static_image(...)', which may not be the same as the original static image but a warped version of it (even the static image changes during Symmetric Normalization, not only the moving one). transformation : DiffeomorphicMap object the transformation that was applied to the original_static_image to generate the current static image """ self.static_image_mask = (original_static_image > 0).astype(np.int32) if transformation is None: return shape = np.array(self.static_image.shape, dtype=np.int32) affine = self.static_affine self.static_image_mask = transformation.transform( self.static_image_mask, 'nearest', None, shape, affine) def use_moving_image_dynamics(self, original_moving_image, transformation): r"""This is called by the optimizer just after setting the moving image. EMMetric takes advantage of the image dynamics by computing the current moving image mask from the original_moving_image mask (warped by nearest neighbor interpolation) Parameters ---------- original_moving_image : array, shape (R, C) or (S, R, C) the original moving image from which the current moving image was generated, the current moving image is the one that was provided via 'set_moving_image(...)', which may not be the same as the original moving image but a warped version of it. transformation : DiffeomorphicMap object the transformation that was applied to the original_moving_image to generate the current moving image """ self.moving_image_mask = (original_moving_image > 0).astype(np.int32) if transformation is None: return shape = np.array(self.moving_image.shape, dtype=np.int32) affine = self.moving_affine self.moving_image_mask = transformation.transform( self.moving_image_mask, 'nearest', None, shape, affine) class SSDMetric(SimilarityMetric): def __init__(self, dim, smooth=4, inner_iter=10, step_type='demons'): r"""Sum of Squared Differences (SSD) Metric Similarity metric for (mono-modal) nonlinear image registration defined by the sum of squared differences (SSD) Parameters ---------- dim : int (either 2 or 3) the dimension of the image domain smooth : float smoothness parameter, the larger the value the smoother the deformation field inner_iter : int number of iterations to be performed at each level of the multi- resolution Gauss-Seidel optimization algorithm (this is not the number of steps per Gaussian Pyramid level, that parameter must be set for the optimizer, not the metric) step_type : string the displacement field step to be computed when 'compute_forward' and 'compute_backward' are called. 
Either 'demons' or 'gauss_newton' """ super(SSDMetric, self).__init__(dim) self.smooth = smooth self.inner_iter = inner_iter self.step_type = step_type self.levels_below = 0 self._connect_functions() def _connect_functions(self): r"""Assign the methods to be called according to the image dimension Assigns the appropriate functions to be called for vector field reorientation and displacement field steps according to the dimension of the input images and the select type of step (either Demons or Gauss Newton) """ if self.dim == 2: self.reorient_vector_field = vfu.reorient_vector_field_2d elif self.dim == 3: self.reorient_vector_field = vfu.reorient_vector_field_3d else: raise ValueError('SSD Metric not defined for dim. %d' % (self.dim)) if self.step_type == 'gauss_newton': self.compute_step = self.compute_gauss_newton_step elif self.step_type == 'demons': self.compute_step = self.compute_demons_step else: raise ValueError('Opt. step %s not defined' % (self.step_type)) def initialize_iteration(self): r"""Prepares the metric to compute one displacement field iteration. Pre-computes the gradient of the input images to be used in the computation of the forward and backward steps. """ self.gradient_moving = np.empty( shape=(self.moving_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(gradient(self.moving_image)): self.gradient_moving[..., i] = grad # Convert static image's gradient field from voxel to physical space if self.moving_spacing is not None: self.gradient_moving /= self.moving_spacing if self.moving_direction is not None: self.reorient_vector_field(self.gradient_moving, self.moving_direction) self.gradient_static = np.empty( shape=(self.static_image.shape)+(self.dim,), dtype=floating) for i, grad in enumerate(gradient(self.static_image)): self.gradient_static[..., i] = grad # Convert static image's gradient field from voxel to physical space if self.static_spacing is not None: self.gradient_static /= self.static_spacing if self.static_direction is not None: self.reorient_vector_field(self.gradient_static, self.static_direction) def compute_forward(self): r"""Computes one step bringing the reference image towards the static. Computes the update displacement field to be used for registration of the moving image towards the static image """ return self.compute_step(True) def compute_backward(self): r"""Computes one step bringing the static image towards the moving. Computes the update displacement field to be used for registration of the static image towards the moving image """ return self.compute_step(False) def compute_gauss_newton_step(self, forward_step=True): r"""Computes the Gauss-Newton energy minimization step Minimizes the linearized energy function (Newton step) defined by the sum of squared differences of corresponding pixels of the input images with respect to the displacement field. Parameters ---------- forward_step : boolean if True, computes the Newton step in the forward direction (warping the moving towards the static image). 
If False, computes the backward step (warping the static image to the moving image) Returns ------- displacement : array, shape = static_image.shape + (3,) if forward_step==True, the forward SSD Gauss-Newton step, else, the backward step """ reference_shape = self.static_image.shape if forward_step: gradient = self.gradient_static delta_field = self.static_image-self.moving_image else: gradient = self.gradient_moving delta_field = self.moving_image - self.static_image displacement = np.zeros(shape=(reference_shape)+(self.dim,), dtype=floating) if self.dim == 2: self.energy = v_cycle_2d(self.levels_below, self.inner_iter, delta_field, None, gradient, None, self.smooth, displacement) else: self.energy = v_cycle_3d(self.levels_below, self.inner_iter, delta_field, None, gradient, None, self.smooth, displacement) return displacement def compute_demons_step(self, forward_step=True): r"""Demons step for SSD metric Computes the demons step proposed by Vercauteren et al.[Vercauteren09] for the SSD metric. Parameters ---------- forward_step : boolean if True, computes the Demons step in the forward direction (warping the moving towards the static image). If False, computes the backward step (warping the static image to the moving image) Returns ------- displacement : array, shape (R, C, 2) or (S, R, C, 3) the Demons step References ---------- [Vercauteren09] Tom Vercauteren, Xavier Pennec, Aymeric Perchant, Nicholas Ayache, "Diffeomorphic Demons: Efficient Non-parametric Image Registration", Neuroimage 2009 """ sigma_reg_2 = np.sum(self.static_spacing**2)/self.dim if forward_step: gradient = self.gradient_static delta_field = self.static_image - self.moving_image else: gradient = self.gradient_moving delta_field = self.moving_image - self.static_image if self.dim == 2: step, self.energy = ssd.compute_ssd_demons_step_2d(delta_field, gradient, sigma_reg_2, None) else: step, self.energy = ssd.compute_ssd_demons_step_3d(delta_field, gradient, sigma_reg_2, None) for i in range(self.dim): step[..., i] = ndimage.filters.gaussian_filter(step[..., i], self.smooth) return step def get_energy(self): r"""The numerical value assigned by this metric to the current image pair Returns the Sum of Squared Differences (data term) energy computed at the largest iteration """ return self.energy def free_iteration(self): r""" Nothing to free for the SSD metric """ pass def v_cycle_2d(n, k, delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement, depth=0): r"""Multi-resolution Gauss-Seidel solver using V-type cycles Multi-resolution Gauss-Seidel solver: solves the Gauss-Newton linear system by first filtering (GS-iterate) the current level, then solves for the residual at a coarser resolution and finally refines the solution at the current resolution. This scheme corresponds to the V-cycle proposed by Bruhn and Weickert[Bruhn05]. Parameters ---------- n : int number of levels of the multi-resolution algorithm (it will be called recursively until level n == 0) k : int the number of iterations at each multi-resolution level delta_field : array, shape (R, C) the difference between the static and moving image (the 'derivative w.r.t. time' in the optical flow model) sigma_sq_field : array, shape (R, C) the variance of the gray level value at each voxel, according to the EM model (for SSD, it is 1 for all voxels). Inf and 0 values are processed specially to support infinite and zero variance. 
gradient_field : array, shape (R, C, 2) the gradient of the moving image target : array, shape (R, C, 2) right-hand side of the linear system to be solved in the Weickert's multi-resolution algorithm lambda_param : float smoothness parameter, the larger its value the smoother the displacement field displacement : array, shape (R, C, 2) the displacement field to start the optimization from Returns ------- energy : the energy of the EM (or SSD if sigmafield[...]==1) metric at this iteration References ---------- [Bruhn05] Andres Bruhn and Joachim Weickert, "Towards ultimate motion estimation: combining highest accuracy with real-time performance", 10th IEEE International Conference on Computer Vision, 2005. ICCV 2005. """ # pre-smoothing for i in range(k): ssd.iterate_residual_displacement_field_ssd_2d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement) if n == 0: energy = ssd.compute_energy_ssd_2d(delta_field) return energy # solve at coarser grid residual = None residual = ssd.compute_residual_displacement_field_ssd_2d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement, residual) sub_residual = np.array(vfu.downsample_displacement_field_2d(residual)) del residual subsigma_sq_field = None if sigma_sq_field is not None: subsigma_sq_field = vfu.downsample_scalar_field_2d(sigma_sq_field) subdelta_field = vfu.downsample_scalar_field_2d(delta_field) subgradient_field = np.array( vfu.downsample_displacement_field_2d(gradient_field)) shape = np.array(displacement.shape).astype(np.int32) half_shape = ((shape[0] + 1) // 2, (shape[1] + 1) // 2, 2) sub_displacement = np.zeros(shape=half_shape, dtype=floating) sublambda_param = lambda_param*0.25 v_cycle_2d(n-1, k, subdelta_field, subsigma_sq_field, subgradient_field, sub_residual, sublambda_param, sub_displacement, depth+1) # displacement += np.array( # vfu.upsample_displacement_field(sub_displacement, shape)) displacement += vfu.resample_displacement_field_2d(sub_displacement, np.array([0.5, 0.5]), shape) # post-smoothing for i in range(k): ssd.iterate_residual_displacement_field_ssd_2d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement) energy = ssd.compute_energy_ssd_2d(delta_field) return energy def v_cycle_3d(n, k, delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement, depth=0): r"""Multi-resolution Gauss-Seidel solver using V-type cycles Multi-resolution Gauss-Seidel solver: solves the linear system by first filtering (GS-iterate) the current level, then solves for the residual at a coarser resolution and finally refines the solution at the current resolution. This scheme corresponds to the V-cycle proposed by Bruhn and Weickert[1]. [1] Andres Bruhn and Joachim Weickert, "Towards ultimate motion estimation: combining highest accuracy with real-time performance", 10th IEEE International Conference on Computer Vision, 2005. ICCV 2005. Parameters ---------- n : int number of levels of the multi-resolution algorithm (it will be called recursively until level n == 0) k : int the number of iterations at each multi-resolution level delta_field : array, shape (S, R, C) the difference between the static and moving image (the 'derivative w.r.t. time' in the optical flow model) sigma_sq_field : array, shape (S, R, C) the variance of the gray level value at each voxel, according to the EM model (for SSD, it is 1 for all voxels). Inf and 0 values are processed specially to support infinite and zero variance. 
gradient_field : array, shape (S, R, C, 3) the gradient of the moving image target : array, shape (S, R, C, 3) right-hand side of the linear system to be solved in the Weickert's multi-resolution algorithm lambda_param : float smoothness parameter, the larger its value the smoother the displacement field displacement : array, shape (S, R, C, 3) the displacement field to start the optimization from Returns ------- energy : the energy of the EM (or SSD if sigmafield[...]==1) metric at this iteration """ # pre-smoothing for i in range(k): ssd.iterate_residual_displacement_field_ssd_3d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement) if n == 0: energy = ssd.compute_energy_ssd_3d(delta_field) return energy # solve at coarser grid residual = ssd.compute_residual_displacement_field_ssd_3d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement, None) sub_residual = np.array(vfu.downsample_displacement_field_3d(residual)) del residual subsigma_sq_field = None if sigma_sq_field is not None: subsigma_sq_field = vfu.downsample_scalar_field_3d(sigma_sq_field) subdelta_field = vfu.downsample_scalar_field_3d(delta_field) subgradient_field = np.array( vfu.downsample_displacement_field_3d(gradient_field)) shape = np.array(displacement.shape).astype(np.int32) sub_displacement = np.zeros( shape=((shape[0]+1)//2, (shape[1]+1)//2, (shape[2]+1)//2, 3), dtype=floating) sublambda_param = lambda_param*0.25 v_cycle_3d(n-1, k, subdelta_field, subsigma_sq_field, subgradient_field, sub_residual, sublambda_param, sub_displacement, depth+1) del subdelta_field del subsigma_sq_field del subgradient_field del sub_residual displacement += vfu.resample_displacement_field_3d(sub_displacement, 0.5 * np.ones(3), shape) del sub_displacement # post-smoothing for i in range(k): ssd.iterate_residual_displacement_field_ssd_3d(delta_field, sigma_sq_field, gradient_field, target, lambda_param, displacement) energy = ssd.compute_energy_ssd_3d(delta_field) return energy
bsd-3-clause
-8,108,721,500,854,902,000
42.292365
81
0.584287
false
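A minimal usage sketch for the metric classes in the dipy registration file above (not part of the original file): these similarity metrics are meant to be handed to a diffeomorphic registration optimizer rather than called directly. The import paths and the SymmetricDiffeomorphicRegistration/optimize signature below follow the public dipy API as I understand it, and the toy volumes are invented for illustration; treat the whole block as an assumption-laden sketch, not an excerpt.

import numpy as np
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import CCMetric

# Toy 3D volumes: a cube shifted by two voxels between static and moving.
static = np.zeros((32, 32, 32))
static[10:22, 10:22, 10:22] = 1.0
moving = np.zeros((32, 32, 32))
moving[12:24, 12:24, 12:24] = 1.0
affine = np.eye(4)

metric = CCMetric(3, sigma_diff=2.0, radius=4)   # 3D normalized cross-correlation metric
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters=[10, 10, 5])
mapping = sdr.optimize(static, moving, affine, affine)
warped = mapping.transform(moving)               # moving image resampled into static space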
mmaelicke/scikit-gstat
skgstat/Kriging.py
1
17552
""" The kriging module offers only an Ordinary Kriging routine (OK) that can be used together with the skgstat.Variogram class. The usage of the class is inspired by the scipy.interpolate classes. """ import time import numpy as np from scipy.spatial.distance import squareform from scipy.linalg import solve as scipy_solve from numpy.linalg import solve as numpy_solve, LinAlgError, inv from multiprocessing import Pool from .Variogram import Variogram from .MetricSpace import MetricSpace, MetricSpacePair class LessPointsError(RuntimeError): pass class SingularMatrixError(LinAlgError): pass class IllMatrixError(RuntimeWarning): pass def inv_solve(a, b): return inv(a).dot(b) class OrdinaryKriging: def __init__( self, variogram, min_points=5, max_points=15, mode='exact', precision=100, solver='inv', n_jobs=1, perf=False, sparse=False, coordinates=None, values=None ): """Ordinary Kriging routine Ordinary kriging estimator derived from the given `Variogram <skgstat.Variogram>` class. To calculate estimations for unobserved locations, an instance of this class can either be called, or the `OrdinaryKriging.transform` method can be used. Parameters ---------- variogram : Variogram Variogram used to build the kriging matrix. Make sure that this instance is describing the spatial dependence in the data well, otherwise the kriging estimation will most likely produce bad estimations. min_points : int Minimum amount of points, that have to lie within the variogram's range. In case not enough points are available, the estimation will be rejected and a null value will be estimated. max_points : int Maximum amount of points, that will be considered for the estimation of one unobserved location. In case more points are available within the variogram's range, only the `max_points` closest will be used for estimation. Note that the kriging matrix will be an max_points x max_points matrix and large numbers do significantly increase the calculation time. mode : str Has to be one of 'exact' or 'estimate'. In exact mode (default) the variogram matrix will be calculated from scratch in each iteration. This gives an exact solution, but it is also slower. In estimate mode, a set of semivariances is pre-calculated and the closest value will be used. This is significantly faster, but the estimation quality is dependent on the given precision. precision : int Only needed if `mode='estimate'`. This is the number of pre-calculated in-range semivariances. If chosen too low, the estimation will be off, if too high the performance gain is limited. solver : str Do not change this argument n_jobs : int Number of processes to be started in multiprocessing. perf : bool If True, the different parts of the algorithm will record their processing time. This is meant to be used for optimization and will be removed in a future version. Do not rely on this argument. 
sparse : bool coordinates: numpy.ndarray, MetricSpace values: numpy.ndarray """ # store arguments to the instance if isinstance(variogram, Variogram): if coordinates is None: coordinates = variogram.coordinates if values is None: values = variogram.values variogram_descr = variogram.describe() if variogram_descr["model"] == "harmonize": variogram_descr["model"] = variogram._build_harmonized_model() variogram = variogram_descr self.sparse = sparse # general attributes self._minp = min_points self._maxp = max_points self.min_points = min_points self.max_points = max_points # general settings self.n_jobs = n_jobs self.perf = perf self.range = variogram['effective_range'] self.nugget = variogram['nugget'] self.sill = variogram['sill'] self.dist_metric = variogram["dist_func"] # coordinates and semivariance function if not isinstance(coordinates, MetricSpace): coordinates, values = self._remove_duplicated_coordinates(coordinates, values) coordinates = MetricSpace(coordinates.copy(), self.dist_metric, self.range if self.sparse else None) else: assert self.dist_metric == coordinates.dist_metric, "Distance metric of variogram differs from distance metric of coordinates" assert coordinates.max_dist is None or coordinates.max_dist == self.range, "Sparse coordinates must have max_dist == variogram.effective_range" self.values = values.copy() self.coords = coordinates self.gamma_model = Variogram.fitted_model_function(**variogram) self.z = None # calculation mode; self.range has to be initialized self._mode = mode self._precision = precision self._prec_dist = None self._prec_g = None self.mode = mode self.precision = precision # solver settings self._solver = solver self._solve = None self.solver = solver # initialize error counter self.singular_error = 0 self.no_points_error = 0 self.ill_matrix = 0 # performance counter if self.perf: self.perf_dist = list() self.perf_mat = list() self.perf_solv = list() def dist(self, x): return Variogram.wrapped_distance_function(self.dist_metric, x) @classmethod def _remove_duplicated_coordinates(cls, coords, values): """Extract the coordinates and values The coordinates array is checked for duplicates and only the first instance of a duplicate is used. Duplicated coordinates would result in duplicated rows in the variogram matrix and make it singular. 
""" c = coords v = values _, idx = np.unique(c, axis=0, return_index=True) # sort the index to preserve initial order, if no duplicates were found idx.sort() return c[idx], v[idx] @property def min_points(self): return self._minp @min_points.setter def min_points(self, value): # check the value if not isinstance(value, int): raise ValueError('min_points has to be an integer.') if value < 0: raise ValueError('min_points can\'t be negative.') if value > self._maxp: raise ValueError('min_points can\'t be larger than max_points.') # set self._minp = value @property def max_points(self): return self._maxp @max_points.setter def max_points(self, value): # check the value if not isinstance(value, int): raise ValueError('max_points has to be an integer.') if value < 0: raise ValueError('max_points can\'t be negative.') if value < self._minp: raise ValueError('max_points can\'t be smaller than min_points.') # set self._maxp = value @property def mode(self): return self._mode @mode.setter def mode(self, value): if value == 'exact': self._prec_g = None self._prec_dist = None elif value == 'estimate': self._precalculate_matrix() else: raise ValueError("mode has to be one of 'exact', 'estimate'.") self._mode = value @property def precision(self): return self._precision @precision.setter def precision(self, value): if not isinstance(value, int): raise TypeError('precision has to be of type int') if value < 1: raise ValueError('The precision has be be > 1') self._precision = value self._precalculate_matrix() @property def solver(self): return self._solver @solver.setter def solver(self, value): if value == 'numpy': self._solve = numpy_solve elif value == 'scipy': self._solve = scipy_solve elif value == 'inv': self._solve = inv_solve else: raise AttributeError("solver has to be ['inv', 'numpy', 'scipy']") self._solver = value def transform(self, *x): """Kriging returns an estimation of the observable for the given unobserved locations. Each coordinate dimension should be a 1D array. .. versionchanged:: 0.6.4 sigma array is now initialized with NaN, instead of empty. Parameters ---------- x : numpy.array, MetricSpace One 1D array for each coordinate dimension. Typically two or three array, x, y, (z) are passed for 2D and 3D Kriging Returns ------- Z : numpy.array Array of estimates """ # reset the internal error counters self.singular_error = 0 self.no_points_error = 0 self.ill_matrix = 0 # reset the internal performance counter if self.perf: self.perf_dist, self.perf_mat, self.perf_solv = [], [], [] if len(x) != 1 or not isinstance(x[0], MetricSpace): self.transform_coords = MetricSpace(np.column_stack(x).copy(), self.dist_metric, self.range if self.sparse else None) else: self.transform_coords = x[0] self.transform_coords_pair = MetricSpacePair(self.transform_coords, self.coords) # DEV: this is dirty, not sure how to do it better at the moment #self.sigma = np.empty(len(x[0])) self.sigma = np.ones(len(x[0])) * np.nan self.__sigma_index = 0 # if multi-core, than here if self.n_jobs is None or self.n_jobs == 1: z = np.fromiter(map(self._estimator, range(len(self.transform_coords))), dtype=float) else: def f(idxs): return self._estimator(idxs) with Pool(self.n_jobs) as p: z = p.starmap(f, range(len(self.transform_coords))) # print warnings if self.singular_error > 0: print('Warning: %d kriging matrices were singular.' % self.singular_error) if self.no_points_error > 0: print('Warning: for %d locations, not enough neighbors were ' 'found within the range.' 
% self.no_points_error) if self.ill_matrix > 0: print('Warning: %d kriging matrices were ill-conditioned.' ' The result may not be accurate.' % self.ill_matrix) # store the field in the instance itself self.z = np.array(z) return np.array(z) def _estimator(self, idx): """Estimation wrapper Wrapper around OrdinaryKriging._krige function to build the point of interest for arbitrary number of dimensions. SingularMatrixError and LessPointsError are handled and the error counters are increased. In both cases numpy.NaN will be used as estimate. ..versionchanged:: 0.6.4 sigma_index is now always incremented """ # indicate if this esimation raised an error did_error = False # reun estimation try: estimation, sigma = self._krige(idx) except SingularMatrixError: self.singular_error += 1 did_error = True except LessPointsError: self.no_points_error += 1 did_error = True except IllMatrixError: self.ill_matrix += 1 did_error = True # TODO: This is a side-effect and I need to re-design this part: if not did_error: self.sigma[self.__sigma_index] = sigma z = estimation else: # on error - Z* is NaN z = np.nan # in any case increment the sigma index counter self.__sigma_index += 1 return z def _krige(self, idx): r"""Algorithm Kriging algorithm for one point. This is the place, where the algorithm shall be changed and optimized. Parameters ---------- idx : int Index into self.transform_* arrays for an unobserved location Raises ------ SingularMatrixError: Raised if the kriging matrix is singular and therefore the equation system cannot be solved. LessPointsError: Raised if there are not the required minimum of points within the variogram's radius. Notes: ------ Z is calculated as follows: .. math:: \hat{Z} = \sum_i(w_i * z_i) where :math:`w_i` is the calulated kriging weight for the i-th point and :math:`z_i` is the observed value at that point. The kriging variance :math:`\sigma^2` (sigma) is calculate as follows: .. math:: \sigma^2 = \sum_i(w_i * \gamma(p_0 - p_i)) + \lambda where :math:`w_i` is again the weight, :math:`\gamma(p_0 - p_i)` is the semivairance of the distance between the unobserved location and the i-th observation. :math:`\lamda` is the Lagrange multiplier needed to minimize the estimation error. Returns ------- Z : float estimated value at p sigma : float kriging variance :math:`\sigma^2` for p. 
""" if self.perf: t0 = time.time() # get the point and index p = self.transform_coords.coords[idx, :] idx = self.transform_coords_pair.find_closest( idx, self.range, self._maxp ) # raise an error if not enough points are found if idx.size < self._minp: raise LessPointsError # finally find the points and values in_range = self.coords.coords[idx] values = self.values[idx] dist_mat = self.coords.diagonal(idx) # if performance is tracked, time this step if self.perf: t1 = time.time() self.perf_dist.append(t1 - t0) # build the kriging Matrix; needs N + 1 dimensionality if self.mode == 'exact': a = self._build_matrix(dist_mat) else: a = self._estimate_matrix(dist_mat) # add row a column of 1's n = len(in_range) a = np.concatenate((squareform(a), np.ones((n, 1))), axis=1) a = np.concatenate((a, np.ones((1, n + 1))), axis=0) # add lagrange multiplier a[-1, -1] = 0 if self.perf: t2 = time.time() self.perf_mat.append(t2 - t1) # build the matrix of solutions A _p = np.concatenate(([p], in_range)) _dists = self.dist(_p)[:len(_p) - 1] _g = self.gamma_model(_dists) b = np.concatenate((_g, [1])) # solve the system try: _lambda = self._solve(a, b) except LinAlgError as e: print(a) if str(e) == 'Matrix is singular.': raise SingularMatrixError else: raise e except RuntimeWarning as w: if 'Ill-conditioned matrix' in str(w): print(a) raise IllMatrixError else: raise w except ValueError as e: print('[DEBUG]: print variogram matrix and distance matrix:') print(a) print(_dists) raise e finally: if self.perf: t3 = time.time() self.perf_solv.append(t3 - t2) # calculate Kriging variance # sigma is the weights times the semi-variance to p0 # plus the lagrange factor sigma = sum(b[:-1] * _lambda[:-1]) + _lambda[-1] # calculate Z Z = _lambda[:-1].dot(values) # return return Z, sigma def _build_matrix(self, distance_matrix): # calculate the upper matrix return self.gamma_model(distance_matrix) def _precalculate_matrix(self): # pre-calculated distance self._prec_dist = np.linspace(0, self.range, self.precision) # pre-calculate semivariance self._prec_g = self.gamma_model(self._prec_dist) def _estimate_matrix(self, distance_matrix): # transform to the 'precision-space', which matches with the index dist_n = ((distance_matrix / self.range) * self.precision).astype(int) # create the gamma array g = np.ones(dist_n.shape) * -1 # find all indices inside and outside the range out_ = np.where(dist_n >= self.precision)[0] in_ = np.where(dist_n < self.precision)[0] # all semivariances outside are set to sill, # the inside are estimated from the precompiled g[out_] = self.sill g[in_] = self._prec_g[dist_n[in_]] return g
mit
-6,802,020,258,376,292,000
31.868914
155
0.582384
false
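A hedged usage sketch for the OrdinaryKriging class above, assuming the scikit-gstat public API (a Variogram fitted on scattered samples, then kriging estimates and variances on a regular grid). The sample data are synthetic and the parameter choices are illustrative only.

import numpy as np
from skgstat import Variogram, OrdinaryKriging

rng = np.random.default_rng(42)
coords = rng.uniform(0, 100, size=(150, 2))
values = np.sin(coords[:, 0] / 20.0) + rng.normal(0, 0.1, size=150)

V = Variogram(coords, values, model='spherical', n_lags=15)
ok = OrdinaryKriging(V, min_points=5, max_points=15, mode='exact')

# Estimate on a 40 x 40 grid; after transform(), ok.sigma holds the kriging
# variance for each estimated location (NaN where estimation failed).
xx, yy = np.meshgrid(np.linspace(0, 100, 40), np.linspace(0, 100, 40))
field = ok.transform(xx.ravel(), yy.ravel()).reshape(xx.shape)
sigma = ok.sigma.reshape(xx.shape)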
ghislainv/deforestprob
forestatrisk/niceplot.py
1
3199
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # author :Ghislain Vieilledent # email :ghislain.vieilledent@cirad.fr, ghislainv@gmail.com # web :https://ecology.ghislainv.fr # python_version :>=2.7 # license :GPLv3 # ============================================================================== from __future__ import division, print_function # Python 3 compatibility import numpy as np from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt from osgeo import gdal from matplotlib.colors import ListedColormap, LinearSegmentedColormap # color_map def color_map(plot_type="prob"): if (plot_type == "prob"): # Colormap colors = [] cmax = 255.0 # float for division vmax = 65535.0 # float for division colors.append((0, (0, 0, 0, 0))) # transparent colors.append((1 / vmax, (34 / cmax, 139 / cmax, 34 / cmax, 1))) colors.append((45000 / vmax, (1, 165 / cmax, 0, 1))) # orange colors.append((55000 / vmax, (1, 0, 0, 1))) # red colors.append((1, (0, 0, 0, 1))) # black color_map = LinearSegmentedColormap.from_list(name="mycm", colors=colors, N=65535, gamma=1.0) elif (plot_type == "fcc"): # Colormap colors = [] cmax = 255.0 # float for division col_defor = (227, 26, 28, 255) col_defor = tuple(np.array(col_defor) / cmax) colors.append(col_defor) # default is red colors.append((51 / cmax, 160 / cmax, 44 / cmax, 1)) # forest green colors.append((0, 0, 0, 0)) # transparent color_map = ListedColormap(colors) elif (plot_type == "forest"): # Colormap colors = [] cmax = 255.0 # float for division colors.append((51 / cmax, 160 / cmax, 44 / cmax, 1)) # forest green colors.append((0, 0, 0, 0)) # transparent color_map = ListedColormap(colors) return(color_map) # raster2array def raster2array(input_prob_raster, n_overview=0): # Load raster and band rasterR = gdal.Open(input_prob_raster) rasterB = rasterR.GetRasterBand(1) # Get data from finest overview ov_band = rasterB.GetOverview(n_overview) ov_arr = ov_band.ReadAsArray() # Dereference driver rasterB = None del(rasterR) # Return figure return(ov_arr) # create new figure, axes instances. fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # setup mercator map projection. m = Basemap(llcrnrlon=-20., llcrnrlat=-40., urcrnrlon=60., urcrnrlat=40., resolution="l", projection="merc", lat_ts=20.) # Draw deforestation probability m.imshow(raster2array("prob.tif", n_overview=0), cmap=color_map(plot_type="prob"), origin="upper") # Draw country m.drawcoastlines() m.drawcountries() # draw parallels m.drawparallels(np.arange(-40, 40, 20), labels=[1, 1, 0, 1]) # draw meridians m.drawmeridians(np.arange(-20, 60, 20), labels=[1, 1, 0, 1]) ax.set_title("Test") plt.show() # End
gpl-3.0
5,391,069,074,789,299,000
33.031915
80
0.563926
false
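A small standalone sketch of the overview-reading pattern used by raster2array() in the script above. It assumes a GDAL-readable raster with overviews already built (e.g. via gdaladdo); the function name and the fallback branch are my own additions, not part of the original code.

from osgeo import gdal

def read_overview(path, n_overview=0):
    """Return one pre-built overview band of a raster as a numpy array."""
    ds = gdal.Open(path)
    band = ds.GetRasterBand(1)
    if band.GetOverviewCount() == 0:
        arr = band.ReadAsArray()          # no overviews built: fall back to full resolution
    else:
        arr = band.GetOverview(n_overview).ReadAsArray()
    # Dereference band and dataset so GDAL releases the file handle
    band = None
    ds = None
    return arr

# arr = read_overview("prob.tif", n_overview=0)   # same raster name the script above expects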
astrobayes/BMAD
chapter_5/code_5.28.py
1
1554
# From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press # (c) 2017, Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida # # you are kindly asked to include the complete citation if you used this # material in a publication # Code 5.28 - Binomial model in Python using Stan # 1 response (y) and 2 explanatory variables (x1, x2) import numpy as np import statsmodels.api as sm import pystan from scipy.stats import uniform, poisson, binom # Data np.random.seed(33559) # set seed to replicate example nobs= 2000 # number of obs in model m = 1 + poisson.rvs(5, size=nobs) x1 = uniform.rvs(size=nobs) # random uniform variable x2 = uniform.rvs(size=nobs) beta0 = -2.0 beta1 = -1.5 beta2 = 3.0 xb = beta0 + beta1 * x1 + beta2 * x2 exb = np.exp(xb) p = exb / (1 + exb) y = binom.rvs(m, p) # create y as adjusted mydata = {} mydata['K'] = 3 mydata['X'] = sm.add_constant(np.column_stack((x1,x2))) mydata['N'] = nobs mydata['Y'] = y mydata['m'] = m # Fit stan_code = """ data{ int<lower=0> N; int<lower=0> K; matrix[N, K] X; int Y[N]; int m[N]; } parameters{ vector[K] beta; } transformed parameters{ vector[N] eta; vector[N] p; eta = X * beta; for (i in 1:N) p[i] = inv_logit(eta[i]); } model{ Y ~ binomial(m, p); } """ fit = pystan.stan(model_code=stan_code, data=mydata, iter=5000, chains=3, warmup=3000, n_jobs=3) # Output nlines = 8 output = str(fit).split('\n') for item in output[:nlines]: print(item)
gpl-3.0
-7,941,266,955,851,620,000
20.887324
73
0.618404
false
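A cross-check sketch, not part of the book code: the same simulated data can be run through a frequentist binomial GLM, whose coefficient estimates should land near the true values (-2.0, -1.5, 3.0). The two-column (successes, failures) endog is the statsmodels convention for a varying number of trials per observation.

import numpy as np
import statsmodels.api as sm
from scipy.stats import uniform, poisson, binom

np.random.seed(33559)
nobs = 2000
m = 1 + poisson.rvs(5, size=nobs)                 # number of trials per observation
x1 = uniform.rvs(size=nobs)
x2 = uniform.rvs(size=nobs)
xb = -2.0 - 1.5 * x1 + 3.0 * x2
p = np.exp(xb) / (1 + np.exp(xb))
y = binom.rvs(m, p)                               # successes out of m trials

X = sm.add_constant(np.column_stack((x1, x2)))
# endog as (successes, failures) lets the Binomial family handle varying m
glm = sm.GLM(np.column_stack((y, m - y)), X, family=sm.families.Binomial())
print(glm.fit().params)                           # expected to be close to (-2.0, -1.5, 3.0)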
nortikin/sverchok
nodes/analyzer/path_length_2.py
2
6715
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### import bpy from bpy.props import BoolProperty from sverchok.node_tree import SverchCustomTreeNode from sverchok.data_structure import updateNode, match_long_repeat import numpy as np def edges_aux(vertices): '''create auxiliary edges array ''' v_len = [len(v) for v in vertices] v_len_max = max(v_len) np_edges = np.add.outer(np.arange(v_len_max - 1), [0, 1]) return [np_edges] def edges_length(meshes, need_total=False, need_cumsum=False, need_cumsum1=False, as_numpy=False): '''calculate edges length ''' lengths_out = [] cumsum_out = [] cumsum_1_out = [] total_lengths_out = [] for vertices, edges in zip(*meshes): np_verts = np.array(vertices) if type(edges[0]) in (list, tuple): np_edges = np.array(edges) else: np_edges = edges[:len(vertices)-1, :] vect = np_verts[np_edges[:, 0], :] - np_verts[np_edges[:, 1], :] lengths = np.linalg.norm(vect, axis=1) if need_cumsum or need_cumsum1 or need_total: total_length = np.sum(lengths)[np.newaxis] else: total_length = None if need_cumsum or need_cumsum1: cumsum = np.cumsum(np.insert(lengths, 0, 0)) else: cumsum = None if need_cumsum1 and total_length is not None and cumsum is not None: cumsum_1 = cumsum / total_length else: cumsum_1 = None if not as_numpy: lengths = lengths.tolist() if cumsum is not None: cumsum = cumsum.tolist() if cumsum_1 is not None: cumsum_1 = cumsum_1.tolist() if total_length is not None: total_length = total_length.tolist() total_lengths_out.append(total_length) lengths_out.append(lengths) cumsum_out.append(cumsum) cumsum_1_out.append(cumsum_1) return lengths_out, cumsum_out, cumsum_1_out, total_lengths_out class SvPathLengthMk2Node(bpy.types.Node, SverchCustomTreeNode): ''' Triggers: Path / Edges length Tooltip: Measures the length of a path or the length of its segments ''' bl_idname = 'SvPathLengthMk2Node' bl_label = 'Path Length' sv_icon = 'SV_PATH_LENGTH' output_numpy : BoolProperty( name='Output NumPy', description='output NumPy arrays', default=False, update=updateNode) def draw_buttons_ext(self, context, layout): '''draw buttons on the N-panel''' layout.prop(self, "output_numpy", toggle=False) def sv_init(self, context): '''create sockets''' self.inputs.new('SvVerticesSocket', "Vertices") self.inputs.new('SvStringsSocket', "Edges") self.outputs.new('SvStringsSocket', "SegmentLength") self.outputs.new('SvStringsSocket', "TotalLength") self.outputs.new('SvStringsSocket', "CumulativeSum") self.outputs.new('SvStringsSocket', "CumulativeSum1") def get_data(self): '''get all data from sockets''' si = self.inputs vertices = si['Vertices'].sv_get() if si['Edges'].is_linked: edges_in = si['Edges'].sv_get() else: edges_in = edges_aux(vertices) return match_long_repeat([vertices, edges_in]) def process(self): '''main node function called every update''' if not 
any(socket.is_linked for socket in self.outputs): return meshes = self.get_data() lengths_out, cumsum_out, cumsum_1_out, total_lengths_out = edges_length(meshes, need_total = self.outputs['TotalLength'].is_linked, need_cumsum = self.outputs['CumulativeSum'].is_linked, need_cumsum1 = self.outputs['CumulativeSum1'].is_linked, as_numpy=self.output_numpy) self.outputs['SegmentLength'].sv_set(lengths_out) self.outputs['TotalLength'].sv_set(total_lengths_out) self.outputs['CumulativeSum'].sv_set(cumsum_out) self.outputs['CumulativeSum1'].sv_set(cumsum_1_out) def migrate_links_from(self, old_node, operator): if old_node.bl_idname != 'SvPathLengthNode': return tree = self.id_data old_in_links = [link for link in tree.links if link.to_node == old_node] old_out_links = [link for link in tree.links if link.from_node == old_node] for old_link in old_in_links: new_target_socket_name = operator.get_new_input_name(old_link.to_socket.name) if new_target_socket_name in self.inputs: new_target_socket = self.inputs[new_target_socket_name] new_link = tree.links.new(old_link.from_socket, new_target_socket) else: self.debug("New node %s has no input named %s, skipping", self.name, new_target_socket_name) tree.links.remove(old_link) for old_link in old_out_links: if old_node.segment: new_source_socket_name = 'SegmentLength' else: new_source_socket_name = 'TotalLength' # We have to remove old link before creating new one # Blender would not allow two links pointing to the same target socket old_target_socket = old_link.to_socket tree.links.remove(old_link) if new_source_socket_name in self.outputs: new_source_socket = self.outputs[new_source_socket_name] new_link = tree.links.new(new_source_socket, old_target_socket) else: self.debug("New node %s has no output named %s, skipping", self.name, new_source_socket_name) def register(): '''register class in Blender''' bpy.utils.register_class(SvPathLengthMk2Node) def unregister(): '''unregister class in Blender''' bpy.utils.unregister_class(SvPathLengthMk2Node)
gpl-3.0
-5,332,234,326,293,690,000
35.693989
109
0.618317
false
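A standalone sketch of the length computation performed by edges_length() in the node above, stripped of the Blender/Sverchok machinery. The toy path and the expected values in the comments are my own illustration.

import numpy as np

verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [1.0, 1.0, 0.0],
                  [1.0, 1.0, 2.0]])
# Consecutive-vertex edges, as edges_aux() builds them when no edges are supplied.
edges = np.column_stack((np.arange(len(verts) - 1), np.arange(1, len(verts))))

vect = verts[edges[:, 0]] - verts[edges[:, 1]]
segment = np.linalg.norm(vect, axis=1)            # [1. 1. 2.]
total = segment.sum()                             # 4.0
cumsum = np.cumsum(np.insert(segment, 0, 0.0))    # [0. 1. 2. 4.]
cumsum1 = cumsum / total                          # [0. 0.25 0.5 1.]
print(segment, total, cumsum, cumsum1)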
seung-lab/neuroglancer
python/examples/flood_filling_simulation.py
1
7883
#!/usr/bin/env python """Example of display interactive flood-filling "inference" results. shift+mousedown0 triggers the simulated flood filling to start with an initial seed at the mouse position. The computed mask values are displayed as an image, while the seed points chosen are displayed as point annotations. keyt causes the simulated flood filling to stop. In this example, the mask values are actually just computed as a distance transform of the ground truth segmentation, and the seed points are restricted to the ground truth segment and assign random priorities. In actual use, this same visualization approach can be used to display the actual mask and seed points computed by a flood filling TensorFlow model. The cloudvolume library (https://github.com/seung-lab/cloud-volume) is used to retrieve patches of the ground truth volume. The zarr library is used to represent the sparse in-memory array containing the computed inference results that are displayed in neuroglancer. """ import random import time import threading import neuroglancer import cloudvolume import zarr import numpy as np import scipy.ndimage class InteractiveInference(object): def __init__(self): viewer = self.viewer = neuroglancer.Viewer() self.gt_vol = cloudvolume.CloudVolume( 'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth', mip=0, bounded=True, progress=False, provenance={}) viewer.actions.add('start-fill', self._start_fill_action) viewer.actions.add('stop-fill', self._stop_fill_action) with viewer.config_state.txn() as s: s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill' s.input_event_bindings.data_view['keyt'] = 'stop-fill' with viewer.txn() as s: s.layers['image'] = neuroglancer.ImageLayer( source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image', ) s.layers['ground_truth'] = neuroglancer.SegmentationLayer( source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth', ) s.layers['ground_truth'].visible = False self.flood_fill_event = None def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event): initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2])) gt_vol_zarr = zarr.zeros( self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint64) gt_blocks_seen = set() block_size = np.array((64, 64, 64), np.int64) def fetch_gt_block(block): spos = block * block_size epos = spos + block_size slice_expr = np.s_[int(spos[0]):int(epos[0]), int(spos[1]):int(epos[1]), int(spos[2]):int(epos[2])] rev_slice_expr = np.s_[int(spos[2]):int(epos[2]), int(spos[1]):int(epos[1]), int(spos[0]):int(epos[0])] gt_data = np.transpose(self.gt_vol[slice_expr][..., 0], (2, 1, 0)) gt_vol_zarr[rev_slice_expr] = gt_data def get_patch(spos, epos): spos = np.array(spos) epos = np.array(epos) sblock = spos // block_size eblock = (epos - 1) // block_size for blockoff in np.ndindex(tuple(eblock - sblock + 1)): block = np.array(blockoff) + sblock block_tuple = tuple(block) if block_tuple in gt_blocks_seen: continue gt_blocks_seen.add(block_tuple) fetch_gt_block(block) rev_slice_expr = np.s_[int(spos[2]):int(epos[2]), int(spos[1]):int(epos[1]), int(spos[0]):int(epos[0])] result = gt_vol_zarr[rev_slice_expr] return result segment_id = self.gt_vol[initial_pos][0] patch_size = np.array((33, ) * 3, np.int64) lower_bound = patch_size // 2 upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2 d = 8 seen = set() q = [] last_invalidate = [time.time()] invalidate_interval = 3 def enqueue(pos): if 
np.any(pos < lower_bound) or np.any(pos >= upper_bound): return if pos in seen: return seen.add(pos) q.append(pos) def update_view(): if event.is_set(): return cur_time = time.time() if cur_time < last_invalidate[0] + invalidate_interval: return last_invalidate[0] = cur_time inf_volume.invalidate() with self.viewer.txn() as s: s.layers['points'].annotations = [ neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen) ] def process_pos(pos): spos = pos - patch_size // 2 epos = spos + patch_size rev_slice_expr = np.s_[int(spos[2]):int(epos[2]), int(spos[1]):int(epos[1]), int(spos[0]):int(epos[0])] gt_data = get_patch(spos, epos) mask = gt_data == segment_id for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)): if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue new_pos = np.array(pos) + np.array(offset) enqueue(tuple(new_pos)) dist_transform = scipy.ndimage.morphology.distance_transform_edt(~mask) inf_results[rev_slice_expr] = 1 + np.cast[np.uint8]( np.minimum(dist_transform, 5) / 5.0 * 254) self.viewer.defer_callback(update_view) enqueue(initial_pos) while len(q) > 0 and not event.is_set(): i = random.randint(0, len(q) - 1) pos = q[i] q[i] = q[-1] del q[-1] process_pos(pos) self.viewer.defer_callback(update_view) def _stop_flood_fill(self): if self.flood_fill_event is not None: self.flood_fill_event.set() self.flood_fill_event = None def _start_flood_fill(self, pos): self._stop_flood_fill() inf_results = zarr.zeros( self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint8) inf_volume = neuroglancer.LocalVolume( data=inf_results, voxel_size=list(self.gt_vol.resolution)) with self.viewer.txn() as s: s.layers['points'] = neuroglancer.AnnotationLayer() s.layers['inference'] = neuroglancer.ImageLayer( source=inf_volume, shader=''' void main() { float v = toNormalized(getDataValue(0)); vec4 rgba = vec4(0,0,0,0); if (v != 0.0) { rgba = vec4(colormapJet(v), 1.0); } emitRGBA(rgba); } ''', ) self.flood_fill_event = threading.Event() t = threading.Thread( target=self._do_flood_fill, kwargs=dict( initial_pos=pos, inf_results=inf_results, inf_volume=inf_volume, event=self.flood_fill_event, )) t.daemon = True t.start() def _start_fill_action(self, action_state): pos = action_state.mouse_voxel_coordinates if pos is None: return self._start_flood_fill(pos) def _stop_fill_action(self, action_state): self._stop_flood_fill() if __name__ == '__main__': inf = InteractiveInference() print(inf.viewer) while True: time.sleep(1000)
apache-2.0
-612,087,195,509,239,300
35.665116
96
0.554231
false
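A short sketch of the simulated "inference" used in the example above: the ground-truth mask for a segment is turned into 8-bit display values through a capped Euclidean distance transform, so 0 stays "no data" and values 1..255 encode distance from the segment. The toy mask below is invented for illustration.

import numpy as np
import scipy.ndimage

# Stand-in for `gt_data == segment_id` on one 33**3 patch.
mask = np.zeros((33, 33, 33), dtype=bool)
mask[10:25, 10:25, 10:25] = True

dist = scipy.ndimage.distance_transform_edt(~mask)     # 0 inside the segment, grows outside
display = (1 + np.minimum(dist, 5.0) / 5.0 * 254).astype(np.uint8)
print(display.min(), display.max())                     # 1 inside the segment, up to 255 far away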
xfxf/veyepar
tests/upload_log_analyser.py
4
2988
#!/usr/bin/python import datetime import csv import StringIO import pprint # seconds from midnight, timestamp, bytes sent log="""56521.93324, 2011-06-24 15:42:01.933240, 0 56521.933569, 2011-06-24 15:42:01.933569, 1292 56521.933722, 2011-06-24 15:42:01.933722, 1488 56522.022575, 2011-06-24 15:42:02.022575, 16488 56522.023069, 2011-06-24 15:42:02.023069, 31488 56522.03704, 2011-06-24 15:42:02.037040, 46488 56522.079995, 2011-06-24 15:42:02.079995, 61488 56522.080119, 2011-06-24 15:42:02.080119, 76488 56522.116328, 2011-06-24 15:42:02.116328, 91488""" reader = csv.reader(open('uploadlog.csv', 'rb')) # reader = csv.reader(StringIO.StringIO(log)) i=0 dat = [] last_sec = 0 for row in reader: sec=float(row[0]) bytes_sent = int(row[2]) if last_sec: timestamp = datetime.datetime.strptime(row[1],' %Y-%m-%d %H:%M:%S.%f') duration=sec - last_sec chunk = bytes_sent - last_bytes bps = chunk/duration dat.append( [sec-first_sec, chunk, duration, bps, timestamp] ) else: first_sec = float(row[0]) last_sec = sec last_bytes = bytes_sent if sec-first_sec > 30: break # pprint.pprint(dat) # seconds from first log entry, # bytes sent for that entry, # seconds to send those bytes # bps (bytes/seconds) """ [[0.00032900000223889947, 1292, 0.00032900000223889947, 3927051.645008286], [0.00048200000310316682, 196, 0.00015300000086426735, 1281045.7443976079], [0.089335000004211906, 15000, 0.08885300000110874, 168818.16033012758], [0.089829000004101545, 15000, 0.00049399999988963827, 30364372.476419158], [0.10380000000441214, 15000, 0.013971000000310596, 1073652.5660057638], [0.14675500000157626, 15000, 0.042954999997164123, 349202.65396322421], [0.14687899999989895, 15000, 0.00012399999832268804, 120967743.57177937], [0.18308799999795156, 15000, 0.036208999998052604, 414261.6476789398]] """ xys = [ (int(row[0]*10000),int(row[3])) for row in dat ] # pprint.pprint( xys ) import os, tempfile, random, cStringIO os.environ['MPLCONfigureDIR'] = tempfile.mkdtemp() from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure def plot(title='title',xlab='x',ylab='y', data={'xxx':[(0,0),(1,1),(1,2),(3,3)], 'yyy':[(0,0,.2),(2,1,0.2),(2,2,0.2),(3,3,0.2)]}, filename='test.png'): figure=Figure(frameon=False) axes=figure.add_subplot(111) if title: axes.set_title(title) if xlab: axes.set_xlabel(xlab) if ylab: axes.set_ylabel(ylab) keys=sorted(data) for key in keys: stream = data[key] (x,y)=([],[]) for point in stream: x.append(point[0]) y.append(point[1]) axes.plot(x, y, linewidth="2") canvas=FigureCanvas(figure) stream=open(filename,'wb') canvas.print_png(stream) points=[(3, 3927051), (4, 1281045), (893, 168818),] # plot(data={'mypoints':points},filename='test.png') plot(data={'mypoints':xys},filename='test.png')
mit
-1,260,176,071,623,473,000
31.129032
78
0.676372
false
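The upload log analysed in the record above is a CSV of (seconds from midnight, timestamp, cumulative bytes sent), and the per-chunk throughput is simply the byte delta divided by the time delta between consecutive rows. A small Python 3 sketch of that calculation follows; the three sample rows are copied from the record, everything else is illustrative.

import csv, io

LOG = """56521.93324, 2011-06-24 15:42:01.933240, 0
56521.933569, 2011-06-24 15:42:01.933569, 1292
56522.022575, 2011-06-24 15:42:02.022575, 16488"""

rows = [(float(sec), int(sent)) for sec, _stamp, sent in csv.reader(io.StringIO(LOG))]
for (t0, b0), (t1, b1) in zip(rows, rows[1:]):
    print("%.4f s after start: %d bytes in %.6f s -> %.0f bytes/s"
          % (t1 - rows[0][0], b1 - b0, t1 - t0, (b1 - b0) / (t1 - t0)))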
WillArmentrout/galSims
plotting/ClusterAngular.py
1
8649
import matplotlib.pyplot as plt
import math
import csv
import argparse

'''parser = argparse.ArgumentParser()
parser.add_argument("iterTotal", type=int, help="Number of Clustering Iterations")
args = parser.parse_args()
iterTot = args.iterTotal'''

# Prompt User for number of iterations of clustering
iterTot = 1 #random number

# Open CSV File
datafile = open(r'C:\Users\newye\OneDrive\Documents\GitHub\galSims\misc\3DHiiRegions.csv', 'r')

csvFile = []
newFile = []
removeList = []
for row in datafile:
    newFile.append(row.strip().split(','))
originalSize = len(newFile)

# Save Galactic Radius Info from CSV to new list
(iterNum,index,target,engulf,overlap,separate)=(0,0,0,0,0,0)
sunPos = 8.4
sunHeight = 0.02
maxVelo = 10 # Sets max velocity difference in clustered sources to be (x) km/s

while iterNum < iterTot :
    csvFile = newFile
    newFile = []
    while (target < len(csvFile)) :
        xTarget = float(csvFile[target][1])
        yTarget = float(csvFile[target][2])
        zTarget = float(csvFile[target][3])
        massTarget = float(csvFile[target][4])
        lumTarget = float(csvFile[target][5])
        ageTarget = float(csvFile[target][6])
        radTarget = float(csvFile[target][7])
        lTarget = float(csvFile[target][8])*math.pi/180
        veloTarget = float(csvFile[target][9])
        regNumTarget = float(csvFile[target][10])
        bTarget = float(csvFile[target][11])*math.pi/180
        index = 0
        while (index < len(csvFile)) :
            xIndex = float(csvFile[index][1])
            yIndex = float(csvFile[index][2])
            zIndex = float(csvFile[index][3])
            massIndex = float(csvFile[index][4])
            lumIndex = float(csvFile[index][5])
            ageIndex = float(csvFile[index][6])
            radIndex = float(csvFile[index][7])
            lIndex = float(csvFile[target][8])*math.pi/180
            veloIndex = float(csvFile[index][9])
            regNumIndex = float(csvFile[target][10])
            bIndex = float(csvFile[index][11])*math.pi/180
            indexDist = pow(pow(xIndex,2)+pow(yIndex-sunPos,2),0.5)*1000
            targetDist = pow(pow(xTarget,2)+pow(yTarget-sunPos,2),0.5)*1000
            #print(str(bTarget) + " " + str(bIndex) + " " + str(lTarget) + " " + str(lIndex))
            try :
                angDist = math.acos(math.sin(bTarget)*math.sin(bIndex)+math.cos(bTarget)*math.cos(bIndex)*math.cos(lTarget-lIndex))
            except :
                print("angDist problem")
            #print(angDist)
            angRadIndex = math.atan(radIndex/indexDist)
            angRadTarget = math.atan(radTarget/targetDist)
            # IN FUTURE SIMULATIONS : Throw away overlapping/engulfed regions
            # and repopulate. I would imagine these to be fairly rare.
            # One region is totally engulfed by the other.
            if (abs(veloTarget-veloIndex) < maxVelo) and (index != target) :
                if angDist < abs(angRadIndex - angRadTarget) : # Region is engulfed
                    if (angRadIndex - angRadTarget) < 0 : # Target is larger
                        xNew = xTarget
                        yNew = yTarget
                        zNew = zTarget
                        galRad = pow(pow(xNew,2)+pow(yNew,2),.5)
                        radNew = radTarget
                        effMass = massTarget + massIndex
                        lumNew = lumIndex + lumTarget
                        ageNew = (massTarget*ageTarget+massIndex*ageIndex)/(massTarget+massIndex)
                        ## NEED A BETTER WAY TO DEAL WITH LUMINOSITIES
                        try :
                            veloNew = (lumTarget*veloTarget+lumIndex*veloIndex)/(lumTarget+lumIndex)
                        except :
                            veloNew = veloTarget
                            print ("Velo Problem" + str(index))
                        sunDist = pow(pow(xNew,2)+pow(yNew-sunPos,2),0.5)
                        lNew = math.copysign(math.acos((pow(sunDist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*sunDist))*180/math.pi,xNew)
                        bNew = math.atan((zNew-sunHeight)/sunDist)
                    else : # Index is larger
                        xNew = xIndex
                        yNew = yIndex
                        zNew = zIndex
                        galRad = pow(pow(xNew,2)+pow(yNew,2),.5)
                        radNew = radIndex
                        effMass = massTarget + massIndex
                        lumNew = lumIndex + lumTarget
                        ageNew = (massTarget*ageTarget+massIndex*ageIndex)/(massTarget+massIndex)
                        try :
                            veloNew = (lumTarget*veloTarget+lumIndex*veloIndex)/(lumTarget+lumIndex)
                        except :
                            veloNew = veloTarget
                            print ("Velo Problem" + str(index))
                        sunDist = pow(pow(xNew,2)+pow(yNew-sunPos,2),0.5)
                        lNew = math.copysign(math.acos((pow(sunDist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*sunDist))*180/math.pi,xNew)
                        bNew = math.atan((zNew-sunHeight)/sunDist)
                    removeList.append(index)
                    removeList.append(target)
                    engulf += 1
                    index = int(len(csvFile)) # Skip remaining regions
                    newFile.append([galRad,xNew,yNew,zNew,effMass,lumNew,ageNew,radNew,lNew,veloNew,regNumTarget,bNew]) # Add new region to end of file.
                    # print "Engulf : " + str(target)
                # Regions overlap. Place new region in barycenter of old regions.
                elif angDist < abs(angRadTarget + angRadIndex) :
                    xNew = (xTarget*massTarget + xIndex*massIndex)/(massTarget+massIndex)
                    yNew = (yTarget*massTarget + yIndex*massIndex)/(massTarget+massIndex)
                    zNew = (zTarget*massTarget + zIndex*massIndex)/(massTarget+massIndex)
                    galRad = pow(pow(xNew,2)+pow(yNew,2),.5)
                    radNew = (radTarget + radIndex + angDist)/2
                    effMass = massTarget + massIndex
                    lumNew = lumTarget + lumIndex
                    ageNew = (massTarget*ageTarget+massIndex*ageIndex)/(massTarget+massIndex)
                    try :
                        veloNew = (lumTarget*veloTarget+lumIndex*veloIndex)/(lumTarget+lumIndex)
                    except :
                        veloNew = veloTarget
                        print ("Velo Problem" + str(index))
                    sunDist = pow(pow(xNew,2)+pow(yNew-sunPos,2),0.5)
                    lNew = math.copysign(math.acos((pow(sunDist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*sunDist))*180/math.pi,xNew)
                    bNew = math.atan((zNew-sunHeight)/sunDist)
                    removeList.append(index)
                    removeList.append(target)
                    overlap += 1
                    index = int(len(csvFile)) # Skip remaining regions
                    newFile.append([galRad,xNew,yNew,zNew,effMass,lumNew,ageNew,radNew,lNew,veloNew,regNumTarget,bNew])
                    # print "Overlap : " + str(target)
                # Regions don't interact.
                elif int(index) == int(len(csvFile)-1) :
                    separate += 1
                    galRad = pow(pow(xIndex,2)+pow(yIndex,2),.5)
                    newFile.append([galRad,xIndex,yIndex,zIndex,massIndex,lumIndex,ageIndex,radIndex,lIndex,veloIndex,regNumIndex,bIndex])
            if int(index) == int(len(csvFile)-1) :
                i = 0
                while i < len(removeList):
                    try:
                        b=removeList.index(i)
                        # print csvFile[b]
                        del csvFile[b]
                    except:
                        pass
                    i += 1
            # newFile.extend(csvFile)
            index += 1
        target += 1
    iterNum += 1

with open(r"C:/Users/newye/OneDrive/Documents/GitHub/galSims/misc/3DHiiRegionsAngularCombine.csv", "w", newline = "") as f:
    writer = csv.writer(f)
    writer.writerows(newFile)

print ("Engulfed Regions : " + str(float(engulf*100/len(csvFile))) + "%")
print ("Overlapped Regions : " + str(float(overlap*100/len(csvFile))) + "%")
print ("Separate Regions : " + str(float(separate*100/len(csvFile))) + "%")
print (engulf)
print (overlap)
print (separate)
print (len(csvFile))
gpl-2.0
4,347,766,149,789,477,400
47.864407
152
0.530582
false
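The clustering script in the record above decides whether two HII regions interact by comparing their angular separation on the sky, computed with the spherical law of cosines over Galactic longitude l and latitude b, against the sum or difference of their angular radii. A self-contained sketch of that separation formula follows; the function name angular_separation is illustrative.

import math

def angular_separation(l1, b1, l2, b2):
    # All angles in radians; returns the great-circle separation in radians.
    # (For identical directions, floating-point error can push the argument
    # slightly above 1 and make acos raise, which the script above guards with try/except.)
    return math.acos(math.sin(b1) * math.sin(b2)
                     + math.cos(b1) * math.cos(b2) * math.cos(l1 - l2))

# Two directions one degree apart in longitude on the Galactic plane:
sep = angular_separation(math.radians(10.0), 0.0, math.radians(11.0), 0.0)
print(math.degrees(sep))  # ~1.0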
vascotenner/holoviews
holoviews/element/comparison.py
1
24350
""" Helper classes for comparing the equality of two HoloViews objects. These classes are designed to integrate with unittest.TestCase (see the tests directory) while making equality testing easily accessible to the user. For instance, to test if two Matrix objects are equal you can use: Comparison.assertEqual(matrix1, matrix2) This will raise an AssertionError if the two matrix objects are not equal, including information regarding what exactly failed to match. Note that this functionality could not be provided using comparison methods on all objects as comparison opertors only return Booleans and thus would not supply any information regarding *why* two elements are considered different. """ import numpy as np from unittest.util import safe_repr from unittest import TestCase from numpy.testing import assert_array_equal, assert_array_almost_equal from . import * # noqa (All Elements need to support comparison) from ..core import (Element, Empty, AdjointLayout, Overlay, Dimension, HoloMap, Dimensioned, Layout, NdLayout, NdOverlay, GridSpace, DynamicMap, GridMatrix) from ..core.options import Options, Cycle from ..interface.pandas import DFrame as PandasDFrame from ..interface.pandas import DataFrameView from ..interface.seaborn import DFrame, Bivariate, Distribution, \ Regression, TimeSeries class ComparisonInterface(object): """ This class is designed to allow equality testing to work seamlessly with unittest.TestCase as a mix-in by implementing a compatible interface (namely the assertEqual method). The assertEqual class method is to be overridden by an instance method of the same name when used as a mix-in with TestCase. The contents of the equality_type_funcs dictionary is suitable for use with TestCase.addTypeEqualityFunc. """ equality_type_funcs = {} failureException = AssertionError @classmethod def simple_equality(cls, first, second, msg=None): """ Classmethod equivalent to unittest.TestCase method (longMessage = False.) """ if not first==second: standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second)) raise cls.failureException(msg or standardMsg) @classmethod def assertEqual(cls, first, second, msg=None): """ Classmethod equivalent to unittest.TestCase method """ asserter = None if type(first) is type(second): asserter = cls.equality_type_funcs.get(type(first)) try: basestring = basestring # Python 2 except NameError: basestring = str # Python 3 if asserter is not None: if isinstance(asserter, basestring): asserter = getattr(cls, asserter) if asserter is None: asserter = cls.simple_equality if msg is None: asserter(first, second) else: asserter(first, second, msg=msg) class Comparison(ComparisonInterface): """ Class used for comparing two HoloViews objects, including complex composite objects. Comparisons are available as classmethods, the most general being the assertEqual method that is intended to work with any input. 
For instance, to test if two Image objects are equal you can use: Comparison.assertEqual(matrix1, matrix2) """ @classmethod def register(cls): # Float comparisons cls.equality_type_funcs[float] = cls.compare_floats cls.equality_type_funcs[np.float] = cls.compare_floats cls.equality_type_funcs[np.float32] = cls.compare_floats cls.equality_type_funcs[np.float64] = cls.compare_floats #Dictionary comparisons cls.equality_type_funcs[dict] = cls.compare_dictionaries # Numpy array comparison cls.equality_type_funcs[np.ndarray] = cls.compare_arrays # Dimension objects cls.equality_type_funcs[Dimension] = cls.compare_dimensions cls.equality_type_funcs[Dimensioned] = cls.compare_dimensioned # Used in unit tests cls.equality_type_funcs[Element] = cls.compare_elements # Used in unit tests # Composition (+ and *) cls.equality_type_funcs[Overlay] = cls.compare_overlays cls.equality_type_funcs[Layout] = cls.compare_layouttrees cls.equality_type_funcs[Empty] = cls.compare_empties # Annotations cls.equality_type_funcs[VLine] = cls.compare_vline cls.equality_type_funcs[HLine] = cls.compare_hline cls.equality_type_funcs[Spline] = cls.compare_spline cls.equality_type_funcs[Arrow] = cls.compare_arrow cls.equality_type_funcs[Text] = cls.compare_text # Path comparisons cls.equality_type_funcs[Path] = cls.compare_paths cls.equality_type_funcs[Contours] = cls.compare_contours cls.equality_type_funcs[Polygons] = cls.compare_polygons cls.equality_type_funcs[Box] = cls.compare_box cls.equality_type_funcs[Ellipse] = cls.compare_ellipse cls.equality_type_funcs[Bounds] = cls.compare_bounds # Rasters cls.equality_type_funcs[Image] = cls.compare_image cls.equality_type_funcs[RGB] = cls.compare_rgb cls.equality_type_funcs[HSV] = cls.compare_hsv cls.equality_type_funcs[Raster] = cls.compare_raster cls.equality_type_funcs[QuadMesh] = cls.compare_quadmesh cls.equality_type_funcs[Surface] = cls.compare_surface cls.equality_type_funcs[HeatMap] = cls.compare_dataset # Charts cls.equality_type_funcs[Dataset] = cls.compare_dataset cls.equality_type_funcs[Curve] = cls.compare_curve cls.equality_type_funcs[ErrorBars] = cls.compare_errorbars cls.equality_type_funcs[Spread] = cls.compare_spread cls.equality_type_funcs[Area] = cls.compare_area cls.equality_type_funcs[Scatter] = cls.compare_scatter cls.equality_type_funcs[Scatter3D] = cls.compare_scatter3d cls.equality_type_funcs[Trisurface] = cls.compare_trisurface cls.equality_type_funcs[Histogram] = cls.compare_histogram cls.equality_type_funcs[Bars] = cls.compare_bars cls.equality_type_funcs[Spikes] = cls.compare_spikes cls.equality_type_funcs[BoxWhisker] = cls.compare_boxwhisker cls.equality_type_funcs[VectorField] = cls.compare_vectorfield # Tables cls.equality_type_funcs[ItemTable] = cls.compare_itemtables cls.equality_type_funcs[Table] = cls.compare_tables cls.equality_type_funcs[Points] = cls.compare_points # Pandas DFrame objects cls.equality_type_funcs[DataFrameView] = cls.compare_dframe cls.equality_type_funcs[PandasDFrame] = cls.compare_dframe cls.equality_type_funcs[DFrame] = cls.compare_dframe # Seaborn Views cls.equality_type_funcs[Bivariate] = cls.compare_bivariate cls.equality_type_funcs[Distribution] = cls.compare_distribution cls.equality_type_funcs[Regression] = cls.compare_regression cls.equality_type_funcs[TimeSeries] = cls.compare_timeseries # NdMappings cls.equality_type_funcs[NdLayout] = cls.compare_gridlayout cls.equality_type_funcs[AdjointLayout] = cls.compare_adjointlayouts cls.equality_type_funcs[NdOverlay] = cls.compare_ndoverlays 
cls.equality_type_funcs[GridSpace] = cls.compare_grids cls.equality_type_funcs[GridMatrix] = cls.compare_grids cls.equality_type_funcs[HoloMap] = cls.compare_holomap cls.equality_type_funcs[DynamicMap] = cls.compare_dynamicmap # Option objects cls.equality_type_funcs[Options] = cls.compare_options cls.equality_type_funcs[Cycle] = cls.compare_cycles return cls.equality_type_funcs @classmethod def compare_dictionaries(cls, d1, d2, msg='Dictionaries'): keys= set(d1.keys()) keys2 = set(d2.keys()) symmetric_diff = keys ^ keys2 if symmetric_diff: msg = ("Dictionaries have different sets of keys: %r\n\n" % symmetric_diff) msg += "Dictionary 1: %s\n" % d1 msg += "Dictionary 2: %s" % d2 raise cls.failureException(msg) for k in keys: cls.assertEqual(d1[k], d2[k]) #=====================# # Literal comparisons # #=====================# @classmethod def compare_floats(cls, arr1, arr2, msg='Floats'): cls.compare_arrays(arr1, arr2, msg) @classmethod def compare_arrays(cls, arr1, arr2, msg='Arrays'): try: assert_array_equal(arr1, arr2) except: try: assert_array_almost_equal(arr1, arr2) except AssertionError as e: raise cls.failureException(msg + str(e)[11:]) @classmethod def bounds_check(cls, el1, el2, msg=None): if el1.bounds.lbrt() != el2.bounds.lbrt(): raise cls.failureException("BoundingBoxes are mismatched.") #=======================================# # Dimension and Dimensioned comparisons # #=======================================# @classmethod def compare_dimensions(cls, dim1, dim2, msg=None): if dim1.name != dim2.name: raise cls.failureException("Dimension names mismatched: %s != %s" % (dim1.name, dim2.name)) if dim1.cyclic != dim2.cyclic: raise cls.failureException("Dimension cyclic declarations mismatched.") if dim1.range != dim2.range: raise cls.failureException("Dimension ranges mismatched: %s != %s" % (dim1.range, dim2.range)) if dim1.type != dim2.type: raise cls.failureException("Dimension type declarations mismatched: %s != %s" % (dim1.type,dim2.type)) if dim1.unit != dim2.unit: raise cls.failureException("Dimension unit declarations mismatched: %s != %s" % (dim1.unit , dim2.unit)) if dim1.values != dim2.values: raise cls.failureException("Dimension value declarations mismatched: %s != %s" % (dim1.values , dim2.values)) @classmethod def compare_labelled_data(cls, obj1, obj2, msg=None): cls.assertEqual(obj1.group, obj2.group, "Group labels mismatched.") cls.assertEqual(obj1.label, obj2.label, "Labels mismatched.") @classmethod def compare_dimension_lists(cls, dlist1, dlist2, msg='Dimension lists'): if len(dlist1) != len(dlist2): raise cls.failureException('%s mismatched' % msg) for d1, d2 in zip(dlist1, dlist2): cls.assertEqual(d1, d2) @classmethod def compare_dimensioned(cls, obj1, obj2, msg=None): cls.compare_labelled_data(obj1, obj2) cls.compare_dimension_lists(obj1.vdims, obj2.vdims, 'Value dimension list') cls.compare_dimension_lists(obj1.kdims, obj2.kdims, 'Key dimension list') @classmethod def compare_elements(cls, obj1, obj2, msg=None): cls.compare_labelled_data(obj1, obj2) cls.assertEqual(obj1.data, obj2.data) #===============================# # Compositional trees (+ and *) # #===============================# @classmethod def compare_trees(cls, el1, el2, msg='Trees'): if len(el1.keys()) != len(el2.keys()): raise cls.failureException("%s have mismatched path counts." % msg) if el1.keys() != el2.keys(): raise cls.failureException("%s have mismatched paths." 
% msg) for element1, element2 in zip(el1.values(), el2.values()): cls.assertEqual(element1, element2) @classmethod def compare_layouttrees(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) cls.compare_trees(el1, el2, msg='Layouts') @classmethod def compare_empties(cls, el1, el2, msg=None): if not all(isinstance(el, Empty) for el in [el1, el2]): raise cls.failureException("Compared elements are not both Empty()") @classmethod def compare_overlays(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) cls.compare_trees(el1, el2, msg='Overlays') #================================# # AttrTree and Map based classes # #================================# @classmethod def compare_ndmappings(cls, el1, el2, msg='NdMappings'): cls.compare_dimensioned(el1, el2) if len(el1.keys()) != len(el2.keys()): raise cls.failureException("%s have different numbers of keys." % msg) if set(el1.keys()) != set(el2.keys()): diff1 = [el for el in el1.keys() if el not in el2.keys()] diff2 = [el for el in el2.keys() if el not in el1.keys()] raise cls.failureException("%s have different sets of keys. " % msg + "In first, not second %s. " % diff1 + "In second, not first: %s." % diff2) for element1, element2 in zip(el1, el2): cls.assertEqual(element1, element2) @classmethod def compare_holomap(cls, el1, el2, msg='HoloMaps'): cls.compare_dimensioned(el1, el2) cls.compare_ndmappings(el1, el2, msg) @classmethod def compare_dynamicmap(cls, el1, el2, msg='DynamicMap'): cls.compare_dimensioned(el1, el2) cls.compare_ndmappings(el1, el2, msg) @classmethod def compare_gridlayout(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) if len(el1) != len(el2): raise cls.failureException("Layouts have different sizes.") if set(el1.keys()) != set(el2.keys()): raise cls.failureException("Layouts have different keys.") for element1, element2 in zip(el1, el2): cls.assertEqual(element1,element2) @classmethod def compare_ndoverlays(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) if len(el1) != len(el2): raise cls.failureException("NdOverlays have different lengths.") for (layer1, layer2) in zip(el1, el2): cls.assertEqual(layer1, layer2) @classmethod def compare_adjointlayouts(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) for element1, element2 in zip(el1, el1): cls.assertEqual(element1, element2) #=============# # Annotations # #=============# @classmethod def compare_annotation(cls, el1, el2, msg='Annotation'): cls.compare_dimensioned(el1, el2) cls.assertEqual(el1.data, el2.data) @classmethod def compare_hline(cls, el1, el2, msg='HLine'): cls.compare_annotation(el1, el2, msg=msg) @classmethod def compare_vline(cls, el1, el2, msg='VLine'): cls.compare_annotation(el1, el2, msg=msg) @classmethod def compare_spline(cls, el1, el2, msg='Spline'): cls.compare_annotation(el1, el2, msg=msg) @classmethod def compare_arrow(cls, el1, el2, msg='Arrow'): cls.compare_annotation(el1, el2, msg=msg) @classmethod def compare_text(cls, el1, el2, msg='Text'): cls.compare_annotation(el1, el2, msg=msg) #=======# # Paths # #=======# @classmethod def compare_paths(cls, el1, el2, msg='Path'): cls.compare_dimensioned(el1, el2) if len(el1.data) != len(el2.data): raise cls.failureException("%s objects do not have a matching number of paths." 
% msg) for arr1, arr2 in zip(el1.data, el2.data): cls.compare_arrays(arr1, arr2, '%s data' % msg) @classmethod def compare_contours(cls, el1, el2, msg='Contours'): if el1.level != el2.level: raise cls.failureException("Contour levels are mismatched") cls.compare_paths(el1, el2, msg=msg) @classmethod def compare_polygons(cls, el1, el2, msg='Polygons'): if el1.level != el2.level: raise cls.failureException("Polygon levels are mismatched") cls.compare_paths(el1, el2, msg=msg) @classmethod def compare_box(cls, el1, el2, msg='Box'): cls.compare_paths(el1, el2, msg=msg) @classmethod def compare_ellipse(cls, el1, el2, msg='Ellipse'): cls.compare_paths(el1, el2, msg=msg) @classmethod def compare_bounds(cls, el1, el2, msg='Bounds'): cls.compare_paths(el1, el2, msg=msg) #========# # Charts # #========# @classmethod def compare_dataset(cls, el1, el2, msg='Dataset'): cls.compare_dimensioned(el1, el2) if len(el1) != len(el2): raise AssertionError("%s not of matching length." % msg) dimension_data = [(d, el1[d], el2[d]) for d in el1.dimensions()] for dim, d1, d2 in dimension_data: if d1.dtype != d2.dtype: cls.failureException("%s %s columns have different type." % (msg, dim) + " First has type %s, and second has type %s." % (d1, d2)) if d1.dtype.kind in 'SUOV': if list(d1) == list(d2): cls.failureException("%s along dimension %s not equal." % (msg, dim)) else: cls.compare_arrays(d1, d2, msg) @classmethod def compare_curve(cls, el1, el2, msg='Curve'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_errorbars(cls, el1, el2, msg='ErrorBars'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_spread(cls, el1, el2, msg='Spread'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_area(cls, el1, el2, msg='Area'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_scatter(cls, el1, el2, msg='Scatter'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_scatter3d(cls, el1, el2, msg='Scatter3D'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_trisurface(cls, el1, el2, msg='Trisurface'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_histogram(cls, el1, el2, msg='Histogram'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.edges, el2.edges, ' '.join([msg, 'edges'])) cls.compare_arrays(el1.values, el2.values, ' '.join([msg, 'values'])) @classmethod def compare_points(cls, el1, el2, msg='Points'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_vectorfield(cls, el1, el2, msg='VectorField'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_bars(cls, el1, el2, msg='Bars'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_spikes(cls, el1, el2, msg='Spikes'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_boxwhisker(cls, el1, el2, msg='BoxWhisker'): cls.compare_dataset(el1, el2, msg) #=========# # Rasters # #=========# @classmethod def compare_raster(cls, el1, el2, msg='Raster'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg) @classmethod def compare_quadmesh(cls, el1, el2, msg='QuadMesh'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data[0], el2.data[0], ' '.join([msg, 'x-data'])) cls.compare_arrays(el1.data[1], el2.data[1], ' '.join([msg, 'y-data'])) cls.compare_arrays(el1.data[2], el2.data[2], ' '.join([msg, 'z-data'])) @classmethod def compare_heatmap(cls, el1, el2, msg='HeatMap'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg) @classmethod def compare_image(cls, el1, el2, msg='Image'): 
cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg) cls.bounds_check(el1,el2) @classmethod def compare_rgb(cls, el1, el2, msg='RGB'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg=msg) cls.bounds_check(el1,el2) @classmethod def compare_hsv(cls, el1, el2, msg='HSV'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg=msg) cls.bounds_check(el1,el2) @classmethod def compare_surface(cls, el1, el2, msg='Surface'): cls.compare_dimensioned(el1, el2) cls.compare_arrays(el1.data, el2.data, msg=msg) #========# # Tables # #========# @classmethod def compare_itemtables(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) if el1.rows != el2.rows: raise cls.failureException("ItemTables have different numbers of rows.") if el1.cols != el2.cols: raise cls.failureException("ItemTables have different numbers of columns.") if [d.name for d in el1.vdims] != [d.name for d in el2.vdims]: raise cls.failureException("ItemTables have different Dimensions.") @classmethod def compare_tables(cls, el1, el2, msg='Table'): cls.compare_dataset(el1, el2, msg) #========# # Pandas # #========# @classmethod def compare_dframe(cls, el1, el2, msg='DFrame'): cls.compare_dimensioned(el1, el2) from pandas.util.testing import assert_frame_equal try: df1 = el1.data.reset_index(drop=True) df2 = el2.data.reset_index(drop=True) assert_frame_equal(df1, df2) except AssertionError as e: raise cls.failureException(msg+': '+str(e)) #=========# # Seaborn # #=========# @classmethod def compare_distribution(cls, el1, el2, msg='Distribution'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_timeseries(cls, el1, el2, msg='TimeSeries'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_bivariate(cls, el1, el2, msg='Bivariate'): cls.compare_dataset(el1, el2, msg) @classmethod def compare_regression(cls, el1, el2, msg='Regression'): cls.compare_dataset(el1, el2, msg) #=======# # Grids # #=======# @classmethod def _compare_grids(cls, el1, el2, name): if len(el1.keys()) != len(el2.keys()): raise cls.failureException("%ss have different numbers of items." % name) if set(el1.keys()) != set(el2.keys()): raise cls.failureException("%ss have different keys." % name) if len(el1) != len(el2): raise cls.failureException("%ss have different depths." % name) for element1, element2 in zip(el1, el2): cls.assertEqual(element1, element2) @classmethod def compare_grids(cls, el1, el2, msg=None): cls.compare_dimensioned(el1, el2) cls._compare_grids(el1, el2, 'GridSpace') #=========# # Options # #=========# @classmethod def compare_options(cls, options1, options2, msg=None): cls.assertEqual(options1.kwargs, options2.kwargs) @classmethod def compare_cycles(cls, cycle1, cycle2, msg=None): cls.assertEqual(cycle1.values, cycle2.values) @classmethod def compare_channelopts(cls, opt1, opt2, msg=None): cls.assertEqual(opt1.mode, opt2.mode) cls.assertEqual(opt1.pattern, opt2.pattern) cls.assertEqual(opt1.patter, opt2.pattern) class ComparisonTestCase(Comparison, TestCase): """ Class to integrate the Comparison class with unittest.TestCase. """ def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) registry = Comparison.register() for k, v in registry.items(): self.addTypeEqualityFunc(k, v)
bsd-3-clause
8,527,686,448,773,649,000
35.074074
98
0.604928
false
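The comparison module in the record above dispatches on type to specialised equality checks, and its array comparison falls back from exact to approximate equality. A minimal stand-alone sketch of that fallback pattern using numpy.testing, outside the HoloViews class hierarchy, is shown below; the function name and message handling are illustrative.

import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal

def compare_arrays(arr1, arr2, msg='Arrays'):
    # Try exact equality first, then fall back to an approximate check.
    try:
        assert_array_equal(arr1, arr2)
    except AssertionError:
        try:
            assert_array_almost_equal(arr1, arr2)
        except AssertionError as e:
            raise AssertionError(msg + str(e))

compare_arrays(np.array([1.0, 2.0]), np.array([1.0, 2.0000001]))  # passes via the approximate check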
jostep/tensorflow
tensorflow/python/debug/cli/analyzer_cli.py
3
55928
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CLI Backend for the Analyzer Part of the Debugger. The analyzer performs post hoc analysis of dumped intermediate tensors and graph structure information from debugged Session.run() calls. The other part of the debugger is the stepper (c.f. stepper_cli.py). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import copy import re from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.debug.cli import cli_shared from tensorflow.python.debug.cli import command_parser from tensorflow.python.debug.cli import debugger_cli_common from tensorflow.python.debug.cli import evaluator from tensorflow.python.debug.cli import ui_factory from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.lib import source_utils RL = debugger_cli_common.RichLine # String constants for the depth-dependent hanging indent at the beginning # of each line. HANG_UNFINISHED = "| " # Used for unfinished recursion depths. HANG_FINISHED = " " HANG_SUFFIX = "|- " # String constant for displaying depth and op type. DEPTH_TEMPLATE = "(%d) " OP_TYPE_TEMPLATE = "[%s] " # String constants for control inputs/outputs, etc. CTRL_LABEL = "(Ctrl) " ELLIPSIS = "..." SORT_TENSORS_BY_TIMESTAMP = "timestamp" SORT_TENSORS_BY_DUMP_SIZE = "dump_size" SORT_TENSORS_BY_OP_TYPE = "op_type" SORT_TENSORS_BY_TENSOR_NAME = "tensor_name" def _add_main_menu(output, node_name=None, enable_list_tensors=True, enable_node_info=True, enable_print_tensor=True, enable_list_inputs=True, enable_list_outputs=True): """Generate main menu for the screen output from a command. Args: output: (debugger_cli_common.RichTextLines) the output object to modify. node_name: (str or None) name of the node involved (if any). If None, the menu items node_info, list_inputs and list_outputs will be automatically disabled, overriding the values of arguments enable_node_info, enable_list_inputs and enable_list_outputs. enable_list_tensors: (bool) whether the list_tensor menu item will be enabled. enable_node_info: (bool) whether the node_info item will be enabled. enable_print_tensor: (bool) whether the print_tensor item will be enabled. enable_list_inputs: (bool) whether the item list_inputs will be enabled. enable_list_outputs: (bool) whether the item list_outputs will be enabled. 
""" menu = debugger_cli_common.Menu() menu.append( debugger_cli_common.MenuItem( "list_tensors", "list_tensors", enabled=enable_list_tensors)) if node_name: menu.append( debugger_cli_common.MenuItem( "node_info", "node_info -a -d -t %s" % node_name, enabled=enable_node_info)) menu.append( debugger_cli_common.MenuItem( "print_tensor", "print_tensor %s" % node_name, enabled=enable_print_tensor)) menu.append( debugger_cli_common.MenuItem( "list_inputs", "list_inputs -c -r %s" % node_name, enabled=enable_list_inputs)) menu.append( debugger_cli_common.MenuItem( "list_outputs", "list_outputs -c -r %s" % node_name, enabled=enable_list_outputs)) else: menu.append( debugger_cli_common.MenuItem( "node_info", None, enabled=False)) menu.append( debugger_cli_common.MenuItem("print_tensor", None, enabled=False)) menu.append( debugger_cli_common.MenuItem("list_inputs", None, enabled=False)) menu.append( debugger_cli_common.MenuItem("list_outputs", None, enabled=False)) menu.append( debugger_cli_common.MenuItem("run_info", "run_info")) menu.append( debugger_cli_common.MenuItem("help", "help")) output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu class DebugAnalyzer(object): """Analyzer for debug data from dump directories.""" _TIMESTAMP_COLUMN_HEAD = "t (ms)" _DUMP_SIZE_COLUMN_HEAD = "Size (B)" _OP_TYPE_COLUMN_HEAD = "Op type" _TENSOR_NAME_COLUMN_HEAD = "Tensor name" def __init__(self, debug_dump): """DebugAnalyzer constructor. Args: debug_dump: A DebugDumpDir object. """ self._debug_dump = debug_dump self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump) # Initialize tensor filters state. self._tensor_filters = {} # Argument parsers for command handlers. self._arg_parsers = {} # Parser for list_tensors. ap = argparse.ArgumentParser( description="List dumped intermediate tensors.", usage=argparse.SUPPRESS) ap.add_argument( "-f", "--tensor_filter", dest="tensor_filter", type=str, default="", help="List only Tensors passing the filter of the specified name") ap.add_argument( "-n", "--node_name_filter", dest="node_name_filter", type=str, default="", help="filter node name by regex.") ap.add_argument( "-t", "--op_type_filter", dest="op_type_filter", type=str, default="", help="filter op type by regex.") ap.add_argument( "-s", "--sort_by", dest="sort_by", type=str, default=SORT_TENSORS_BY_TIMESTAMP, help=("the field to sort the data by: (%s | %s | %s | %s)" % (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE, SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME))) ap.add_argument( "-r", "--reverse", dest="reverse", action="store_true", help="sort the data in reverse (descending) order") self._arg_parsers["list_tensors"] = ap # Parser for node_info. ap = argparse.ArgumentParser( description="Show information about a node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an associated tensor, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-a", "--attributes", dest="attributes", action="store_true", help="Also list attributes of the node.") ap.add_argument( "-d", "--dumps", dest="dumps", action="store_true", help="Also list dumps available from the node.") ap.add_argument( "-t", "--traceback", dest="traceback", action="store_true", help="Also include the traceback of the node's creation " "(if available in Python).") self._arg_parsers["node_info"] = ap # Parser for list_inputs. 
ap = argparse.ArgumentParser( description="Show inputs to a node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an output tensor from the node, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-c", "--control", action="store_true", help="Include control inputs.") ap.add_argument( "-d", "--depth", dest="depth", type=int, default=20, help="Maximum depth of recursion used when showing the input tree.") ap.add_argument( "-r", "--recursive", dest="recursive", action="store_true", help="Show inputs to the node recursively, i.e., the input tree.") ap.add_argument( "-t", "--op_type", action="store_true", help="Show op types of input nodes.") self._arg_parsers["list_inputs"] = ap # Parser for list_outputs. ap = argparse.ArgumentParser( description="Show the nodes that receive the outputs of given node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an output tensor from the node, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-c", "--control", action="store_true", help="Include control inputs.") ap.add_argument( "-d", "--depth", dest="depth", type=int, default=20, help="Maximum depth of recursion used when showing the output tree.") ap.add_argument( "-r", "--recursive", dest="recursive", action="store_true", help="Show recipients of the node recursively, i.e., the output " "tree.") ap.add_argument( "-t", "--op_type", action="store_true", help="Show op types of recipient nodes.") self._arg_parsers["list_outputs"] = ap # Parser for print_tensor. self._arg_parsers["print_tensor"] = ( command_parser.get_print_tensor_argparser( "Print the value of a dumped tensor.")) # Parser for print_source. ap = argparse.ArgumentParser( description="Print a Python source file with overlaid debug " "information, including the nodes (ops) or Tensors created at the " "source lines.", usage=argparse.SUPPRESS) ap.add_argument( "source_file_path", type=str, help="Path to the source file.") ap.add_argument( "-t", "--tensors", dest="tensors", action="store_true", help="Label lines with dumped Tensors, instead of ops.") ap.add_argument( "-m", "--max_elements_per_line", type=int, default=10, help="Maximum number of elements (ops or Tensors) to show per source " "line.") ap.add_argument( "-b", "--line_begin", type=int, default=1, help="Print source beginning at line number (1-based.)") self._arg_parsers["print_source"] = ap # Parser for list_source. ap = argparse.ArgumentParser( description="List source files responsible for constructing nodes and " "tensors present in the run().", usage=argparse.SUPPRESS) ap.add_argument( "-p", "--path_filter", type=str, default="", help="Regular expression filter for file path.") ap.add_argument( "-n", "--node_name_filter", type=str, default="", help="Regular expression filter for node name.") self._arg_parsers["list_source"] = ap # Parser for eval. ap = argparse.ArgumentParser( description="""Evaluate an arbitrary expression. Can use tensor values from the current debug dump. The debug tensor names should be enclosed in pairs of backticks. Expressions with spaces should be enclosed in a pair of double quotes or a pair of single quotes. By default, numpy is imported as np and can be used in the expressions. E.g., 1) eval np.argmax(`Softmax:0`), 2) eval 'np.sum(`Softmax:0`, axis=1)', 3) eval "np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)". 
""", usage=argparse.SUPPRESS) ap.add_argument( "expression", type=str, help="""Expression to be evaluated. 1) in the simplest case, use <node_name>:<output_slot>, e.g., hidden_0/MatMul:0. 2) if the default debug op "DebugIdentity" is to be overridden, use <node_name>:<output_slot>:<debug_op>, e.g., hidden_0/MatMul:0:DebugNumericSummary. 3) if the tensor of the same name exists on more than one device, use <device_name>:<node_name>:<output_slot>[:<debug_op>], e.g., /job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0 /job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount. 4) if the tensor is executed multiple times in a given `Session.run` call, specify the execution index with a 0-based integer enclose in a pair of brackets at the end, e.g., RNN/tanh:0[0] /job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].""") ap.add_argument( "-a", "--all", dest="print_all", action="store_true", help="Print the tensor in its entirety, i.e., do not use ellipses " "(may be slow for large results).") self._arg_parsers["eval"] = ap # TODO(cais): Implement list_nodes. def add_tensor_filter(self, filter_name, filter_callable): """Add a tensor filter. A tensor filter is a named callable of the signature: filter_callable(dump_datum, tensor), wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying metadata about the dumped tensor, including tensor name, timestamps, etc. tensor is the value of the dumped tensor as an numpy.ndarray object. The return value of the function is a bool. This is the same signature as the input argument to debug_data.DebugDumpDir.find(). Args: filter_name: (str) name of the filter. Cannot be empty. filter_callable: (callable) a filter function of the signature described as above. Raises: ValueError: If filter_name is an empty str. TypeError: If filter_name is not a str. Or if filter_callable is not callable. """ if not isinstance(filter_name, str): raise TypeError("Input argument filter_name is expected to be str, " "but is not.") # Check that filter_name is not an empty str. if not filter_name: raise ValueError("Input argument filter_name cannot be empty.") # Check that filter_callable is callable. if not callable(filter_callable): raise TypeError( "Input argument filter_callable is expected to be callable, " "but is not.") self._tensor_filters[filter_name] = filter_callable def get_tensor_filter(self, filter_name): """Retrieve filter function by name. Args: filter_name: Name of the filter set during add_tensor_filter() call. Returns: The callable associated with the filter name. Raises: ValueError: If there is no tensor filter of the specified filter name. """ if filter_name not in self._tensor_filters: raise ValueError("There is no tensor filter named \"%s\"" % filter_name) return self._tensor_filters[filter_name] def get_help(self, handler_name): return self._arg_parsers[handler_name].format_help() def list_tensors(self, args, screen_info=None): """Command handler for list_tensors. List tensors dumped during debugged Session.run() call. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # TODO(cais): Add annotations of substrings for dumped tensor names, to # facilitate on-screen highlighting/selection of node names. 
_ = screen_info parsed = self._arg_parsers["list_tensors"].parse_args(args) output = [] filter_strs = [] if parsed.op_type_filter: op_type_regex = re.compile(parsed.op_type_filter) filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter) else: op_type_regex = None if parsed.node_name_filter: node_name_regex = re.compile(parsed.node_name_filter) filter_strs.append("Node name regex filter: \"%s\"" % parsed.node_name_filter) else: node_name_regex = None output = debugger_cli_common.RichTextLines(filter_strs) output.append("") if parsed.tensor_filter: try: filter_callable = self.get_tensor_filter(parsed.tensor_filter) except ValueError: output = cli_shared.error("There is no tensor filter named \"%s\"." % parsed.tensor_filter) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output data_to_show = self._debug_dump.find(filter_callable) else: data_to_show = self._debug_dump.dumped_tensor_data # TODO(cais): Implement filter by lambda on tensor value. max_timestamp_width, max_dump_size_width, max_op_type_width = ( self._measure_tensor_list_column_widths(data_to_show)) # Sort the data. data_to_show = self._sort_dump_data_by( data_to_show, parsed.sort_by, parsed.reverse) output.extend( self._tensor_list_column_heads(parsed, max_timestamp_width, max_dump_size_width, max_op_type_width)) dump_count = 0 for dump in data_to_show: if node_name_regex and not node_name_regex.match(dump.node_name): continue if op_type_regex: op_type = self._debug_dump.node_op_type(dump.node_name) if not op_type_regex.match(op_type): continue rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0 dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes) dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot) op_type = self._debug_dump.node_op_type(dump.node_name) line = "[%.3f]" % rel_time line += " " * (max_timestamp_width - len(line)) line += dump_size_str line += " " * (max_timestamp_width + max_dump_size_width - len(line)) line += op_type line += " " * (max_timestamp_width + max_dump_size_width + max_op_type_width - len(line)) line += dumped_tensor_name output.append( line, font_attr_segs=[( len(line) - len(dumped_tensor_name), len(line), debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))]) dump_count += 1 if parsed.tensor_filter: output.prepend([ "%d dumped tensor(s) passing filter \"%s\":" % (dump_count, parsed.tensor_filter) ]) else: output.prepend(["%d dumped tensor(s):" % dump_count]) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output def _measure_tensor_list_column_widths(self, data): """Determine the maximum widths of the timestamp and op-type column. This method assumes that data is sorted in the default order, i.e., by ascending timestamps. Args: data: (list of DebugTensorDaum) the data based on which the maximum column widths will be determined. Returns: (int) maximum width of the timestamp column. 0 if data is empty. (int) maximum width of the dump size column. 0 if data is empty. (int) maximum width of the op type column. 0 if data is empty. 
""" max_timestamp_width = 0 if data: max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0 max_timestamp_width = len("[%.3f] " % max_rel_time_ms) + 1 max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1) max_dump_size_width = 0 for dump in data: dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes) if len(dump_size_str) + 1 > max_dump_size_width: max_dump_size_width = len(dump_size_str) + 1 max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1) max_op_type_width = 0 for dump in data: op_type = self._debug_dump.node_op_type(dump.node_name) if len(op_type) + 1 > max_op_type_width: max_op_type_width = len(op_type) + 1 max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1) return max_timestamp_width, max_dump_size_width, max_op_type_width def _sort_dump_data_by(self, data, sort_by, reverse): """Sort a list of DebugTensorDatum in specified order. Args: data: (list of DebugTensorDatum) the data to be sorted. sort_by: The field to sort data by. reverse: (bool) Whether to use reversed (descending) order. Returns: (list of DebugTensorDatum) in sorted order. Raises: ValueError: given an invalid value of sort_by. """ if sort_by == SORT_TENSORS_BY_TIMESTAMP: return sorted( data, reverse=reverse, key=lambda x: x.timestamp) elif sort_by == SORT_TENSORS_BY_DUMP_SIZE: return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes) elif sort_by == SORT_TENSORS_BY_OP_TYPE: return sorted( data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name)) elif sort_by == SORT_TENSORS_BY_TENSOR_NAME: return sorted( data, reverse=reverse, key=lambda x: "%s:%d" % (x.node_name, x.output_slot)) else: raise ValueError("Unsupported key to sort tensors by: %s" % sort_by) def _tensor_list_column_heads(self, parsed, max_timestamp_width, max_dump_size_width, max_op_type_width): """Generate a line containing the column heads of the tensor list. Args: parsed: Parsed arguments (by argparse) of the list_tensors command. max_timestamp_width: (int) maximum width of the timestamp column. max_dump_size_width: (int) maximum width of the dump size column. max_op_type_width: (int) maximum width of the op type column. Returns: A RichTextLines object. 
""" base_command = "list_tensors" if parsed.tensor_filter: base_command += " -f %s" % parsed.tensor_filter if parsed.op_type_filter: base_command += " -t %s" % parsed.op_type_filter if parsed.node_name_filter: base_command += " -n %s" % parsed.node_name_filter attr_segs = {0: []} row = self._TIMESTAMP_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP) if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse: command += " -r" attr_segs[0].append( (0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * (max_timestamp_width - len(row)) prev_len = len(row) row += self._DUMP_SIZE_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE) if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * (max_dump_size_width + max_timestamp_width - len(row)) prev_len = len(row) row += self._OP_TYPE_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE) if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * ( max_op_type_width + max_dump_size_width + max_timestamp_width - len(row) ) prev_len = len(row) row += self._TENSOR_NAME_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME) if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem("", command), "bold"])) row += " " * ( max_op_type_width + max_dump_size_width + max_timestamp_width - len(row) ) return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs) def node_info(self, args, screen_info=None): """Command handler for node_info. Query information about a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # TODO(cais): Add annotation of substrings for node names, to facilitate # on-screen highlighting/selection of node names. _ = screen_info parsed = self._arg_parsers["node_info"].parse_args(args) # Get a node name, regardless of whether the input is a node name (without # output slot attached) or a tensor name (with output slot attached). node_name, unused_slot = debug_data.parse_node_or_tensor_name( parsed.node_name) if not self._debug_dump.node_exists(node_name): output = cli_shared.error( "There is no node named \"%s\" in the partition graphs" % node_name) _add_main_menu( output, node_name=None, enable_list_tensors=True, enable_node_info=False, enable_list_inputs=False, enable_list_outputs=False) return output # TODO(cais): Provide UI glossary feature to explain to users what the # term "partition graph" means and how it is related to TF graph objects # in Python. The information can be along the line of: # "A tensorflow graph defined in Python is stripped of unused ops # according to the feeds and fetches and divided into a number of # partition graphs that may be distributed among multiple devices and # hosts. The partition graphs are what's actually executed by the C++ # runtime during a run() call." 
lines = ["Node %s" % node_name] font_attr_segs = { 0: [(len(lines[-1]) - len(node_name), len(lines[-1]), "bold")] } lines.append("") lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name)) lines.append(" Device: %s" % self._debug_dump.node_device(node_name)) output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) # List node inputs (non-control and control). inputs = self._debug_dump.node_inputs(node_name) ctrl_inputs = self._debug_dump.node_inputs(node_name, is_control=True) output.extend(self._format_neighbors("input", inputs, ctrl_inputs)) # List node output recipients (non-control and control). recs = self._debug_dump.node_recipients(node_name) ctrl_recs = self._debug_dump.node_recipients(node_name, is_control=True) output.extend(self._format_neighbors("recipient", recs, ctrl_recs)) # Optional: List attributes of the node. if parsed.attributes: output.extend(self._list_node_attributes(node_name)) # Optional: List dumps available from the node. if parsed.dumps: output.extend(self._list_node_dumps(node_name)) if parsed.traceback: output.extend(self._render_node_traceback(node_name)) _add_main_menu(output, node_name=node_name, enable_node_info=False) return output def _render_node_traceback(self, node_name): """Render traceback of a node's creation in Python, if available. Args: node_name: (str) name of the node. Returns: A RichTextLines object containing the stack trace of the node's construction. """ lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")] try: node_stack = self._debug_dump.node_traceback(node_name) for depth, (file_path, line, function_name, text) in enumerate( node_stack): lines.append("%d: %s" % (depth, file_path)) attribute = debugger_cli_common.MenuItem( "", "ps %s -b %d" % (file_path, line)) if text else None line_number_line = RL(" ") line_number_line += RL("Line: %d" % line, attribute) lines.append(line_number_line) lines.append(" Function: %s" % function_name) lines.append(" Text: " + (("\"%s\"" % text) if text else "None")) lines.append("") except KeyError: lines.append("(Node unavailable in the loaded Python graph)") except LookupError: lines.append("(Unavailable because no Python graph has been loaded)") return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) def list_inputs(self, args, screen_info=None): """Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # Screen info not currently used by this handler. Include this line to # mute pylint. _ = screen_info # TODO(cais): Use screen info to format the output lines more prettily, # e.g., hanging indent of long node names. parsed = self._arg_parsers["list_inputs"].parse_args(args) output = self._list_inputs_or_outputs( parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=False) node_name = debug_data.get_node_name(parsed.node_name) _add_main_menu(output, node_name=node_name, enable_list_inputs=False) return output def print_tensor(self, args, screen_info=None): """Command handler for print_tensor. Print value of a given dumped tensor. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. 
""" parsed = self._arg_parsers["print_tensor"].parse_args(args) np_printoptions = cli_shared.numpy_printoptions_from_screen_info( screen_info) # Determine if any range-highlighting is required. highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges) tensor_name, tensor_slicing = ( command_parser.parse_tensor_name_with_slicing(parsed.tensor_name)) node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name) if (self._debug_dump.loaded_partition_graphs() and not self._debug_dump.node_exists(node_name)): output = cli_shared.error( "Node \"%s\" does not exist in partition graphs" % node_name) _add_main_menu( output, node_name=None, enable_list_tensors=True, enable_print_tensor=False) return output watch_keys = self._debug_dump.debug_watch_keys(node_name) if output_slot is None: output_slots = set() for watch_key in watch_keys: output_slots.add(int(watch_key.split(":")[1])) if len(output_slots) == 1: # There is only one dumped tensor from this node, so there is no # ambiguity. Proceed to show the only dumped tensor. output_slot = list(output_slots)[0] else: # There are more than one dumped tensors from this node. Indicate as # such. # TODO(cais): Provide an output screen with command links for # convenience. lines = [ "Node \"%s\" generated debug dumps from %s output slots:" % (node_name, len(output_slots)), "Please specify the output slot: %s:x." % node_name ] output = debugger_cli_common.RichTextLines(lines) _add_main_menu( output, node_name=node_name, enable_list_tensors=True, enable_print_tensor=False) return output # Find debug dump data that match the tensor name (node name + output # slot). matching_data = [] for watch_key in watch_keys: debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key) for datum in debug_tensor_data: if datum.output_slot == output_slot: matching_data.append(datum) if not matching_data: # No dump for this tensor. output = cli_shared.error("Tensor \"%s\" did not generate any dumps." % parsed.tensor_name) elif len(matching_data) == 1: # There is only one dump for this tensor. if parsed.number <= 0: output = cli_shared.format_tensor( matching_data[0].get_tensor(), matching_data[0].watch_key, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, include_numeric_summary=parsed.numeric_summary) else: output = cli_shared.error( "Invalid number (%d) for tensor %s, which generated one dump." % (parsed.number, parsed.tensor_name)) _add_main_menu(output, node_name=node_name, enable_print_tensor=False) else: # There are more than one dumps for this tensor. 
if parsed.number < 0: lines = [ "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name, len(matching_data)) ] font_attr_segs = {} for i, datum in enumerate(matching_data): rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0 lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key)) command = "print_tensor %s -n %d" % (parsed.tensor_name, i) font_attr_segs[len(lines) - 1] = [( len(lines[-1]) - len(datum.watch_key), len(lines[-1]), debugger_cli_common.MenuItem(None, command))] lines.append("") lines.append( "You can use the -n (--number) flag to specify which dump to " "print.") lines.append("For example:") lines.append(" print_tensor %s -n 0" % parsed.tensor_name) output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) elif parsed.number >= len(matching_data): output = cli_shared.error( "Specified number (%d) exceeds the number of available dumps " "(%d) for tensor %s" % (parsed.number, len(matching_data), parsed.tensor_name)) else: output = cli_shared.format_tensor( matching_data[parsed.number].get_tensor(), matching_data[parsed.number].watch_key + " (dump #%d)" % parsed.number, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options) _add_main_menu(output, node_name=node_name, enable_print_tensor=False) return output def list_outputs(self, args, screen_info=None): """Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # Screen info not currently used by this handler. Include this line to # mute pylint. _ = screen_info # TODO(cais): Use screen info to format the output lines more prettily, # e.g., hanging indent of long node names. parsed = self._arg_parsers["list_outputs"].parse_args(args) output = self._list_inputs_or_outputs( parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=True) node_name = debug_data.get_node_name(parsed.node_name) _add_main_menu(output, node_name=node_name, enable_list_outputs=False) return output def evaluate_expression(self, args, screen_info=None): parsed = self._arg_parsers["eval"].parse_args(args) eval_res = self._evaluator.evaluate(parsed.expression) np_printoptions = cli_shared.numpy_printoptions_from_screen_info( screen_info) return cli_shared.format_tensor( eval_res, "from eval of expression '%s'" % parsed.expression, np_printoptions, print_all=parsed.print_all, include_numeric_summary=True) def _reconstruct_print_source_command(self, parsed, line_begin, max_elements_per_line_increase=0): return "ps %s %s -b %d -m %d" % ( parsed.source_file_path, "-t" if parsed.tensors else "", line_begin, parsed.max_elements_per_line + max_elements_per_line_increase) def print_source(self, args, screen_info=None): """Print the content of a source file.""" del screen_info # Unused. 
parsed = self._arg_parsers["print_source"].parse_args(args) source_annotation = source_utils.annotate_source( self._debug_dump, parsed.source_file_path, do_dumped_tensors=parsed.tensors) source_lines, line_num_width = source_utils.load_source( parsed.source_file_path) labeled_source_lines = [] actual_initial_scroll_target = 0 for i, line in enumerate(source_lines): annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW) annotated_line += " " * (line_num_width - len(annotated_line)) annotated_line += line labeled_source_lines.append(annotated_line) if i + 1 == parsed.line_begin: actual_initial_scroll_target = len(labeled_source_lines) - 1 if i + 1 in source_annotation: sorted_elements = sorted(source_annotation[i + 1]) for k, element in enumerate(sorted_elements): if k >= parsed.max_elements_per_line: omitted_info_line = RL(" (... Omitted %d of %d %s ...) " % ( len(sorted_elements) - parsed.max_elements_per_line, len(sorted_elements), "tensor(s)" if parsed.tensors else "op(s)")) omitted_info_line += RL( "+5", debugger_cli_common.MenuItem( None, self._reconstruct_print_source_command( parsed, i + 1, max_elements_per_line_increase=5))) labeled_source_lines.append(omitted_info_line) break label = RL(" " * 4) if self._debug_dump.debug_watch_keys( debug_data.get_node_name(element)): attribute = debugger_cli_common.MenuItem("", "pt %s" % element) else: attribute = cli_shared.COLOR_BLUE label += RL(element, attribute) labeled_source_lines.append(label) output = debugger_cli_common.rich_text_lines_from_rich_line_list( labeled_source_lines, annotations={debugger_cli_common.INIT_SCROLL_POS_KEY: actual_initial_scroll_target}) _add_main_menu(output, node_name=None) return output def _make_source_table(self, source_list, is_tf_py_library): """Make a table summarizing the source files that create nodes and tensors. Args: source_list: List of source files and related information as a list of tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps, first_line). is_tf_py_library: (`bool`) whether this table is for files that belong to the TensorFlow Python library. Returns: The table as a `debugger_cli_common.RichTextLines` object. """ path_head = "Source file path" num_nodes_head = "#(nodes)" num_tensors_head = "#(tensors)" num_dumps_head = "#(tensor dumps)" if is_tf_py_library: # Use color to mark files that are guessed to belong to TensorFlow Python # library. 
color = cli_shared.COLOR_GRAY lines = [RL("TensorFlow Python library file(s):", color)] else: color = cli_shared.COLOR_WHITE lines = [RL("File(s) outside TensorFlow Python library:", color)] if not source_list: lines.append(RL("[No files.]")) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) path_column_width = max( max([len(item[0]) for item in source_list]), len(path_head)) + 1 num_nodes_column_width = max( max([len(str(item[2])) for item in source_list]), len(num_nodes_head)) + 1 num_tensors_column_width = max( max([len(str(item[3])) for item in source_list]), len(num_tensors_head)) + 1 head = RL(path_head + " " * (path_column_width - len(path_head)), color) head += RL(num_nodes_head + " " * ( num_nodes_column_width - len(num_nodes_head)), color) head += RL(num_tensors_head + " " * ( num_tensors_column_width - len(num_tensors_head)), color) head += RL(num_dumps_head, color) lines.append(head) for (file_path, _, num_nodes, num_tensors, num_dumps, first_line_num) in source_list: path_attributes = [color] if source_utils.is_extension_uncompiled_python_source(file_path): path_attributes.append( debugger_cli_common.MenuItem(None, "ps %s -b %d" % (file_path, first_line_num))) line = RL(file_path, path_attributes) line += " " * (path_column_width - len(line)) line += RL( str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))), color) line += RL( str(num_tensors) + " " * (num_tensors_column_width - len(str(num_tensors))), color) line += RL(str(num_dumps), color) lines.append(line) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) def list_source(self, args, screen_info=None): """List Python source files that constructed nodes and tensors.""" del screen_info # Unused. parsed = self._arg_parsers["list_source"].parse_args(args) source_list = source_utils.list_source_files_against_dump( self._debug_dump, path_regex_whitelist=parsed.path_filter, node_name_regex_whitelist=parsed.node_name_filter) top_lines = [ RL("List of source files that created nodes in this run", "bold")] if parsed.path_filter: top_lines.append( RL("File path regex filter: \"%s\"" % parsed.path_filter)) if parsed.node_name_filter: top_lines.append( RL("Node name regex filter: \"%s\"" % parsed.node_name_filter)) top_lines.append(RL()) output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines) if not source_list: output.append("[No source file information.]") return output output.extend(self._make_source_table( [item for item in source_list if not item[1]], False)) output.extend(self._make_source_table( [item for item in source_list if item[1]], True)) _add_main_menu(output, node_name=None) return output def _list_inputs_or_outputs(self, recursive, node_name, depth, control, op_type, do_outputs=False): """Helper function used by list_inputs and list_outputs. Format a list of lines to display the inputs or output recipients of a given node. Args: recursive: Whether the listing is to be done recursively, as a boolean. node_name: The name of the node in question, as a str. depth: Maximum recursion depth, applies only if recursive == True, as an int. control: Whether control inputs or control recipients are included, as a boolean. op_type: Whether the op types of the nodes are to be included, as a boolean. do_outputs: Whether recipients, instead of input nodes are to be listed, as a boolean. Returns: Input or recipient tree formatted as a RichTextLines object. 
""" if do_outputs: tracker = self._debug_dump.node_recipients type_str = "Recipients of" short_type_str = "recipients" else: tracker = self._debug_dump.node_inputs type_str = "Inputs to" short_type_str = "inputs" lines = [] font_attr_segs = {} # Check if this is a tensor name, instead of a node name. node_name, _ = debug_data.parse_node_or_tensor_name(node_name) # Check if node exists. if not self._debug_dump.node_exists(node_name): return cli_shared.error( "There is no node named \"%s\" in the partition graphs" % node_name) if recursive: max_depth = depth else: max_depth = 1 if control: include_ctrls_str = ", control %s included" % short_type_str else: include_ctrls_str = "" line = "%s node \"%s\"" % (type_str, node_name) font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold") ] lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str )) command_template = "lo -c -r %s" if do_outputs else "li -c -r %s" self._dfs_from_node( lines, font_attr_segs, node_name, tracker, max_depth, 1, [], control, op_type, command_template=command_template) # Include legend. lines.append("") lines.append("Legend:") lines.append(" (d): recursion depth = d.") if control: lines.append(" (Ctrl): Control input.") if op_type: lines.append(" [Op]: Input node has op type Op.") # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes. return debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) def _dfs_from_node(self, lines, attr_segs, node_name, tracker, max_depth, depth, unfinished, include_control=False, show_op_type=False, command_template=None): """Perform depth-first search (DFS) traversal of a node's input tree. It recursively tracks the inputs (or output recipients) of the node called node_name, and append these inputs (or output recipients) to a list of text lines (lines) with proper indentation that reflects the recursion depth, together with some formatting attributes (to attr_segs). The formatting attributes can include command shortcuts, for example. Args: lines: Text lines to append to, as a list of str. attr_segs: (dict) Attribute segments dictionary to append to. node_name: Name of the node, as a str. This arg is updated during the recursion. tracker: A callable that takes one str as the node name input and returns a list of str as the inputs/outputs. This makes it this function general enough to be used with both node-input and node-output tracking. max_depth: Maximum recursion depth, as an int. depth: Current recursion depth. This arg is updated during the recursion. unfinished: A stack of unfinished recursion depths, as a list of int. include_control: Whether control dependencies are to be included as inputs (and marked as such). show_op_type: Whether op type of the input nodes are to be displayed alongside the nodes' names. command_template: (str) Template for command shortcut of the node names. """ # Make a shallow copy of the list because it may be extended later. all_inputs = copy.copy(tracker(node_name, is_control=False)) is_ctrl = [False] * len(all_inputs) if include_control: # Sort control inputs or recipients in alphabetical order of the node # names. ctrl_inputs = sorted(tracker(node_name, is_control=True)) all_inputs.extend(ctrl_inputs) is_ctrl.extend([True] * len(ctrl_inputs)) if not all_inputs: if depth == 1: lines.append(" [None]") return unfinished.append(depth) # Create depth-dependent hanging indent for the line. 
hang = "" for k in xrange(depth): if k < depth - 1: if k + 1 in unfinished: hang += HANG_UNFINISHED else: hang += HANG_FINISHED else: hang += HANG_SUFFIX if all_inputs and depth > max_depth: lines.append(hang + ELLIPSIS) unfinished.pop() return hang += DEPTH_TEMPLATE % depth for i in xrange(len(all_inputs)): inp = all_inputs[i] if is_ctrl[i]: ctrl_str = CTRL_LABEL else: ctrl_str = "" op_type_str = "" if show_op_type: op_type_str = OP_TYPE_TEMPLATE % self._debug_dump.node_op_type(inp) if i == len(all_inputs) - 1: unfinished.pop() line = hang + ctrl_str + op_type_str + inp lines.append(line) if command_template: attr_segs[len(lines) - 1] = [( len(line) - len(inp), len(line), debugger_cli_common.MenuItem(None, command_template % inp))] # Recursive call. # The input's/output's name can be a tensor name, in the case of node # with >1 output slots. inp_node_name, _ = debug_data.parse_node_or_tensor_name(inp) self._dfs_from_node( lines, attr_segs, inp_node_name, tracker, max_depth, depth + 1, unfinished, include_control=include_control, show_op_type=show_op_type, command_template=command_template) def _format_neighbors(self, neighbor_type, non_ctrls, ctrls): """List neighbors (inputs or recipients) of a node. Args: neighbor_type: ("input" | "recipient") non_ctrls: Non-control neighbor node names, as a list of str. ctrls: Control neighbor node names, as a list of str. Returns: A RichTextLines object. """ # TODO(cais): Return RichTextLines instead, to allow annotation of node # names. lines = [] font_attr_segs = {} lines.append("") lines.append(" %d %s(s) + %d control %s(s):" % (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type)) lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type)) for non_ctrl in non_ctrls: line = " [%s] %s" % (self._debug_dump.node_op_type(non_ctrl), non_ctrl) lines.append(line) font_attr_segs[len(lines) - 1] = [( len(line) - len(non_ctrl), len(line), debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))] if ctrls: lines.append("") lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type)) for ctrl in ctrls: line = " [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl) lines.append(line) font_attr_segs[len(lines) - 1] = [( len(line) - len(ctrl), len(line), debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))] return debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) def _list_node_attributes(self, node_name): """List neighbors (inputs or recipients) of a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object. """ lines = [] lines.append("") lines.append("Node attributes:") attrs = self._debug_dump.node_attributes(node_name) for attr_key in attrs: lines.append(" %s:" % attr_key) attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ") lines.append(" %s" % attr_val_str) lines.append("") return debugger_cli_common.RichTextLines(lines) def _list_node_dumps(self, node_name): """List dumped tensor data from a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object. 
""" lines = [] font_attr_segs = {} watch_keys = self._debug_dump.debug_watch_keys(node_name) dump_count = 0 for watch_key in watch_keys: debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key) for datum in debug_tensor_data: line = " Slot %d @ %s @ %.3f ms" % ( datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0) lines.append(line) command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count) font_attr_segs[len(lines) - 1] = [( 2, len(line), debugger_cli_common.MenuItem(None, command))] dump_count += 1 output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) output_with_header = debugger_cli_common.RichTextLines( ["%d dumped tensor(s):" % dump_count, ""]) output_with_header.extend(output) return output_with_header def create_analyzer_ui(debug_dump, tensor_filters=None, ui_type="curses", on_ui_exit=None): """Create an instance of CursesUI based on a DebugDumpDir object. Args: debug_dump: (debug_data.DebugDumpDir) The debug dump to use. tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor filter (Callable). ui_type: (str) requested UI type, e.g., "curses", "readline". on_ui_exit: (`Callable`) the callback to be called when the UI exits. Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered. """ analyzer = DebugAnalyzer(debug_dump) if tensor_filters: for tensor_filter_name in tensor_filters: analyzer.add_tensor_filter( tensor_filter_name, tensor_filters[tensor_filter_name]) cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit) cli.register_command_handler( "list_tensors", analyzer.list_tensors, analyzer.get_help("list_tensors"), prefix_aliases=["lt"]) cli.register_command_handler( "node_info", analyzer.node_info, analyzer.get_help("node_info"), prefix_aliases=["ni"]) cli.register_command_handler( "list_inputs", analyzer.list_inputs, analyzer.get_help("list_inputs"), prefix_aliases=["li"]) cli.register_command_handler( "list_outputs", analyzer.list_outputs, analyzer.get_help("list_outputs"), prefix_aliases=["lo"]) cli.register_command_handler( "print_tensor", analyzer.print_tensor, analyzer.get_help("print_tensor"), prefix_aliases=["pt"]) cli.register_command_handler( "print_source", analyzer.print_source, analyzer.get_help("print_source"), prefix_aliases=["ps"]) cli.register_command_handler( "list_source", analyzer.list_source, analyzer.get_help("list_source"), prefix_aliases=["ls"]) cli.register_command_handler( "eval", analyzer.evaluate_expression, analyzer.get_help("eval"), prefix_aliases=["ev"]) dumped_tensor_names = [] for datum in debug_dump.dumped_tensor_data: dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot)) # Tab completions for command "print_tensors". cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names) return cli
apache-2.0
-3,123,995,718,131,566,000
34.285804
80
0.610338
false
joshmoore/openmicroscopy
components/tools/OmeroWeb/omeroweb/webclient/controller/container.py
1
57620
#!/usr/bin/env python # # # # Copyright (c) 2008-2011 University of Dundee. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008. # # Version: 1.0 # import omero from omero.rtypes import * from django.core.urlresolvers import reverse from django.utils.encoding import smart_str import logging logger = logging.getLogger('web-container') from webclient.controller import BaseController class BaseContainer(BaseController): project = None screen = None dataset = None plate = None acquisition = None well = None image = None tag = None file = None comment = None tags = None index = None containers = None experimenter = None c_size = 0 text_annotations = None txannSize = 0 long_annotations = None file_annotations = None orphaned = False def __init__(self, conn, project=None, dataset=None, image=None, screen=None, plate=None, acquisition=None, well=None, tag=None, tagset=None, file=None, comment=None, annotation=None, index=None, orphaned=None, **kw): BaseController.__init__(self, conn) if project is not None: self.project = self.conn.getObject("Project", project) if self.project is None: raise AttributeError("We are sorry, but that project (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(project)) if self.project._obj is None: raise AttributeError("We are sorry, but that project (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(project)) if dataset is not None: self.dataset = self.conn.getObject("Dataset", dataset) if self.dataset is None: raise AttributeError("We are sorry, but that dataset (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(dataset)) if self.dataset._obj is None: raise AttributeError("We are sorry, but that dataset (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(dataset)) if screen is not None: self.screen = self.conn.getObject("Screen", screen) if self.screen is None: raise AttributeError("We are sorry, but that screen (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(screen)) if self.screen._obj is None: raise AttributeError("We are sorry, but that screen (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(screen)) if plate is not None: self.plate = self.conn.getObject("Plate", plate) if self.plate is None: raise AttributeError("We are sorry, but that plate (id:%s) does not exist, or if it does, you have no permission to see it. 
Contact the user you think might share that data with you." % str(plate)) if self.plate._obj is None: raise AttributeError("We are sorry, but that plate (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(plate)) if acquisition is not None: self.acquisition = self.conn.getObject("PlateAcquisition", acquisition) if self.acquisition is None: raise AttributeError("We are sorry, but that plate acquisition (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(acquisition)) if self.acquisition._obj is None: raise AttributeError("We are sorry, but that plate acquisition (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(acquisition)) if image is not None: self.image = self.conn.getObject("Image", image) if self.image is None: raise AttributeError("We are sorry, but that image (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(image)) if self.image._obj is None: raise AttributeError("We are sorry, but that image (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(image)) if well is not None: self.well = self.conn.getObject("Well", well) if self.well is None: raise AttributeError("We are sorry, but that well (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(well)) if self.well._obj is None: raise AttributeError("We are sorry, but that well (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(well)) if index is not None: self.well.index = index if tag is not None: self.tag = self.conn.getObject("Annotation", tag) if self.tag is None: raise AttributeError("We are sorry, but that tag (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(tag)) if self.tag._obj is None: raise AttributeError("We are sorry, but that tag (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(tag)) if tagset is not None: self.tag = self.conn.getObject("Annotation", tagset) if self.tag is None: raise AttributeError("We are sorry, but that tag (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(tag)) if self.tag._obj is None: raise AttributeError("We are sorry, but that tag (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(tag)) if comment is not None: self.comment = self.conn.getObject("Annotation", comment) if self.comment is None: raise AttributeError("We are sorry, but that comment (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(comment)) if self.comment._obj is None: raise AttributeError("We are sorry, but that comment (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." 
% str(comment)) if file is not None: self.file = self.conn.getObject("Annotation", file) if self.file is None: raise AttributeError("We are sorry, but that file (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(file)) if self.file._obj is None: raise AttributeError("We are sorry, but that file (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(file)) if annotation is not None: self.annotation = self.conn.getObject("Annotation", annotation) if self.annotation is None: raise AttributeError("We are sorry, but that annotation (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(annotation)) if self.annotation._obj is None: raise AttributeError("We are sorry, but that annotation (id:%s) does not exist, or if it does, you have no permission to see it. Contact the user you think might share that data with you." % str(annotation)) if orphaned: self.orphaned = True def openAstexViewerCompatible(self): """ Is the image suitable to be viewed with the Volume viewer 'Open Astex Viewer' applet? Image must be a 'volume' of suitable dimensions and not too big. """ from django.conf import settings MAX_SIDE = settings.OPEN_ASTEX_MAX_SIDE # default is 400 MIN_SIDE = settings.OPEN_ASTEX_MIN_SIDE # default is 20 MAX_VOXELS = settings.OPEN_ASTEX_MAX_VOXELS # default is 15625000 (250 * 250 * 250) print "Max side, min side, max voxels", MAX_SIDE, MIN_SIDE, MAX_VOXELS if self.image is None: return False sizeZ = self.image.getSizeZ() if self.image.getSizeC() > 1: return False sizeX = self.image.getSizeX() sizeY = self.image.getSizeY() if sizeZ < MIN_SIDE or sizeX < MIN_SIDE or sizeY < MIN_SIDE: return False if sizeX > MAX_SIDE or sizeY > MAX_SIDE or sizeZ > MAX_SIDE: return False voxelCount = (sizeX * sizeY * sizeZ) if voxelCount > MAX_VOXELS: return False try: # if scipy ndimage is not available for interpolation, can only handle smaller images import scipy.ndimage except ImportError: logger.debug("Failed to import scipy.ndimage - Open Astex Viewer limited to display of smaller images.") MAX_VOXELS = (160 * 160 * 160) if voxelCount > MAX_VOXELS: return False return True def formatMetadataLine(self, l): if len(l) < 1: return None return l.split("=") def originalMetadata(self): # TODO: hardcoded values. 
self.global_metadata = list() self.series_metadata = list() if self.image is not None: om = self.image.loadOriginalMetadata() elif self.well.getWellSample().image is not None: om = self.well.getWellSample().image().loadOriginalMetadata() if om is not None: self.original_metadata = om[0] self.global_metadata = om[1] self.series_metadata = om[2] def channelMetadata(self): self.channel_metadata = None try: if self.image is not None: self.channel_metadata = self.image.getChannels() elif self.well is not None: self.channel_metadata = self.well.getWellSample().image().getChannels() except: pass if self.channel_metadata is None: self.channel_metadata = list() def loadTags(self, eid=None): if eid is not None: self.experimenter = self.conn.getObject("Experimenter", eid) else: eid = self.conn.getEventContext().userId self.tags = list(self.conn.listTags(eid)) self.t_size = len(self.tags) def loadDataByTag(self): pr_list = list(self.conn.getObjectsByAnnotations('Project',[self.tag.id])) ds_list = list(self.conn.getObjectsByAnnotations('Dataset',[self.tag.id])) im_list = list(self.conn.getObjectsByAnnotations('Image',[self.tag.id])) sc_list = list(self.conn.getObjectsByAnnotations('Screen',[self.tag.id])) pl_list = list(self.conn.getObjectsByAnnotations('Plate',[self.tag.id])) pr_list_with_counters = list() ds_list_with_counters = list() im_list_with_counters = list() sc_list_with_counters = list() pl_list_with_counters = list() pr_ids = [pr.id for pr in pr_list] if len(pr_ids) > 0: pr_annotation_counter = self.conn.getCollectionCount("Project", "annotationLinks", pr_ids) for pr in pr_list: pr.annotation_counter = pr_annotation_counter.get(pr.id) pr_list_with_counters.append(pr) ds_ids = [ds.id for ds in ds_list] if len(ds_ids) > 0: ds_annotation_counter = self.conn.getCollectionCount("Dataset", "annotationLinks", ds_ids) for ds in ds_list: ds.annotation_counter = ds_annotation_counter.get(ds.id) ds_list_with_counters.append(ds) im_ids = [im.id for im in im_list] if len(im_ids) > 0: im_annotation_counter = self.conn.getCollectionCount("Image", "annotationLinks", im_ids) for im in im_list: im.annotation_counter = im_annotation_counter.get(im.id) im_list_with_counters.append(im) sc_ids = [sc.id for sc in sc_list] if len(sc_ids) > 0: sc_annotation_counter = self.conn.getCollectionCount("Screen", "annotationLinks", sc_ids) for sc in sc_list: sc.annotation_counter = sc_annotation_counter.get(sc.id) sc_list_with_counters.append(sc) pl_ids = [pl.id for pl in pl_list] if len(pl_ids) > 0: pl_annotation_counter = self.conn.getCollectionCount("Plate", "annotationLinks", pl_ids) for pl in pl_list: pl.annotation_counter = pl_annotation_counter.get(pl.id) pl_list_with_counters.append(pl) self.containers={'projects': pr_list_with_counters, 'datasets': ds_list_with_counters, 'images': im_list_with_counters, 'screens':sc_list_with_counters, 'plates':pl_list_with_counters} self.c_size = len(pr_list_with_counters)+len(ds_list_with_counters)+len(im_list_with_counters)+len(sc_list_with_counters)+len(pl_list_with_counters) def listImagesInDataset(self, did, eid=None, page=None): if eid is not None: self.experimenter = self.conn.getObject("Experimenter", eid) im_list = list(self.conn.listImagesInDataset(oid=did, eid=eid, page=page)) # Not displaying annotation icons (same as Insight). #5514. 
        #im_list_with_counters = list()
        #im_ids = [im.id for im in im_list]
        #if len(im_ids) > 0:
        #    im_annotation_counter = self.conn.getCollectionCount("Image", "annotationLinks", im_ids)
        #    for im in im_list:
        #        im.annotation_counter = im_annotation_counter.get(im.id)
        #        im_list_with_counters.append(im)
        im_list_with_counters = im_list

        im_list_with_counters.sort(key=lambda x: x.getName().lower())

        self.containers = {'images': im_list_with_counters}
        self.c_size = self.conn.getCollectionCount("Dataset", "imageLinks", [long(did)])[long(did)]

        if page is not None:
            self.paging = self.doPaging(page, len(im_list_with_counters), self.c_size)

    def listContainerHierarchy(self, eid=None):
        if eid is not None:
            self.experimenter = self.conn.getObject("Experimenter", eid)
        else:
            eid = self.conn.getEventContext().userId

        pr_list = list(self.conn.listProjects(eid))
        ds_list = list(self.conn.listOrphans("Dataset", eid))
        sc_list = list(self.conn.listScreens(eid))
        pl_list = list(self.conn.listOrphans("Plate", eid))

        pr_list_with_counters = list()
        ds_list_with_counters = list()
        sc_list_with_counters = list()
        pl_list_with_counters = list()

        pr_ids = [pr.id for pr in pr_list]
        if len(pr_ids) > 0:
            pr_annotation_counter = self.conn.getCollectionCount("Project", "annotationLinks", pr_ids)
            for pr in pr_list:
                pr.annotation_counter = pr_annotation_counter.get(pr.id)
                pr_list_with_counters.append(pr)

        ds_ids = [ds.id for ds in ds_list]
        if len(ds_ids) > 0:
            ds_annotation_counter = self.conn.getCollectionCount("Dataset", "annotationLinks", ds_ids)
            for ds in ds_list:
                ds.annotation_counter = ds_annotation_counter.get(ds.id)
                ds_list_with_counters.append(ds)

        sc_ids = [sc.id for sc in sc_list]
        if len(sc_ids) > 0:
            sc_annotation_counter = self.conn.getCollectionCount("Screen", "annotationLinks", sc_ids)
            for sc in sc_list:
                sc.annotation_counter = sc_annotation_counter.get(sc.id)
                sc_list_with_counters.append(sc)

        pl_ids = [pl.id for pl in pl_list]
        if len(pl_ids) > 0:
            pl_annotation_counter = self.conn.getCollectionCount("Plate", "annotationLinks", pl_ids)
            for pl in pl_list:
                pl.annotation_counter = pl_annotation_counter.get(pl.id)
                pl_list_with_counters.append(pl)

        pr_list_with_counters.sort(key=lambda x: x.getName() and x.getName().lower())
        ds_list_with_counters.sort(key=lambda x: x.getName() and x.getName().lower())
        sc_list_with_counters.sort(key=lambda x: x.getName() and x.getName().lower())
        pl_list_with_counters.sort(key=lambda x: x.getName() and x.getName().lower())

        self.orphans = self.conn.countOrphans("Image", eid)

        self.containers={'projects': pr_list_with_counters, 'datasets': ds_list_with_counters, 'screens': sc_list_with_counters, 'plates': pl_list_with_counters}
        self.c_size = len(pr_list_with_counters)+len(ds_list_with_counters)+len(sc_list_with_counters)+len(pl_list_with_counters)

    def listOrphanedImages(self, eid=None, page=None):
        if eid is not None:
            self.experimenter = self.conn.getObject("Experimenter", eid)
        else:
            eid = self.conn.getEventContext().userId

        im_list = list(self.conn.listOrphans("Image", eid=eid, page=page))

        # Not displaying annotation icons (same as Insight). #5514.
#im_list_with_counters = list() #im_ids = [im.id for im in im_list] #if len(im_ids) > 0: #im_annotation_counter = self.conn.getCollectionCount("Image", "annotationLinks", im_ids) #for im in im_list: #im.annotation_counter = im_annotation_counter.get(im.id) #im_list_with_counters.append(im) im_list_with_counters = im_list im_list_with_counters.sort(key=lambda x: x.getName().lower()) self.containers = {'orphaned': True, 'images': im_list_with_counters} self.c_size = self.conn.countOrphans("Image", eid=eid) if page is not None: self.paging = self.doPaging(page, len(im_list_with_counters), self.c_size) # Annotation list def annotationList(self): self.text_annotations = list() self.rating_annotations = list() self.file_annotations = list() self.tag_annotations = list() self.xml_annotations = list() self.boolean_annotations = list() self.double_annotations = list() self.long_annotations = list() self.term_annotations = list() self.time_annotations = list() self.companion_files = list() annTypes = {omero.model.CommentAnnotationI: self.text_annotations, omero.model.LongAnnotationI: self.long_annotations, omero.model.FileAnnotationI: self.file_annotations, omero.model.TagAnnotationI: self.tag_annotations, omero.model.XmlAnnotationI: self.xml_annotations, omero.model.BooleanAnnotationI: self.boolean_annotations, omero.model.DoubleAnnotationI: self.double_annotations, omero.model.TermAnnotationI: self.term_annotations, omero.model.TimestampAnnotationI: self.time_annotations} aList = list() if self.image is not None: aList = list(self.image.listAnnotations()) elif self.dataset is not None: aList = list(self.dataset.listAnnotations()) elif self.project is not None: aList = list(self.project.listAnnotations()) elif self.screen is not None: aList = list(self.screen.listAnnotations()) elif self.plate is not None: aList = list(self.plate.listAnnotations()) elif self.acquisition is not None: aList = list(self.acquisition.listAnnotations()) elif self.well is not None: aList = list(self.well.getWellSample().image().listAnnotations()) for ann in aList: annClass = ann._obj.__class__ if annClass in annTypes: if ann.ns == omero.constants.metadata.NSINSIGHTRATING: self.rating_annotations.append(ann) elif ann.ns == omero.constants.namespaces.NSCOMPANIONFILE: if ann.getFileName != omero.constants.annotation.file.ORIGINALMETADATA: self.companion_files.append(ann) else: annTypes[annClass].append(ann) self.text_annotations.sort(key=lambda x: x.creationEventDate(), reverse=True) self.file_annotations.sort(key=lambda x: x.creationEventDate()) self.rating_annotations.sort(key=lambda x: x.creationEventDate()) self.tag_annotations.sort(key=lambda x: x.textValue) self.txannSize = len(self.text_annotations) self.fileannSize = len(self.file_annotations) self.tgannSize = len(self.tag_annotations) def getTagsByObject(self): eid = self.conn.getGroupFromContext().isReadOnly() and self.conn.getEventContext().userId or None if self.image is not None: return list(self.image.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.dataset is not None: return list(self.dataset.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.project is not None: return list(self.project.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.well is not None: return list(self.well.getWellSample().image().listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.plate is not None: return list(self.plate.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.screen is not None: return list(self.screen.listOrphanedAnnotations(eid=eid, 
anntype='Tag')) else: eid = self.conn.getGroupFromContext().isReadOnly() and self.conn.getEventContext().userId or None if eid is not None: params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() params.theFilter.ownerId = omero.rtypes.rlong(eid) return list(self.conn.getObjects("TagAnnotation", params=params)) return list(self.conn.getObjects("TagAnnotation")) def getFilesByObject(self): eid = self.conn.getGroupFromContext().isReadOnly() and self.conn.getEventContext().userId or None ns = [omero.constants.namespaces.NSCOMPANIONFILE, omero.constants.namespaces.NSEXPERIMENTERPHOTO] if self.image is not None: return list(self.image.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.dataset is not None: return list(self.dataset.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.project is not None: return list(self.project.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.well is not None: return list(self.well.getWellSample().image().listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.plate is not None: return list(self.plate.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.screen is not None: return list(self.screen.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) else: eid = self.conn.getGroupFromContext().isReadOnly() and self.conn.getEventContext().userId or None if eid is not None: params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() params.theFilter.ownerId = omero.rtypes.rlong(eid) return list(self.conn.listFileAnnotations(params=params)) return list(self.conn.listFileAnnotations()) #################################################################### # Creation def createDataset(self, name, description=None): ds = omero.model.DatasetI() ds.name = rstring(str(name)) if description is not None and description != "" : ds.description = rstring(str(description)) if self.project is not None: l_ds = omero.model.ProjectDatasetLinkI() l_ds.setParent(self.project._obj) l_ds.setChild(ds) ds.addProjectDatasetLink(l_ds) return self.conn.saveAndReturnId(ds) def createProject(self, name, description=None): pr = omero.model.ProjectI() pr.name = rstring(str(name)) if description is not None and description != "" : pr.description = rstring(str(description)) return self.conn.saveAndReturnId(pr) def createScreen(self, name, description=None): sc = omero.model.ScreenI() sc.name = rstring(str(name)) if description is not None and description != "" : sc.description = rstring(str(description)) return self.conn.saveAndReturnId(sc) # Comment annotation def createCommentAnnotation(self, otype, content): otype = str(otype).lower() if not otype in ("project", "dataset", "image", "screen", "plate", "acquisition", "well"): raise AttributeError("Object type must be: project, dataset, image, screen, plate, acquisition, well. 
") if otype == 'well': otype = 'Image' selfobject = self.well.getWellSample().image() elif otype == 'acquisition': otype = 'PlateAcquisition' selfobject = self.acquisition else: selfobject = getattr(self, otype) otype = otype.title() ann = omero.model.CommentAnnotationI() ann.textValue = rstring(str(content)) l_ann = getattr(omero.model, otype+"AnnotationLinkI")() l_ann.setParent(selfobject._obj) l_ann.setChild(ann) self.conn.saveObject(l_ann) # Tag annotation def createTagAnnotationOnly(self, tag, desc): ann = None try: ann = self.conn.findTag(tag, desc)._obj except: pass if ann is None: ann = omero.model.TagAnnotationI() ann.textValue = rstring(str(tag)) ann.setDescription(rstring(str(desc))) self.conn.saveObject(ann) def createTagAnnotation(self, otype, tag, desc): otype = str(otype).lower() if not otype in ("project", "dataset", "image", "screen", "plate", "acquisition", "well"): raise AttributeError("Object type must be: project, dataset, image, screen, plate, acquisition, well. ") if otype == 'well': otype = 'Image' selfobject = self.well.getWellSample().image() elif otype == 'acquisition': otype = 'PlateAcquisition' selfobject = self.acquisition else: selfobject = getattr(self, otype) otype = otype.title() ann = None try: ann = self.conn.findTag(tag, desc)._obj except: pass if ann is None: ann = omero.model.TagAnnotationI() ann.textValue = rstring(str(tag)) ann.setDescription(rstring(str(desc))) t_ann = getattr(omero.model, otype+"AnnotationLinkI")() t_ann.setParent(selfobject._obj) t_ann.setChild(ann) self.conn.saveObject(t_ann) else: # Tag exists - check it isn't already linked to parent by this user params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() params.theFilter.ownerId = rlong(self.conn.getUser().id) # linked by current user links = self.conn.getAnnotationLinks(otype, parent_ids=[selfobject.id], ann_ids=[ann.id.val], params=params) links = list(links) if len(links) == 0: # current user has not already tagged this object t_ann = getattr(omero.model, otype+"AnnotationLinkI")() t_ann.setParent(selfobject._obj) t_ann.setChild(ann) self.conn.saveObject(t_ann) def checkMimetype(self, file_type): if file_type is None or len(file_type) == 0: file_type = "application/octet-stream" return file_type def createFileAnnotation(self, otype, newFile): otype = str(otype).lower() if not otype in ("project", "dataset", "image", "screen", "plate", "acquisition", "well"): raise AttributeError("Object type must be: project, dataset, image, screen, plate, acquisition, well. 
") if otype == 'well': otype = 'Image' selfobject = self.well.getWellSample().image() elif otype == 'acquisition': otype = 'PlateAcquisition' selfobject = self.acquisition else: selfobject = getattr(self, otype) otype = otype.title() format = self.checkMimetype(newFile.content_type) oFile = omero.model.OriginalFileI() oFile.setName(rstring(smart_str(newFile.name))); oFile.setPath(rstring(smart_str(newFile.name))); oFile.setSize(rlong(long(newFile.size))); oFile.setSha1(rstring("pending")); oFile.setMimetype(rstring(str(format))); ofid = self.conn.saveAndReturnId(oFile); of = self.conn.saveAndReturnFile(newFile, ofid) fa = omero.model.FileAnnotationI() fa.setFile(of) l_ia = getattr(omero.model, otype+"AnnotationLinkI")() l_ia.setParent(selfobject._obj) l_ia.setChild(fa) self.conn.saveObject(l_ia) def createCommentAnnotations(self, content, oids): ann = omero.model.CommentAnnotationI() ann.textValue = rstring(str(content)) ann = self.conn.saveAndReturnObject(ann) new_links = list() for k in oids.keys(): if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample().image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(ann._obj) new_links.append(l_ann) if len(new_links) > 0 : self.conn.saveArray(new_links) def createTagAnnotations(self, tag, desc, oids): ann = None try: ann = self.conn.findTag(tag, desc) except: pass if ann is None: ann = omero.model.TagAnnotationI() ann.textValue = rstring(str(tag)) ann.setDescription(rstring(str(desc))) ann = self.conn.saveAndReturnObject(ann) new_links = list() for k in oids: if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample().image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(ann._obj) new_links.append(l_ann) if len(new_links) > 0 : self.conn.saveArray(new_links) def createFileAnnotations(self, newFile, oids): format = self.checkMimetype(newFile.content_type) oFile = omero.model.OriginalFileI() oFile.setName(rstring(smart_str(newFile.name))); oFile.setPath(rstring(smart_str(newFile.name))); oFile.setSize(rlong(long(newFile.size))); oFile.setSha1(rstring("pending")); oFile.setMimetype(rstring(str(format))); ofid = self.conn.saveAndReturnId(oFile); of = self.conn.saveAndReturnFile(newFile, ofid) fa = omero.model.FileAnnotationI() fa.setFile(of) fa = self.conn.saveAndReturnObject(fa) new_links = list() for k in oids: if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample().image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(fa._obj) new_links.append(l_ann) if len(new_links) > 0 : self.conn.saveArray(new_links) # Create links def createAnnotationLinks(self, otype, atype, ids): otype = str(otype).lower() if not otype in ("project", "dataset", "image", "screen", "plate", "acquisition", "well"): raise AttributeError("Object type must be: project, dataset, image, screen, plate, acquisition, well.") atype = str(atype).lower() if not atype in ("tag", "comment", 
"file"): raise AttributeError("Object type must be: tag, comment, file.") if otype == 'well': otype = 'Image' selfobject = self.well.getWellSample().image() elif otype == 'acquisition': otype = 'PlateAcquisition' selfobject = self.acquisition else: selfobject = getattr(self, otype) otype = otype.title() new_links = list() for a in self.conn.getObjects("Annotation", ids): ann = getattr(omero.model, otype+"AnnotationLinkI")() ann.setParent(selfobject._obj) ann.setChild(a._obj) new_links.append(ann) failed = 0 try: self.conn.saveArray(new_links) except omero.ValidationException, x: for l in new_links: try: self.conn.saveObject(l) except: failed+=1 return failed def createAnnotationsLinks(self, atype, tids, oids): #TODO: check if link already exist !!! atype = str(atype).lower() if not atype.lower() in ("tag", "comment", "file"): raise AttributeError("Object type must be: tag, comment, file.") new_links = list() for k in oids: if len(oids[k]) > 0: if k.lower() == 'acquisitions': t = 'PlateAcquisition' else: t = k.lower().title() for ob in self.conn.getObjects(t, [o.id for o in oids[k]]): for a in self.conn.getObjects("Annotation", tids): if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample().image() else: obj = ob l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(a._obj) new_links.append(l_ann) failed = 0 try: self.conn.saveArray(new_links) except omero.ValidationException, x: for l in new_links: try: self.conn.saveObject(l) except: failed+=1 return failed ################################################################ # Update def updateDescription(self, o_type, o_id, description=None): obj = getattr(self, o_type)._obj if description is not None and description != "" : obj.description = rstring(str(description)) else: obj.description = None self.conn.saveObject(obj) def updateName(self, o_type, o_id, name): obj = getattr(self, o_type)._obj if o_type not in ('tag', 'tagset'): obj.name = rstring(str(name)) else: obj.textValue = rstring(str(name)) self.conn.saveObject(obj) def updateImage(self, name, description=None): img = self.image._obj img.name = rstring(str(name)) if description is not None and description != "" : img.description = rstring(str(description)) else: img.description = None self.conn.saveObject(img) def updateDataset(self, name, description=None): container = self.dataset._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updatePlate(self, name, description=None): container = self.plate._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updateProject(self, name, description=None): container = self.project._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updateScreen(self, name, description=None): container = self.screen._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def saveCommentAnnotation(self, content): ann = self.comment._obj ann.textValue = rstring(str(content)) 
self.conn.saveObject(ann) def saveTagAnnotation(self, tag, description=None): ann = self.tag._obj ann.textValue = rstring(str(tag)) if description is not None and description != "" : ann.description = rstring(str(description)) else: ann.description = None self.conn.saveObject(ann) def move(self, parent, destination): if self.project is not None: return 'Cannot move project.' elif self.dataset is not None: if destination[0] == 'dataset': return 'Cannot move dataset to dataset' elif destination[0] == 'project': up_pdl = None pdls = self.dataset.getParentLinks() already_there = None for pdl in pdls: if pdl.parent.id.val == long(destination[1]): already_there = True if pdl.parent.id.val == long(parent[1]): up_pdl = pdl if already_there: if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_pdl._obj) else: new_pr = self.conn.getObject("Project", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl._obj) else: up_pdl = omero.model.ProjectDatasetLinkI() up_pdl.setChild(self.dataset._obj) up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl) elif destination[0] == 'experimenter': up_pdl = None for p in self.dataset.getParentLinks(): if p.parent.id.val == long(parent[1]): up_pdl = p self.conn.deleteObjectDirect(up_pdl._obj) elif destination[0] == 'orphaned': return 'Cannot move dataset to orphaned images.' else: return 'Destination not supported.' elif self.image is not None: if destination[0] == 'dataset': up_dsl = None dsls = self.image.getParentLinks() #gets every links for child already_there = None #checks links for dsl in dsls: #if is already linked to destination if dsl.parent.id.val == long(destination[1]): already_there = True # gets old parent to update or delete if dsl.parent.id.val == long(parent[1]): up_dsl = dsl if already_there: # delete link to not duplicate if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_dsl._obj) else: # update link to new destination new_ds = self.conn.getObject("Dataset", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl._obj) else: up_dsl = omero.model.DatasetImageLinkI() up_dsl.setChild(self.image._obj) up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl) elif destination[0] == 'project': return 'Cannot move image to project.' elif destination[0] == 'experimenter' or destination[0] == 'orphaned': if parent[0] != destination[0]: up_dsl = None dsls = list(self.image.getParentLinks()) #gets every links for child if len(dsls) == 1: # gets old parent to delete if dsls[0].parent.id.val == long(parent[1]): up_dsl = dsls[0] self.conn.deleteObjectDirect(up_dsl._obj) else: return 'This image is linked in multiple places. Please unlink the image first.' else: return 'Destination not supported.' elif self.screen is not None: return 'Cannot move screen.' 
elif self.plate is not None: if destination[0] == 'plate': return 'Cannot move plate to plate' elif destination[0] == 'screen': up_spl = None spls = self.plate.getParentLinks() already_there = None for spl in spls: if spl.parent.id.val == long(destination[1]): already_there = True if spl.parent.id.val == long(parent[1]): up_spl = spl if already_there: if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_spl._obj) else: new_sc = self.conn.getObject("Screen", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl._obj) else: up_spl = omero.model.ScreenPlateLinkI() up_spl.setChild(self.plate._obj) up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl) elif destination[0] == 'experimenter' or destination[0] == 'orphaned': if parent[0] != destination[0]: up_spl = None spls = list(self.plate.getParentLinks()) #gets every links for child if len(spls) == 1: # gets old parent to delete if spls[0].parent.id.val == long(parent[1]): up_spl = spls[0] self.conn.deleteObjectDirect(up_spl._obj) else: return 'This plate is linked in multiple places. Please unlink the plate first.' else: return 'Destination not supported.' else: return 'No data was choosen.' return def remove(self, parent): if self.tag: for al in self.tag.getParentLinks(str(parent[0]), [long(parent[1])]): if al is not None and al.details.owner.id.val == self.conn.getUser().id: self.conn.deleteObjectDirect(al._obj) elif self.file: for al in self.file.getParentLinks(str(parent[0]), [long(parent[1])]): if al is not None and al.details.owner.id.val == self.conn.getUser().id: self.conn.deleteObjectDirect(al._obj) elif self.comment: # remove the comment from specified parent for al in self.comment.getParentLinks(str(parent[0]), [long(parent[1])]): if al is not None and al.details.owner.id.val == self.conn.getUser().id: self.conn.deleteObjectDirect(al._obj) # if comment is orphan, delete it directly orphan = True for parentType in ["Project", "Dataset", "Image", "Screen", "Plate"]: annLinks = list(self.conn.getAnnotationLinks(parentType, ann_ids=[self.comment.id])) if len(annLinks) > 0: orphan = False break if orphan: self.conn.deleteObjectDirect(self.comment._obj) elif self.dataset is not None: if parent[0] == 'project': for pdl in self.dataset.getParentLinks([parent[1]]): if pdl is not None: self.conn.deleteObjectDirect(pdl._obj) elif self.plate is not None: if parent[0] == 'screen': for spl in self.plate.getParentLinks([parent[1]]): if spl is not None: self.conn.deleteObjectDirect(spl._obj) elif self.image is not None: if parent[0] == 'dataset': for dil in self.image.getParentLinks([parent[1]]): if dil is not None: self.conn.deleteObjectDirect(dil._obj) else: raise AttributeError("Attribute not specified. Cannot be removed.") def removemany(self, images): if self.dataset is not None: dil = self.dataset.getParentLinks('image', images) if dil is not None: self.conn.deleteObjectDirect(dil._obj) else: raise AttributeError("Attribute not specified. Cannot be removed.") ########################################################## # Copy def paste(self, destination): if self.project is not None: return 'Cannot paste project.' elif self.dataset is not None: if destination[0] == 'dataset': return 'Cannot paste dataset to dataset' elif destination[0] == 'project': pdls = self.dataset.getParentLinks() already_there = None for pdl in pdls: if pdl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Dataset is already there.' 
else: new_pr = self.conn.getObject("Project", destination[1]) up_pdl = omero.model.ProjectDatasetLinkI() up_pdl.setChild(self.dataset._obj) up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl) else: return 'Destination not supported.' elif self.image is not None: if destination[0] == 'dataset': dsls = self.image.getParentLinks() #gets every links for child already_there = None #checks links for dsl in dsls: #if is already linked to destination if dsl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Image is already there.' else: # update link to new destination new_ds = self.conn.getObject("Dataset", destination[1]) up_dsl = omero.model.DatasetImageLinkI() up_dsl.setChild(self.image._obj) up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl) elif destination[0] == 'project': return 'Cannot copy image to project.' else: return 'Destination not supported.' elif self.screen is not None: return 'Cannot paste screen.' elif self.plate is not None: if destination[0] == 'plate': return 'Cannot move plate to plate' elif destination[0] == 'screen': spls = self.plate.getParentLinks() already_there = None for spl in spls: if spl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Plate is already there.' else: new_sc = self.conn.getObject("Screen", destination[1]) up_spl = omero.model.ScreenPlateLinkI() up_spl.setChild(self.plate._obj) up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl) else: return 'Destination not supported.' else: return 'No data was choosen.' def copyImageToDataset(self, source, destination=None): if destination is None: dsls = self.conn.getDatasetImageLinks(source[1]) #gets every links for child for dsl in dsls: self.conn.deleteObjectDirect(dsl._obj) else: im = self.conn.getObject("Image", source[1]) ds = self.conn.getObject("Dataset", destination[1]) new_dsl = omero.model.DatasetImageLinkI() new_dsl.setChild(im._obj) new_dsl.setParent(ds._obj) self.conn.saveObject(new_dsl) def copyImagesToDataset(self, images, dataset): if dataset is not None and dataset[0] is not "dataset": ims = self.conn.getObjects("Image", images) ds = self.conn.getObject("Dataset", dataset[1]) link_array = list() for im in ims: new_dsl = omero.model.DatasetImageLinkI() new_dsl.setChild(im._obj) new_dsl.setParent(ds._obj) link_array.append(new_dsl) self.conn.saveArray(link_array) raise AttributeError("Destination not supported") def copyDatasetToProject(self, source, destination=None): if destination is not None and destination[0] is not "project": ds = self.conn.getObject("Dataset", source[1]) pr = self.conn.getObject("Project", destination[1]) new_pdl = omero.model.ProjectDatasetLinkI() new_pdl.setChild(ds._obj) new_pdl.setParent(pr._obj) self.conn.saveObject(new_pdl) raise AttributeError("Destination not supported") def copyDatasetsToProject(self, datasets, project): if project is not None and project[0] is not "project": dss = self.conn.getObjects("Dataset", datasets) pr = self.conn.getObject("Project", project[1]) link_array = list() for ds in dss: new_pdl = omero.model.ProjectDatasetLinkI() new_pdl.setChild(ds._obj) new_pdl.setParent(pr._obj) link_array.append(new_pdl) self.conn.saveArray(link_array) raise AttributeError("Destination not supported") def copyPlateToScreen(self, source, destination=None): if destination is not None and destination[0] is not "screen": pl = self.conn.getObject("Plate", source[1]) sc = self.conn.getObject("Screen", destination[1]) new_spl = omero.model.ScreenPlateLinkI() 
new_spl.setChild(pl._obj) new_spl.setParent(sc._obj) self.conn.saveObject(new_spl) raise AttributeError("Destination not supported") def copyPlatesToScreen(self, plates, screen): if screen is not None and screen[0] is not "screen": pls = self.conn.getObjects("Plate", plates) sc = self.conn.getObject("Screen", screen[1]) link_array = list() for pl in pls: new_spl = omero.model.ScreenPlateLinkI() new_spl.setChild(pl._obj) new_spl.setParent(sc._obj) link_array.append(new_spl) self.conn.saveArray(link_array) raise AttributeError("Destination not supported") ########################################################## # Delete def deleteItem(self, child=False, anns=False): handle = None if self.image: handle = self.conn.deleteObjects("Image", [self.image.id], deleteAnns=anns) elif self.dataset: handle = self.conn.deleteObjects("Dataset", [self.dataset.id], deleteChildren=child, deleteAnns=anns) elif self.project: handle = self.conn.deleteObjects("Project", [self.project.id], deleteChildren=child, deleteAnns=anns) elif self.screen: handle = self.conn.deleteObjects("Screen", [self.screen.id], deleteChildren=child, deleteAnns=anns) elif self.plate: handle = self.conn.deleteObjects("Plate", [self.plate.id], deleteAnns=anns) elif self.comment: handle = self.conn.deleteObjects("Annotation", [self.comment.id], deleteAnns=anns) elif self.tag: handle = self.conn.deleteObjects("Annotation", [self.tag.id], deleteAnns=anns) elif self.file: handle = self.conn.deleteObjects("Annotation", [self.file.id], deleteAnns=anns) return handle def deleteObjects(self, otype, ids, child=False, anns=False): return self.conn.deleteObjects(otype, ids, deleteChildren=child, deleteAnns=anns)
gpl-2.0
360,977,168,249,794,500
46.191646
232
0.564596
false
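The webclient code in the record above moves and copies OMERO objects by editing parent/child link objects directly. As a rough reference for that pattern, here is a minimal sketch of linking an image to a dataset using the same calls the code itself relies on (omero.model.DatasetImageLinkI, setChild/setParent, conn.saveObject). The connection `conn` is assumed to be an already-authenticated BlitzGateway, and the IDs are placeholders; this is not the webclient's own helper.

import omero
from omero.gateway import BlitzGateway  # connection type assumed by this sketch

def link_image_to_dataset(conn, image_id, dataset_id):
    # conn: an authenticated BlitzGateway connection (assumption of this sketch)
    image = conn.getObject("Image", image_id)
    dataset = conn.getObject("Dataset", dataset_id)
    # skip if a link to this dataset already exists
    for link in image.getParentLinks():
        if link.parent.id.val == dataset.getId():
            return None
    # otherwise create and save a new parent/child link
    link = omero.model.DatasetImageLinkI()
    link.setChild(image._obj)
    link.setParent(dataset._obj)
    conn.saveObject(link)
    return link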
Fougere87/unsec
hugo_test.py
1
7674
#!/usr/bin/python3 import os import logging import unsec from sklearn import cluster import matplotlib.pyplot as plt import matplotlib as mpl from sklearn import metrics from mpl_toolkits.mplot3d import Axes3D from sklearn import decomposition from sklearn.metrics import pairwise from sklearn import mixture import matplotlib.cm as cm import numpy as np from unsec import Email, EmailCollection, Cleaner, TestEmailCollection from unsec.vectorizer import TfidfVectorizer, LogicVectorizer from unsec.algorithm import SKMeanAlgo, HierarchicalAlgo from unsec import Clusterizer import unsec # import logging # logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO) # collection = TestEmailCollection(dataset = unsec.LARGE_DATASET_PATH) collection = EmailCollection() collection.add_from_files("data/complete/bioinfo_2014-0*") # collection.keep_lang("fr") engine = Clusterizer(collection) engine.target = "both" engine.set_vectorizer(TfidfVectorizer()) engine.set_algorithm(HierarchicalAlgo(n_clusters = 2, affinity ="cosine")) engine.run_cleaner() engine.run_vectorizer() prev_silhouette_res = -0.15 sd_treshold = 0.10 n_clust = 50 #numbre of clustering to iterate clust_to_reclust = [] for n_clusters in range(2,n_clust) : #==============================Computing new n clusters engine.set_algorithm(HierarchicalAlgo(n_clusters = n_clusters, affinity ="cosine")) engine.run_algorithm() matrix = np.array(engine.vectorizer.matrix) labels = engine.labels #=============================Calculating silhouette score of the clustering silhouette_res = metrics.silhouette_score(matrix, labels, metric='cosine') silhouette_diff = silhouette_res - prev_silhouette_res print(n_clusters, silhouette_res, silhouette_diff, sep="\t") #=============================Decision if silhouette_diff >= sd_treshold : # # for coll in engine.clusters : # print("================================") # for e in coll : # print(e.get_subject()) sample_silhouette_values = metrics.silhouette_samples(matrix, labels) for clust in range(n_clusters) : ith_cluster_silhouette_values = sample_silhouette_values[labels == clust] ith_cluster_silhouette_mean = np.mean(ith_cluster_silhouette_values) if ith_cluster_silhouette_mean < 0 and len([labels==clust]) > 30 : clust_to_reclust.append(engine.clusters[clust]) print("cluster,", clust, "is to be reclustered as it's silhouette score is ", ith_cluster_silhouette_mean) ax1 = plt y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = sample_silhouette_values[labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.title("The silhouette plot for the various clusters.") ax1.xlabel("The silhouette coefficient values") ax1.ylabel("Cluster label") ax1.axvline(x=silhouette_res, color="red", linestyle="--") plt.show() prev_silhouette_res = silhouette_res def unclusterded_clusters_detection(clusterizer) : sample_silhouette_values = clusterizer.silhouette_samples() clusters = [] labels = clusterizer.labels matrix = clusterizer.matrix for 
clust in range(max(labels)) : ith_cluster_silhouette_values = sample_silhouette_values[labels == clust] ith_cluster_silhouette_mean = np.mean(ith_cluster_silhouette_values) if ith_cluster_silhouette_mean < 0 : clust_to_reclust.append(clusterizer.clusters[clust]) return clust_to_reclust def reclusterise(clusters, target = "body", vectorizer = TfidfVectorizer(), algorithm = HierarchicalAlgo(), n_clusters = 2,affinity ="cosine") : new_clusts = [] for nc in clusters : print(nc) reclusterizer = Clusterizer(nc, target = "both") reclusterizer.set_algorithm(algorithm(n_clusters = 2, affinity = affinity)) reclusterizer.set_vectorizer(vectorizer) reclusterizer.compute() new_clusts.append(reclusterizer.clusters) return new_clusts sub_clusters =[] for nc in clust_to_reclust : sub_clusters.append(reclusterise(nc, n_clusters = 15)) for clust_ens in sub_clusters : for clust in clust_ens : print("============================================") [print(e) for i in clust.get_subjects()] # for c in engine.clusters : # for e in c : # print(e.get_subject()) # print(engine.algorithm.k_means.inertia_) # for n in range(2,50) : # engine.set_algo(SKMeanAlgo(n_clusters = n)) # engine.labels = engine.algorithm.run(engine.vectorizer.matrix) # engine.compute_clusters() # print(engine.algorithm.k_means.inertia_) # # # matrix_sub = Tools.vectorize_tf_idf(coll.all_cleaned_subjects) # create data matrix # matrix_bod = Tools.vectorize_tf_idf(coll.all_cleaned_bodies) # # Tools.matrix_to_csv(matrix_bod, Tools.words_in_collection(coll.all_cleaned_bodies), "tfidf_bod.csv") # k_means = cluster.KMeans(n_clusters=4) #create k-mean objet with n clusters as param # # print("K-mean fitting...") # k_means.fit(matrix_sub) # print(k_means.labels_) # # clusters_files = Clustering.get_clustered_docs(k_means.labels_,coll.files_list) # # [print(e) for e in clusters_files] # clusters_files = Clustering.get_clustered_docs(k_means.labels_,coll.files_list) # [print(e) for e in clusters_files] # # cluster1 = " ".join(Clustering.get_clustered_docs(k_means.labels_, coll.all_cleaned_bodies)[0]) # cluster2 = " ".join(Clustering.get_clustered_docs(k_means.labels_, coll.all_cleaned_bodies)[1]) # cluster3 = " ".join(Clustering.get_clustered_docs(k_means.labels_, coll.all_cleaned_bodies)[2]) # cluster4 = " ".join(Clustering.get_clustered_docs(k_means.labels_, coll.all_cleaned_bodies)[3]) # # def ntop_inverse_tf(raw, n) : tab =list(set(raw.split(" "))) l = len(raw.split(" ")) result = [] counts = [[]for i in range(2)] counts[0]=tab counts[1]=[raw.count(word)/l for word in tab] for j in range(n) : i = counts[1].index(max(counts[1])) result.append(counts[0][i]) counts[0].pop(i) counts[1].pop(i) return result # # print(ntop_inverse_tf(cluster1, 30)) # print(ntop_inverse_tf(cluster2, 30)) # print(ntop_inverse_tf(cluster3, 30)) # print(ntop_inverse_tf(cluster4, 30)) # # def get_nmax_elts(listpca, n) : # sorted_list = sorted(listpca) # listpca = list(listpca) # result = [] # for e in sorted_list[:n] : # result.append(listpca.index(e)) # return(result) # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # x_data = [e[0] for e in reduced_mat_cl1] # y_data = [e[1] for e in reduced_mat_cl1] # z_data = [e[2] for e in reduced_mat_cl1] # ax.scatter(x_data, y_data, z_data, depthshade=True) # plt.show() # Clustering.kmeans(matrix, 3) # print(Tools.vectorize_tf_idf(coll)[1]) #print(e.clean_body())
unlicense
-4,882,499,110,803,133,000
34.041096
144
0.6595
false
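hugo_test.py decides whether a cluster needs re-clustering from its mean per-sample silhouette value. A self-contained sketch of that check with scikit-learn on synthetic data follows; the blob parameters are illustrative, and the threshold of 0 mirrors the rule used in the script.

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score, silhouette_samples

# synthetic documents-as-vectors: two tight blobs plus a loose "noise" blob
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.3, (40, 5)),
               rng.normal(3.0, 0.3, (40, 5)),
               rng.normal(1.5, 2.0, (20, 5))])

labels = AgglomerativeClustering(n_clusters=3).fit_predict(X)

print("overall silhouette:", silhouette_score(X, labels))
per_sample = silhouette_samples(X, labels)  # one value per sample

for k in range(labels.max() + 1):
    cluster_mean = per_sample[labels == k].mean()
    # same decision rule as the script: a negative mean silhouette marks
    # the cluster as a candidate for re-clustering
    if cluster_mean < 0:
        print("cluster", k, "is a re-clustering candidate:", cluster_mean)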
ishalyminov/memn2n
single.py
1
4686
"""Example running MemN2N on a single bAbI task. Download tasks from facebook.ai/babi """ from __future__ import absolute_import from __future__ import print_function from data_utils import load_task, vectorize_data from sklearn import cross_validation, metrics from memn2n import MemN2N from itertools import chain from six.moves import range, reduce import tensorflow as tf import numpy as np tf.flags.DEFINE_float("learning_rate", 0.01, "Learning rate for Adam Optimizer.") tf.flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam Optimizer.") tf.flags.DEFINE_float("max_grad_norm", 40.0, "Clip gradients to this norm.") tf.flags.DEFINE_integer("evaluation_interval", 10, "Evaluate and print results every x epochs") tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.") tf.flags.DEFINE_integer("hops", 3, "Number of hops in the Memory Network.") tf.flags.DEFINE_integer("epochs", 200, "Number of epochs to train for.") tf.flags.DEFINE_integer("embedding_size", 20, "Embedding size for embedding matrices.") tf.flags.DEFINE_integer("memory_size", 50, "Maximum size of memory.") tf.flags.DEFINE_integer("task_id", 1, "bAbI task id, 1 <= id <= 20") tf.flags.DEFINE_integer("random_state", None, "Random state.") tf.flags.DEFINE_string("data_dir", "data/tasks_1-20_v1-2/en/", "Directory containing bAbI tasks") FLAGS = tf.flags.FLAGS print("Started Task:", FLAGS.task_id) # task data train, test = load_task(FLAGS.data_dir, FLAGS.task_id) data = train + test vocab = sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + a) for s, q, a in data))) word_idx = dict((c, i + 1) for i, c in enumerate(vocab)) max_story_size = max(map(len, (s for s, _, _ in data))) mean_story_size = int(np.mean([ len(s) for s, _, _ in data ])) sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data))) query_size = max(map(len, (q for _, q, _ in data))) memory_size = min(FLAGS.memory_size, max_story_size) vocab_size = len(word_idx) + 1 # +1 for nil word sentence_size = max(query_size, sentence_size) # for the position print("Longest sentence length", sentence_size) print("Longest story length", max_story_size) print("Average story length", mean_story_size) # train/validation/test sets S, Q, A = vectorize_data(train, word_idx, sentence_size, memory_size) trainS, valS, trainQ, valQ, trainA, valA = cross_validation.train_test_split(S, Q, A, test_size=.1, random_state=FLAGS.random_state) testS, testQ, testA = vectorize_data(test, word_idx, sentence_size, memory_size) print(testS[0]) print("Training set shape", trainS.shape) # params n_train = trainS.shape[0] n_test = testS.shape[0] n_val = valS.shape[0] print("Training Size", n_train) print("Validation Size", n_val) print("Testing Size", n_test) train_labels = np.argmax(trainA, axis=1) test_labels = np.argmax(testA, axis=1) val_labels = np.argmax(valA, axis=1) tf.set_random_seed(FLAGS.random_state) batch_size = FLAGS.batch_size optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, epsilon=FLAGS.epsilon) batches = zip(range(0, n_train-batch_size, batch_size), range(batch_size, n_train, batch_size)) batches = [(start, end) for start, end in batches] with tf.Session() as sess: model = MemN2N(batch_size, vocab_size, sentence_size, memory_size, FLAGS.embedding_size, session=sess, hops=FLAGS.hops, max_grad_norm=FLAGS.max_grad_norm, optimizer=optimizer) for t in range(1, FLAGS.epochs+1): np.random.shuffle(batches) total_cost = 0.0 for start, end in batches: s = trainS[start:end] q = trainQ[start:end] a = trainA[start:end] cost_t 
= model.batch_fit(s, q, a) total_cost += cost_t if t % FLAGS.evaluation_interval == 0: train_preds = [] for start in range(0, n_train, batch_size): end = start + batch_size s = trainS[start:end] q = trainQ[start:end] pred = model.predict(s, q) train_preds += list(pred) val_preds = model.predict(valS, valQ) train_acc = metrics.accuracy_score(np.array(train_preds), train_labels) val_acc = metrics.accuracy_score(val_preds, val_labels) print('-----------------------') print('Epoch', t) print('Total Cost:', total_cost) print('Training Accuracy:', train_acc) print('Validation Accuracy:', val_acc) print('-----------------------') test_preds = model.predict(testS, testQ) test_acc = metrics.accuracy_score(test_preds, test_labels) print("Testing Accuracy:", test_acc)
mit
3,172,476,606,770,828,000
40.105263
132
0.660478
false
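One detail of the MemN2N training loop above that is easy to misread is the batch-index construction. A small standalone illustration (values are arbitrary stand-ins for n_train and batch_size); note that the final partial batch is dropped by this construction.

# Minimal illustration of the batch-index construction used in single.py
n_train, batch_size = 100, 32
batches = zip(range(0, n_train - batch_size, batch_size),
              range(batch_size, n_train, batch_size))
batches = [(start, end) for start, end in batches]
print(batches)  # [(0, 32), (32, 64), (64, 96)] -- samples 96..99 never appear in an epoch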
JackKelly/neuralnilm_prototype
scripts/e487.py
2
6833
from __future__ import print_function, division import matplotlib import logging from sys import stdout matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import (Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer, BidirectionalRecurrentLayer) from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff, RandomSegments, RandomSegmentsInMemory, SameLocation) from neuralnilm.experiment import run_experiment, init_experiment from neuralnilm.net import TrainingError from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer, SharedWeightsDenseLayer) from neuralnilm.objectives import (scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive, scaled_cost3) from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter from neuralnilm.updates import clipped_nesterov_momentum from neuralnilm.disaggregate import disaggregate from lasagne.nonlinearities import sigmoid, rectify, tanh, identity from lasagne.objectives import mse, binary_crossentropy from lasagne.init import Uniform, Normal, Identity from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer) from lasagne.layers.batch_norm import BatchNormLayer from lasagne.updates import nesterov_momentum, momentum from functools import partial import os import __main__ from copy import deepcopy from math import sqrt import numpy as np import theano.tensor as T import gc """ 447: first attempt at disaggregation """ NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] #PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" PATH = "/data/dk3810/figures" SAVE_PLOT_INTERVAL = 1000 N_SEQ_PER_BATCH = 64 source_dict = dict( filename='/data/dk3810/ukdale.h5', window=("2013-03-18", None), train_buildings=[1, 2, 3, 4, 5], validation_buildings=[1, 2, 3, 4, 5], n_seq_per_batch=N_SEQ_PER_BATCH, standardise_input=True, standardise_targets=True, independently_center_inputs=True, subsample_target=8, ignore_incomplete=True # offset_probability=0.5, # ignore_offset_activations=True ) net_dict = dict( save_plot_interval=SAVE_PLOT_INTERVAL, # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH), # loss_function=lambda x, t: mdn_nll(x, t).mean(), # loss_function=lambda x, t: (mse(x, t) * MASK).mean(), loss_function=lambda x, t: mse(x, t).mean(), # loss_function=lambda x, t: binary_crossentropy(x, t).mean(), # loss_function=partial(scaled_cost, loss_func=mse), # loss_function=ignore_inactive, # loss_function=partial(scaled_cost3, ignore_inactive=False), # updates_func=momentum, updates_func=clipped_nesterov_momentum, updates_kwargs={'clip_range': (0, 10)}, learning_rate=1e-2, learning_rate_changes_by_iteration={ 1000: 1e-3, 5000: 1e-4 }, do_save_activations=True, auto_reshape=False, # plotter=CentralOutputPlotter plotter=Plotter(n_seq_to_plot=32) ) def exp_a(name, target_appliance, seq_length): global source source_dict_copy = deepcopy(source_dict) source_dict_copy.update(dict( target_appliance=target_appliance, logger=logging.getLogger(name), seq_length=seq_length )) source = RandomSegmentsInMemory(**source_dict_copy) net_dict_copy = deepcopy(net_dict) net_dict_copy.update(dict( experiment_name=name, source=source )) NUM_FILTERS = 4 target_seq_length = seq_length // source.subsample_target net_dict_copy['layers_config'] = [ { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # (batch, features, time) }, { 'label': 'conv0', 'type': Conv1DLayer, # convolve over the time axis 'num_filters': 
NUM_FILTERS, 'filter_length': 4, 'stride': 1, 'nonlinearity': None, 'border_mode': 'valid' }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # back to (batch, time, features) }, { 'label': 'dense0', 'type': DenseLayer, 'num_units': (seq_length - 3) * NUM_FILTERS, 'nonlinearity': rectify }, { 'label': 'dense2', 'type': DenseLayer, 'num_units': 128, 'nonlinearity': rectify }, { 'type': DenseLayer, 'num_units': (target_seq_length - 3) * NUM_FILTERS, 'nonlinearity': rectify }, { 'type': ReshapeLayer, 'shape': (N_SEQ_PER_BATCH, target_seq_length - 3, NUM_FILTERS) }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # (batch, features, time) }, { 'type': DeConv1DLayer, 'num_output_channels': 1, 'filter_length': 4, 'stride': 1, 'nonlinearity': None, 'border_mode': 'full' }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # back to (batch, time, features) } ] net = Net(**net_dict_copy) return net def main(): APPLIANCES = [ ('a', ['fridge freezer', 'fridge', 'freezer'], 512), ('b', "'coffee maker'", 512), ('c', "'dish washer'", 2000), ('d', "'hair dryer'", 256), ('e', "'kettle'", 256), ('f', "'oven'", 2000), ('g', "'toaster'", 256), ('h', "'light'", 2000), ('i', ['washer dryer', 'washing machine'], 1500) ] for experiment, appliance, seq_length in APPLIANCES[:1]: full_exp_name = NAME + experiment func_call = init_experiment(PATH, 'a', full_exp_name) func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length) logger = logging.getLogger(full_exp_name) try: net = eval(func_call) run_experiment(net, epochs=None) except KeyboardInterrupt: logger.info("KeyboardInterrupt") break except Exception as exception: logger.exception("Exception") # raise else: del net.source del net gc.collect() finally: logging.shutdown() if __name__ == "__main__": main() """ Emacs variables Local Variables: compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e487.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/" End: """
mit
5,616,429,882,336,762,000
31.538095
133
0.590517
false
DavidAce/PT
Data_Analysis/dataanaylysis.py
1
1536
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os.path
import h5py

filename = '../output/data.h5'
if(not os.path.exists(filename)):
    print("File does not exist.")
    exit(1)

store = pd.HDFStore(filename, mode='r')  # Use this to read hdf5 tables. Open in read mode.
h5f = h5py.File(filename, 'r')           # Use this to read hdf5 datasets. Pandas does not like datasets, but h5py does.

thermo_exists = "thermodynamics" in store
temps_exists = "temperatures/T" in store
if thermo_exists and temps_exists:
    keys = [s for s in store.keys() if "thermodynamics" in s]  # Scan for all datasets in "group"
    # T = h5f["temperatures/T"].value  # Use this to read the temperature dataset instead. This is needed in older versions of the PT program.
    thermo = pd.DataFrame()
    for key in keys:
        headers = list(store[key])
        thermo = pd.concat([thermo, store[key].tail(1)], ignore_index=True)  # Take last element in each table
    thermo = thermo.sort_values(by="T")  # Sort by temperature. This is needed because T10 would come before T2 otherwise
    print(thermo)

    fig, ax = plt.subplots(2, 2, sharex=True)
    ax[0, 0].errorbar(thermo["T"], thermo["u"], yerr=thermo["u_std"], capsize=4)
    ax[0, 1].errorbar(thermo["T"], thermo["m"], yerr=thermo["m_std"], capsize=4)
    ax[1, 0].errorbar(thermo["T"], thermo["c"], yerr=thermo["c_std"], capsize=4)
    ax[1, 1].errorbar(thermo["T"], thermo["x"], yerr=thermo["x_std"], capsize=4)

store.close()
plt.show()
exit()
gpl-3.0
5,602,338,793,825,159,000
44.205882
142
0.667318
false
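dataanaylysis.py mixes pandas' HDFStore (for tables written by pandas/PyTables) with h5py (for plain datasets). A minimal round-trip sketch of that split, using a throwaway file and made-up keys; like HDFStore itself, the pandas half needs PyTables installed.

import pandas as pd
import numpy as np
import h5py

# write a tiny file containing both a pandas table and a raw dataset
df = pd.DataFrame({"T": [0.5, 1.0, 2.0], "u": [-1.2, -0.8, -0.3]})
df.to_hdf("tiny.h5", key="thermodynamics/run0", format="table")  # requires PyTables
with h5py.File("tiny.h5", "a") as h:
    h.create_dataset("temperatures/T", data=np.array([0.5, 1.0, 2.0]))

# pandas reads the table...
with pd.HDFStore("tiny.h5", mode="r") as store:
    print(store.keys())                       # ['/thermodynamics/run0']
    print(store["thermodynamics/run0"].tail(1))

# ...while h5py reads the raw dataset
with h5py.File("tiny.h5", "r") as h:
    print(h["temperatures/T"][:])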
wallinm1/kaggle-facebook-bot
hyperopt_xgb.py
1
2608
import pandas as pd from sklearn.cross_validation import cross_val_score, StratifiedKFold from sklearn.feature_selection import SelectPercentile, chi2 from sklearn.externals import joblib from sklearn.pipeline import Pipeline import xgboost as xgb from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import os def score(params): global df_scores params['n_estimators'] = int(params['n_estimators']) print "Training with params : " print params sel_pct = int(params['sel_pct']) del params['sel_pct'] clf = xgb.XGBClassifier() clf.set_params(**params) pipeline = Pipeline([('selector', SelectPercentile(chi2, sel_pct)), ('clf', clf)]) scores = cross_val_score(pipeline, xtrain, y, scoring = 'roc_auc',cv = kf) score = scores.mean() print "\tScore {0}\n\n".format(score) row = [score, params['n_estimators'], params['learning_rate'], params['max_depth'], params['min_child_weight'], params['subsample'], params['gamma'], params['colsample_bytree'], sel_pct] df_scores.loc[len(df_scores.index)] = row df_scores.sort(columns = 'score', ascending = False, inplace = True) df_scores.to_csv(fname, index = False) return {'loss': score, 'status': STATUS_OK} def optimize(trials): space = { 'n_estimators' : hp.quniform('n_estimators', 5, 1000, 1), 'learning_rate' : hp.quniform('learning_rate', 0.001, 0.5, 0.001), 'max_depth' : hp.quniform('max_depth', 1, 13, 1), 'min_child_weight' : hp.quniform('min_child_weight', 1, 6, 1), 'subsample' : hp.quniform('subsample', 0.4, 1, 0.05), 'gamma' : hp.quniform('gamma', 0, 1, 0.05), 'colsample_bytree' : hp.quniform('colsample_bytree', 0.4, 1, 0.05), 'sel_pct' : hp.quniform('sel_pct', 1, 100, 1), 'objective' : 'binary:logistic', 'silent' : 1 } best = fmin(score, space, algo=tpe.suggest, trials=trials, max_evals=500) print best xtrain = joblib.load('data/xtrain.pkl') y = joblib.load('data/y.pkl') nf = 4 kf = StratifiedKFold(y, n_folds = nf, random_state = 42, shuffle = True) fname = 'hyperopt_xgb.csv' if os.path.isfile(fname): df_scores = pd.read_csv(fname) else: df_scores = pd.DataFrame(columns = ('score', 'n_estimators','learning_rate', 'max_depth', 'min_child_weight', 'subsample', 'gamma', 'colsample_bytree', 'sel_pct')) trials = Trials() optimize(trials)
mit
2,690,637,892,772,345,000
41.080645
80
0.593942
false
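hyperopt_xgb.py wires hyperopt's fmin/tpe around an XGBoost pipeline. Below is a toy, self-contained version of the same loop with made-up parameter names. One caveat worth flagging: fmin minimizes the returned 'loss', so a score such as ROC AUC would normally be negated before being returned, whereas the script above returns it unnegated.

from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

def objective(params):
    # hp.quniform yields floats, so integer-valued params are cast explicitly,
    # mirroring the int(params['n_estimators']) cast in the script above
    n = int(params["n"])
    x = params["x"]
    loss = (x - 3.0) ** 2 + abs(n - 10)   # toy objective with a known minimum
    return {"loss": loss, "status": STATUS_OK}

space = {
    "n": hp.quniform("n", 1, 50, 1),
    "x": hp.uniform("x", -10, 10),
}

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=50, trials=trials)
print(best)   # should land near {'n': 10.0, 'x': 3.0}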
freedomtan/tensorflow
tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py
4
3895
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for Keras hashing preprocessing layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import random import string import time import numpy as np from tensorflow.python import keras from tensorflow.python.compat import v2_compat from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.keras.layers.preprocessing import hashing from tensorflow.python.ops import string_ops from tensorflow.python.platform import benchmark from tensorflow.python.platform import test v2_compat.enable_v2_behavior() # word_gen creates random sequences of ASCII letters (both lowercase and upper). # The number of unique strings is ~2,700. def word_gen(): for _ in itertools.count(1): yield "".join(random.choice(string.ascii_letters) for i in range(2)) class BenchmarkLayer(benchmark.TensorFlowBenchmark): """Benchmark the layer forward pass.""" def run_dataset_implementation(self, batch_size): num_repeats = 5 starts = [] ends = [] for _ in range(num_repeats): ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string, tensor_shape.TensorShape([])) ds = ds.shuffle(batch_size * 100) ds = ds.batch(batch_size) num_batches = 5 ds = ds.take(num_batches) ds = ds.prefetch(num_batches) starts.append(time.time()) # Benchmarked code begins here. for i in ds: _ = string_ops.string_to_hash_bucket(i, num_buckets=2) # Benchmarked code ends here. ends.append(time.time()) avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches return avg_time def bm_layer_implementation(self, batch_size): input_1 = keras.Input(shape=(None,), dtype=dtypes.string, name="word") layer = hashing.Hashing(num_bins=2) _ = layer(input_1) num_repeats = 5 starts = [] ends = [] for _ in range(num_repeats): ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string, tensor_shape.TensorShape([])) ds = ds.shuffle(batch_size * 100) ds = ds.batch(batch_size) num_batches = 5 ds = ds.take(num_batches) ds = ds.prefetch(num_batches) starts.append(time.time()) # Benchmarked code begins here. for i in ds: _ = layer(i) # Benchmarked code ends here. ends.append(time.time()) avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches name = "hashing|batch_%s" % batch_size baseline = self.run_dataset_implementation(batch_size) extras = { "dataset implementation baseline": baseline, "delta seconds": (baseline - avg_time), "delta percent": ((baseline - avg_time) / baseline) * 100 } self.report_benchmark( iters=num_repeats, wall_time=avg_time, extras=extras, name=name) def benchmark_vocab_size_by_batch(self): for batch in [32, 64, 256]: self.bm_layer_implementation(batch_size=batch) if __name__ == "__main__": test.main()
apache-2.0
8,939,267,146,637,729,000
33.776786
80
0.661617
false
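The benchmark compares the Keras Hashing layer against the raw string-to-hash-bucket op. A minimal sketch through the public TensorFlow 2 API follows; the layer's location has moved between releases (tf.keras.layers.experimental.preprocessing.Hashing in versions close to this file, tf.keras.layers.Hashing later), so the attribute path below is version-dependent.

import tensorflow as tf

layer = tf.keras.layers.Hashing(num_bins=2)   # location varies by TF release
words = tf.constant([["aB"], ["cD"], ["eF"]])
print(layer(words))                           # bucket ids in {0, 1}

# the "dataset implementation baseline" in the benchmark is essentially this op
# (note it uses a different hash function, so bucket ids need not match the layer's):
print(tf.strings.to_hash_bucket(tf.constant(["aB", "cD", "eF"]), num_buckets=2))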
scivision/dmcutils
cam_intensity.py
1
6411
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Assumes observing from 66.986330° N, 50.943941° W. I did not take into account slight ground distance between camera and radar. E-region magnetic zenith at 79.7213° el, 150.11° az clockwise from geographic north. But the beam was pointed at F-region magnetic zenith? Sondrestrom ISR was pointing at 80.55 el, 141.0 az from 23:03:48 to 23:06:36 """ from pathlib import Path from os import devnull from datetime import datetime from numpy import empty, ones, unravel_index, percentile from numpy.ma import masked_where import h5py from matplotlib.pyplot import figure, draw, pause, subplots, show from matplotlib.colors import LogNorm from matplotlib.dates import DateFormatter import matplotlib.animation as anim from pymap3d.haversine import angledist import seaborn as sns sns.set_context('talk', font_scale=1.5) calfn = 'cal/DMC2015-11.h5' # magneticzenithazel = (150.11,79.7213) #degrees E-region SondrestromFWHM = 0.5 # degrees # %% def plotstats(bmean, bmin, bmax, bvar, t, imgfn, israzel, isrvalid): fg, ax = subplots(4, 1, sharex=True) tse = ' '.join([t.strftime('%X') for t in isrvalid]) fg.suptitle('{} az,el {} time: {}'.format(imgfn, israzel, tse)) ax[0].plot(t, bmean) ax[0].set_ylabel('mean') ax[1].plot(t, bmin) ax[1].set_ylabel('min') ax[2].plot(t, bmax) ax[2].set_ylabel('max') ax[3].plot(t, bvar) ax[3].set_ylabel('variance') ax[3].set_xlabel('estimated time [UTC]') for a in ax: a.set_yscale('log') a.grid(True, which='x') a.xaxis.set_major_formatter(DateFormatter('%H:%M:%S')) fg.autofmt_xdate() fg.tight_layout() def loadplot(imgfn, calfn, israzel, isrvalid, showmovie, writemovie): imgfn = Path(imgfn).expanduser() calfn = Path(calfn).expanduser() # %% indices corresponding to the sondestrom beam with h5py.File(str(calfn), 'r', libver='latest') as h: az = h['az'][:] el = h['el'][:] dang = angledist(israzel[0], israzel[1], az, el) mask = dang < SondrestromFWHM boresight_rc = unravel_index(dang.argmin(), az.shape) Npixmask = mask.sum() print('found {} pixels in Sondrestrom ISR beam'.format(Npixmask)) if Npixmask == 0: raise ValueError('No overlap of radar beam with camera FOV') # %% ingest images tvalid = [t.timestamp() for t in isrvalid] with h5py.File(str(imgfn), "r", libver='latest') as h: uts = h["ut1_unix"][:] utind = (tvalid[0] <= uts) & (uts <= tvalid[1]) print('loading image data into RAM') imgs = h['rawimg'][utind, ...] 
Nimg = imgs.shape[0] print('{} images loaded from {}'.format(Nimg, imgfn)) # %% Statistics of image pixels corresponding to ISR beam t = [datetime.utcfromtimestamp(ut) for ut in uts[utind]] # %% plotting if showmovie or writemovie: Writer = anim.writers['ffmpeg'] writer = Writer(fps=5, metadata={'artist': 'Michael Hirsch'}, codec='ffv1') if writemovie: ofn = imgfn.rsplit('.', 1)[0] + '.mkv' else: ofn = devnull fg = figure() ax = fg.gca() vlim = percentile(imgs, (2, 99.9)) hi = ax.imshow(imgs[0, ...], cmap='gray', norm=LogNorm(), origin='lower', vmin=vlim[0], vmax=vlim[1]) # primes display fg.colorbar(hi) mim = masked_where(~mask, ones(imgs.shape[1:])) ax.imshow(mim, cmap='bwr', alpha=.15, vmin=0, vmax=1, origin='lower') # radar beam ax.set_xlabel('x-pixel') ax.set_ylabel('y-pixel') ax.scatter(boresight_rc[1], boresight_rc[0], s=150, marker='*', alpha=0.3, color='b') c = ax.contour(az, colors='w', alpha=0.1) ax.clabel(c, inline=1, fmt='%0.1f') c = ax.contour(el, colors='w', alpha=0.1) ax.clabel(c, inline=1, fmt='%0.1f') ht = ax.set_title('', color='g') ax.grid(False) if showmovie or writemovie: with writer.saving(fg, str(ofn), 150): bmean, bmin, bmax, bvar = update( imgs, mask, t, hi, ht, showmovie, writemovie, writer) else: bmean, bmin, bmax, bvar = update( imgs, mask, t, None, None, showmovie, writemovie, None) plotstats(bmean, bmin, bmax, bvar, t, imgfn, israzel, isrvalid) def update(imgs, mask, t, hi, ht, showmovie, writemovie, writer): Nimg = imgs.shape[0] bmean = empty(Nimg) bmin = empty(Nimg) bmax = empty(Nimg) bvar = empty(Nimg) for i, img in enumerate(imgs): im = img[mask] bmean[i] = im.mean() bmin[i] = im.min() bmax[i] = im.max() bvar[i] = im.var() if showmovie or writemovie: hi.set_data(img) ht.set_text('{}'.format(t[i])) draw(), pause(0.001) if writemovie: writer.grab_frame(facecolor='k') return bmean, bmin, bmax, bvar if __name__ == '__main__': from argparse import ArgumentParser p = ArgumentParser() p.add_argument('-s', '--showmovie', help="show live movie (takes a while, don't use if you want quick summary plot", action='store_true') p.add_argument('-w', '--writemovie', help="write a LOSSLESS movie file that is viewable on phones, etc. easily from the HDF5 file", action='store_true') P = p.parse_args() if 0: # juha 11-14 imgfn = '~/data/2015-11-14/2015-11-14T0149-0202.h5' israzel = (141., 80.55) isrvalid = (datetime(2015, 11, 14, 1, 55, 9), datetime(2015, 11, 14, 1, 55, 49)) if 0: # juha 11-15 imgfn = "~/data/2015-11-15/2015-11-15T2304-2306.h5" israzel = (141., 80.55), # (321.,89.5) isrvalid = (datetime(2015, 11, 15, 23, 3, 48), datetime(2015, 11, 15, 23, 6, 36)) if 1: # asti imgfn = "~/data/2015-11-15/2015-11-15T2318-2320.h5" israzel = (141., 80.55) isrvalid = (datetime(2015, 11, 15, 23, 18, 5), datetime(2015, 11, 15, 23, 19, 53)) loadplot(imgfn, calfn, israzel, isrvalid, P.showmovie, P.writemovie) show()
gpl-3.0
8,059,602,880,121,933,000
33.446237
134
0.568129
false
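cam_intensity.py selects camera pixels inside the radar beam by the angular distance between each pixel's calibrated (az, el) and the ISR boresight. The same mask can be sketched with plain NumPy via the spherical law of cosines; the pointing maps below are synthetic stand-ins for the calibration file, while the boresight and FWHM values are taken from the script.

import numpy as np

def angular_separation(az0, el0, az, el):
    """Great-circle separation in degrees between (az0, el0) and per-pixel (az, el)."""
    az0, el0, az, el = map(np.radians, (az0, el0, az, el))
    cosang = (np.sin(el0) * np.sin(el)
              + np.cos(el0) * np.cos(el) * np.cos(az - az0))
    return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

# toy per-pixel pointing maps standing in for the calibration HDF5 file
az = np.linspace(135.0, 147.0, 64)[None, :].repeat(64, axis=0)
el = np.linspace(75.0, 85.0, 64)[:, None].repeat(64, axis=1)

dang = angular_separation(141.0, 80.55, az, el)   # radar boresight used in the script
mask = dang < 0.5                                 # SondrestromFWHM, degrees
print(mask.sum(), "pixels fall inside the beam")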
andreabedini/numexpr
bench/vml_timing.py
5
5869
################################################################### # Numexpr - Fast numerical array expression evaluator for NumPy. # # License: MIT # Author: See AUTHORS.txt # # See LICENSE.txt and LICENSES/*.txt for details about copyright and # rights to use. #################################################################### from __future__ import print_function import sys import timeit import numpy import numexpr array_size = 1000*1000 iterations = 10 numpy_ttime = [] numpy_sttime = [] numpy_nttime = [] numexpr_ttime = [] numexpr_sttime = [] numexpr_nttime = [] def compare_times(expr, nexpr): global numpy_ttime global numpy_sttime global numpy_nttime global numexpr_ttime global numexpr_sttime global numexpr_nttime print("******************* Expression:", expr) setup_contiguous = setupNP_contiguous setup_strided = setupNP_strided setup_unaligned = setupNP_unaligned numpy_timer = timeit.Timer(expr, setup_contiguous) numpy_time = round(numpy_timer.timeit(number=iterations), 4) numpy_ttime.append(numpy_time) print('%30s %.4f'%('numpy:', numpy_time / iterations)) numpy_timer = timeit.Timer(expr, setup_strided) numpy_stime = round(numpy_timer.timeit(number=iterations), 4) numpy_sttime.append(numpy_stime) print('%30s %.4f'%('numpy strided:', numpy_stime / iterations)) numpy_timer = timeit.Timer(expr, setup_unaligned) numpy_ntime = round(numpy_timer.timeit(number=iterations), 4) numpy_nttime.append(numpy_ntime) print('%30s %.4f'%('numpy unaligned:', numpy_ntime / iterations)) evalexpr = 'evaluate("%s", optimization="aggressive")' % expr numexpr_timer = timeit.Timer(evalexpr, setup_contiguous) numexpr_time = round(numexpr_timer.timeit(number=iterations), 4) numexpr_ttime.append(numexpr_time) print('%30s %.4f'%("numexpr:", numexpr_time/iterations,), end=" ") print("Speed-up of numexpr over numpy:", round(numpy_time/numexpr_time, 4)) evalexpr = 'evaluate("%s", optimization="aggressive")' % expr numexpr_timer = timeit.Timer(evalexpr, setup_strided) numexpr_stime = round(numexpr_timer.timeit(number=iterations), 4) numexpr_sttime.append(numexpr_stime) print('%30s %.4f'%("numexpr strided:", numexpr_stime/iterations,), end=" ") print("Speed-up of numexpr over numpy:", \ round(numpy_stime/numexpr_stime, 4)) evalexpr = 'evaluate("%s", optimization="aggressive")' % expr numexpr_timer = timeit.Timer(evalexpr, setup_unaligned) numexpr_ntime = round(numexpr_timer.timeit(number=iterations), 4) numexpr_nttime.append(numexpr_ntime) print('%30s %.4f'%("numexpr unaligned:", numexpr_ntime/iterations,), end=" ") print("Speed-up of numexpr over numpy:", \ round(numpy_ntime/numexpr_ntime, 4)) print() setupNP = """\ from numpy import arange, linspace, arctan2, sqrt, sin, cos, exp, log from numpy import rec as records #from numexpr import evaluate from numexpr import %s # Initialize a recarray of 16 MB in size r=records.array(None, formats='a%s,i4,f4,f8', shape=%s) c1 = r.field('f0')%s i2 = r.field('f1')%s f3 = r.field('f2')%s f4 = r.field('f3')%s c1[:] = "a" i2[:] = arange(%s)/1000 f3[:] = linspace(0,1,len(i2)) f4[:] = f3*1.23 """ eval_method = "evaluate" setupNP_contiguous = setupNP % ((eval_method, 4, array_size,) + \ (".copy()",)*4 + \ (array_size,)) setupNP_strided = setupNP % (eval_method, 4, array_size, "", "", "", "", array_size) setupNP_unaligned = setupNP % (eval_method, 1, array_size, "", "", "", "", array_size) expressions = [] expressions.append('i2 > 0') expressions.append('f3+f4') expressions.append('f3+i2') expressions.append('exp(f3)') expressions.append('log(exp(f3)+1)/f4') 
expressions.append('0.1*i2 > arctan2(f3, f4)') expressions.append('sqrt(f3**2 + f4**2) > 1') expressions.append('sin(f3)>cos(f4)') expressions.append('f3**f4') def compare(expression=False): if expression: compare_times(expression, 1) sys.exit(0) nexpr = 0 for expr in expressions: nexpr += 1 compare_times(expr, nexpr) print() if __name__ == '__main__': import numexpr print("Numexpr version: ", numexpr.__version__) numpy.seterr(all='ignore') numexpr.set_vml_accuracy_mode('low') numexpr.set_vml_num_threads(2) if len(sys.argv) > 1: expression = sys.argv[1] print("expression-->", expression) compare(expression) else: compare() tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime) stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime) ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime) print("eval method: %s" % eval_method) print("*************** Numexpr vs NumPy speed-ups *******************") # print("numpy total:", sum(numpy_ttime)/iterations) # print("numpy strided total:", sum(numpy_sttime)/iterations) # print("numpy unaligned total:", sum(numpy_nttime)/iterations) # print("numexpr total:", sum(numexpr_ttime)/iterations) print("Contiguous case:\t %s (mean), %s (min), %s (max)" % \ (round(tratios.mean(), 2), round(tratios.min(), 2), round(tratios.max(), 2))) # print("numexpr strided total:", sum(numexpr_sttime)/iterations) print("Strided case:\t\t %s (mean), %s (min), %s (max)" % \ (round(stratios.mean(), 2), round(stratios.min(), 2), round(stratios.max(), 2))) # print("numexpr unaligned total:", sum(numexpr_nttime)/iterations) print("Unaligned case:\t\t %s (mean), %s (min), %s (max)" % \ (round(ntratios.mean(), 2), round(ntratios.min(), 2), round(ntratios.max(), 2)))
mit
7,529,341,098,715,401,000
33.321637
81
0.613733
false
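vml_timing.py times NumPy against numexpr over a set of expressions. A stripped-down comparison for one of those expressions is sketched below, small enough to run directly; the array size and repeat count are arbitrary.

import time
import numpy as np
import numexpr as ne

f3 = np.linspace(0, 1, 1000000)
f4 = f3 * 1.23
expr = "sqrt(f3**2 + f4**2) > 1"          # one of the expressions benchmarked above

# same result through both engines
a = np.sqrt(f3**2 + f4**2) > 1
b = ne.evaluate(expr)
print("identical results:", np.array_equal(a, b))

# crude timing of each path
t0 = time.perf_counter()
for _ in range(10):
    np.sqrt(f3**2 + f4**2) > 1
t_np = (time.perf_counter() - t0) / 10

t0 = time.perf_counter()
for _ in range(10):
    ne.evaluate(expr)
t_ne = (time.perf_counter() - t0) / 10

print("speed-up of numexpr over numpy:", round(t_np / t_ne, 2))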
tuanvu216/udacity-course
intro_to_machine_learning/lesson/lesson_13_validation/KFold CV in sklearn.py
1
1245
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 02 15:14:27 2015

@author: tvu
"""
from time import time

from sklearn.cross_validation import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.naive_bayes import GaussianNB

# word_data (documents) and authors (labels) are assumed to come from the
# lesson's preprocessing step; they are not defined in this snippet.

t0 = time()
kf = KFold(len(authors), 2)
for train_indices, test_indices in kf:
    # make training and testing datasets
    features_train = [word_data[ii] for ii in train_indices]
    features_test = [word_data[ii] for ii in test_indices]
    authors_train = [authors[ii] for ii in train_indices]
    authors_test = [authors[ii] for ii in test_indices]

    # TFIDF and feature selection
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    # transform (not fit_transform) so the test split reuses the training vocabulary
    features_test_transformed = vectorizer.transform(features_test)
    selector = SelectPercentile(f_classif, percentile=10)
    selector.fit(features_train_transformed, authors_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed = selector.transform(features_test_transformed).toarray()

    clf = GaussianNB()
    clf.fit(features_train_transformed, authors_train)
    print "training time:", round(time()-t0, 3), "s"

t0 = time()
mit
-1,931,519,531,409,667,300
35.647059
89
0.686747
false
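The KFold snippet above refits the TF-IDF vectorizer by hand in every split; the same evaluation can be expressed as a scikit-learn Pipeline inside cross_val_score, which keeps all fitting inside each training fold automatically. A sketch on a tiny made-up corpus, using the modern sklearn.model_selection API rather than the deprecated sklearn.cross_validation module:

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import FunctionTransformer

# tiny stand-in corpus; word_data / authors in the snippet above play the same roles
word_data = ["alice writes about gardens", "alice loves her garden",
             "bob writes about engines", "bob tunes the engine",
             "alice plants roses", "bob rebuilds motors"] * 5
authors = [0, 0, 1, 1, 0, 1] * 5

pipe = Pipeline([
    ("tfidf", TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words="english")),
    # GaussianNB needs a dense array, so densify between the sparse steps
    ("dense", FunctionTransformer(lambda X: X.toarray(), accept_sparse=True)),
    ("select", SelectPercentile(f_classif, percentile=10)),
    ("clf", GaussianNB()),
])

cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
print(cross_val_score(pipe, word_data, authors, cv=cv))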
0x7678/gr-gsm
docs/doxygen/doxyxml/generated/compoundsuper.py
2
360038
#!/usr/bin/env python3 # # Generated Thu Jun 11 18:44:25 2009 by generateDS.py. # from __future__ import print_function from __future__ import unicode_literals import sys from xml.dom import minidom from xml.dom import Node import six # # User methods # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class # in a module named generatedssuper.py. try: from generatedssuper import GeneratedsSuper except ImportError as exp: class GeneratedsSuper(object): def format_string(self, input_data, input_name=''): return input_data def format_integer(self, input_data, input_name=''): return '%d' % input_data def format_float(self, input_data, input_name=''): return '%f' % input_data def format_double(self, input_data, input_name=''): return '%e' % input_data def format_boolean(self, input_data, input_name=''): return '%s' % input_data # # If you have installed IPython you can uncomment and use the following. # IPython is available from http://ipython.scipy.org/. # ## from IPython.Shell import IPShellEmbed ## args = '' ## ipshell = IPShellEmbed(args, ## banner = 'Dropping into IPython', ## exit_msg = 'Leaving Interpreter, back to program.') # Then use the following line where and when you want to drop into the # IPython shell: # ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit') # # Globals # ExternalEncoding = 'ascii' # # Support/utility functions. # def showIndent(outfile, level): for idx in range(level): outfile.write(' ') def quote_xml(inStr): s1 = (isinstance(inStr, six.string_types) and inStr or '%s' % inStr) s1 = s1.replace('&', '&amp;') s1 = s1.replace('<', '&lt;') s1 = s1.replace('>', '&gt;') return s1 def quote_attrib(inStr): s1 = (isinstance(inStr, six.string_types) and inStr or '%s' % inStr) s1 = s1.replace('&', '&amp;') s1 = s1.replace('<', '&lt;') s1 = s1.replace('>', '&gt;') if '"' in s1: if "'" in s1: s1 = '"%s"' % s1.replace('"', "&quot;") else: s1 = "'%s'" % s1 else: s1 = '"%s"' % s1 return s1 def quote_python(inStr): s1 = inStr if s1.find("'") == -1: if s1.find('\n') == -1: return "'%s'" % s1 else: return "'''%s'''" % s1 else: if s1.find('"') != -1: s1 = s1.replace('"', '\\"') if s1.find('\n') == -1: return '"%s"' % s1 else: return '"""%s"""' % s1 class MixedContainer(object): # Constants for category: CategoryNone = 0 CategoryText = 1 CategorySimple = 2 CategoryComplex = 3 # Constants for content_type: TypeNone = 0 TypeText = 1 TypeString = 2 TypeInteger = 3 TypeFloat = 4 TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type self.name = name self.value = value def getCategory(self): return self.category def getContenttype(self, content_type): return self.content_type def getValue(self): return self.value def getName(self): return self.name def export(self, outfile, level, name, namespace): if self.category == MixedContainer.CategoryText: outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex self.value.export(outfile, level, namespace,name) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: outfile.write('<%s>%d</%s>' % (self.name, self.value, 
self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeDouble: outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name)) def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s",\n' % \ (self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class _MemberSpec(object): def __init__(self, name='', data_type='', container=0): self.name = name self.data_type = data_type self.container = container def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container # # Data representation classes. # class DoxygenType(GeneratedsSuper): subclass = None superclass = None def __init__(self, version=None, compounddef=None): self.version = version self.compounddef = compounddef def factory(*args_, **kwargs_): if DoxygenType.subclass: return DoxygenType.subclass(*args_, **kwargs_) else: return DoxygenType(*args_, **kwargs_) factory = staticmethod(factory) def get_compounddef(self): return self.compounddef def set_compounddef(self, compounddef): self.compounddef = compounddef def get_version(self): return self.version def set_version(self, version): self.version = version def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): outfile.write(' version=%s' % (quote_attrib(self.version), )) def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): if self.compounddef: self.compounddef.export(outfile, level, namespace_, name_='compounddef') def hasContent_(self): if ( self.compounddef is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='DoxygenType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.version is not None: showIndent(outfile, level) outfile.write('version = "%s",\n' % (self.version,)) def exportLiteralChildren(self, outfile, level, name_): if self.compounddef: showIndent(outfile, level) outfile.write('compounddef=model_.compounddefType(\n') self.compounddef.exportLiteral(outfile, level, name_='compounddef') showIndent(outfile, 
level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('version'): self.version = attrs.get('version').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compounddef': obj_ = compounddefType.factory() obj_.build(child_) self.set_compounddef(obj_) # end class DoxygenType class compounddefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): self.kind = kind self.prot = prot self.id = id self.compoundname = compoundname self.title = title if basecompoundref is None: self.basecompoundref = [] else: self.basecompoundref = basecompoundref if derivedcompoundref is None: self.derivedcompoundref = [] else: self.derivedcompoundref = derivedcompoundref if includes is None: self.includes = [] else: self.includes = includes if includedby is None: self.includedby = [] else: self.includedby = includedby self.incdepgraph = incdepgraph self.invincdepgraph = invincdepgraph if innerdir is None: self.innerdir = [] else: self.innerdir = innerdir if innerfile is None: self.innerfile = [] else: self.innerfile = innerfile if innerclass is None: self.innerclass = [] else: self.innerclass = innerclass if innernamespace is None: self.innernamespace = [] else: self.innernamespace = innernamespace if innerpage is None: self.innerpage = [] else: self.innerpage = innerpage if innergroup is None: self.innergroup = [] else: self.innergroup = innergroup self.templateparamlist = templateparamlist if sectiondef is None: self.sectiondef = [] else: self.sectiondef = sectiondef self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inheritancegraph = inheritancegraph self.collaborationgraph = collaborationgraph self.programlisting = programlisting self.location = location self.listofallmembers = listofallmembers def factory(*args_, **kwargs_): if compounddefType.subclass: return compounddefType.subclass(*args_, **kwargs_) else: return compounddefType(*args_, **kwargs_) factory = staticmethod(factory) def get_compoundname(self): return self.compoundname def set_compoundname(self, compoundname): self.compoundname = compoundname def get_title(self): return self.title def set_title(self, title): self.title = title def get_basecompoundref(self): return self.basecompoundref def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref def add_basecompoundref(self, value): self.basecompoundref.append(value) def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value def get_derivedcompoundref(self): return self.derivedcompoundref def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = 
value def get_includes(self): return self.includes def set_includes(self, includes): self.includes = includes def add_includes(self, value): self.includes.append(value) def insert_includes(self, index, value): self.includes[index] = value def get_includedby(self): return self.includedby def set_includedby(self, includedby): self.includedby = includedby def add_includedby(self, value): self.includedby.append(value) def insert_includedby(self, index, value): self.includedby[index] = value def get_incdepgraph(self): return self.incdepgraph def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph def get_invincdepgraph(self): return self.invincdepgraph def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph def get_innerdir(self): return self.innerdir def set_innerdir(self, innerdir): self.innerdir = innerdir def add_innerdir(self, value): self.innerdir.append(value) def insert_innerdir(self, index, value): self.innerdir[index] = value def get_innerfile(self): return self.innerfile def set_innerfile(self, innerfile): self.innerfile = innerfile def add_innerfile(self, value): self.innerfile.append(value) def insert_innerfile(self, index, value): self.innerfile[index] = value def get_innerclass(self): return self.innerclass def set_innerclass(self, innerclass): self.innerclass = innerclass def add_innerclass(self, value): self.innerclass.append(value) def insert_innerclass(self, index, value): self.innerclass[index] = value def get_innernamespace(self): return self.innernamespace def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace def add_innernamespace(self, value): self.innernamespace.append(value) def insert_innernamespace(self, index, value): self.innernamespace[index] = value def get_innerpage(self): return self.innerpage def set_innerpage(self, innerpage): self.innerpage = innerpage def add_innerpage(self, value): self.innerpage.append(value) def insert_innerpage(self, index, value): self.innerpage[index] = value def get_innergroup(self): return self.innergroup def set_innergroup(self, innergroup): self.innergroup = innergroup def add_innergroup(self, value): self.innergroup.append(value) def insert_innergroup(self, index, value): self.innergroup[index] = value def get_templateparamlist(self): return self.templateparamlist def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_sectiondef(self): return self.sectiondef def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef def add_sectiondef(self, value): self.sectiondef.append(value) def insert_sectiondef(self, index, value): self.sectiondef[index] = value def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inheritancegraph(self): return self.inheritancegraph def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph def get_collaborationgraph(self): return self.collaborationgraph def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph def get_programlisting(self): return self.programlisting def set_programlisting(self, programlisting): self.programlisting = programlisting def get_location(self): return self.location def set_location(self, location): self.location 
= location def get_listofallmembers(self): return self.listofallmembers def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compounddefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): if self.compoundname is not None: showIndent(outfile, level) outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) if self.title is not None: showIndent(outfile, level) outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) for basecompoundref_ in self.basecompoundref: basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') for derivedcompoundref_ in self.derivedcompoundref: derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') for includes_ in self.includes: includes_.export(outfile, level, namespace_, name_='includes') for includedby_ in self.includedby: includedby_.export(outfile, level, namespace_, name_='includedby') if self.incdepgraph: self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') if self.invincdepgraph: self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') for innerdir_ in self.innerdir: innerdir_.export(outfile, level, namespace_, name_='innerdir') for innerfile_ in self.innerfile: innerfile_.export(outfile, level, namespace_, name_='innerfile') for innerclass_ in self.innerclass: innerclass_.export(outfile, level, namespace_, name_='innerclass') for innernamespace_ in self.innernamespace: innernamespace_.export(outfile, level, namespace_, name_='innernamespace') for innerpage_ in self.innerpage: innerpage_.export(outfile, level, namespace_, name_='innerpage') for innergroup_ in self.innergroup: innergroup_.export(outfile, level, namespace_, name_='innergroup') if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') for sectiondef_ in self.sectiondef: sectiondef_.export(outfile, level, namespace_, name_='sectiondef') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') if self.inheritancegraph: 
self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') if self.collaborationgraph: self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') if self.programlisting: self.programlisting.export(outfile, level, namespace_, name_='programlisting') if self.location: self.location.export(outfile, level, namespace_, name_='location') if self.listofallmembers: self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') def hasContent_(self): if ( self.compoundname is not None or self.title is not None or self.basecompoundref is not None or self.derivedcompoundref is not None or self.includes is not None or self.includedby is not None or self.incdepgraph is not None or self.invincdepgraph is not None or self.innerdir is not None or self.innerfile is not None or self.innerclass is not None or self.innernamespace is not None or self.innerpage is not None or self.innergroup is not None or self.templateparamlist is not None or self.sectiondef is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inheritancegraph is not None or self.collaborationgraph is not None or self.programlisting is not None or self.location is not None or self.listofallmembers is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compounddefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) if self.title: showIndent(outfile, level) outfile.write('title=model_.xsd_string(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('basecompoundref=[\n') level += 1 for basecompoundref in self.basecompoundref: showIndent(outfile, level) outfile.write('model_.basecompoundref(\n') basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('derivedcompoundref=[\n') level += 1 for derivedcompoundref in self.derivedcompoundref: showIndent(outfile, level) outfile.write('model_.derivedcompoundref(\n') derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includes=[\n') level += 1 for includes in self.includes: showIndent(outfile, level) outfile.write('model_.includes(\n') includes.exportLiteral(outfile, level, name_='includes') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includedby=[\n') level += 1 for includedby in self.includedby: showIndent(outfile, level) outfile.write('model_.includedby(\n') includedby.exportLiteral(outfile, level, 
name_='includedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.incdepgraph: showIndent(outfile, level) outfile.write('incdepgraph=model_.graphType(\n') self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') showIndent(outfile, level) outfile.write('),\n') if self.invincdepgraph: showIndent(outfile, level) outfile.write('invincdepgraph=model_.graphType(\n') self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('innerdir=[\n') level += 1 for innerdir in self.innerdir: showIndent(outfile, level) outfile.write('model_.innerdir(\n') innerdir.exportLiteral(outfile, level, name_='innerdir') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerfile=[\n') level += 1 for innerfile in self.innerfile: showIndent(outfile, level) outfile.write('model_.innerfile(\n') innerfile.exportLiteral(outfile, level, name_='innerfile') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerclass=[\n') level += 1 for innerclass in self.innerclass: showIndent(outfile, level) outfile.write('model_.innerclass(\n') innerclass.exportLiteral(outfile, level, name_='innerclass') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innernamespace=[\n') level += 1 for innernamespace in self.innernamespace: showIndent(outfile, level) outfile.write('model_.innernamespace(\n') innernamespace.exportLiteral(outfile, level, name_='innernamespace') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerpage=[\n') level += 1 for innerpage in self.innerpage: showIndent(outfile, level) outfile.write('model_.innerpage(\n') innerpage.exportLiteral(outfile, level, name_='innerpage') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innergroup=[\n') level += 1 for innergroup in self.innergroup: showIndent(outfile, level) outfile.write('model_.innergroup(\n') innergroup.exportLiteral(outfile, level, name_='innergroup') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('sectiondef=[\n') level += 1 for sectiondef in self.sectiondef: showIndent(outfile, level) outfile.write('model_.sectiondef(\n') sectiondef.exportLiteral(outfile, level, name_='sectiondef') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) 
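# ---------------------------------------------------------------------------
# Illustrative sketch: the exportLiteral* methods above dump an object as a
# Python-literal, constructor-style listing (the `model_.` prefixes in that
# dump are fixed placeholders written by the generator, not importable
# names).  This is mainly useful for eyeballing what build() captured from a
# Doxygen XML file.  Module name and runtime assumptions are the same as in
# the first sketch.
def _demo_literal_dump():
    import sys
    from xml.dom import minidom
    import compoundsuper  # assumed import name

    doc = minidom.parseString(
        '<compounddef id="classFoo" kind="class">'
        '<compoundname>Foo</compoundname></compounddef>')
    obj = compoundsuper.compounddefType.factory()
    obj.build(doc.documentElement)
    # Emits lines such as:  kind = "class",  /  compoundname='Foo',  /
    # basecompoundref=[ ], ...
    obj.exportLiteral(sys.stdout, 0)
# ---------------------------------------------------------------------------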
outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inheritancegraph: showIndent(outfile, level) outfile.write('inheritancegraph=model_.graphType(\n') self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') showIndent(outfile, level) outfile.write('),\n') if self.collaborationgraph: showIndent(outfile, level) outfile.write('collaborationgraph=model_.graphType(\n') self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') showIndent(outfile, level) outfile.write('),\n') if self.programlisting: showIndent(outfile, level) outfile.write('programlisting=model_.listingType(\n') self.programlisting.exportLiteral(outfile, level, name_='programlisting') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') if self.listofallmembers: showIndent(outfile, level) outfile.write('listofallmembers=model_.listofallmembersType(\n') self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compoundname': compoundname_ = '' for text__content_ in child_.childNodes: compoundname_ += text__content_.nodeValue self.compoundname = compoundname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'basecompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.basecompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'derivedcompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.derivedcompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includes': obj_ = incType.factory() obj_.build(child_) self.includes.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includedby': obj_ = incType.factory() obj_.build(child_) self.includedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'incdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_incdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'invincdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_invincdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerdir': obj_ = refType.factory() obj_.build(child_) self.innerdir.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerfile': obj_ = refType.factory() obj_.build(child_) self.innerfile.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerclass': obj_ = refType.factory() obj_.build(child_) self.innerclass.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and 
\ nodeName_ == 'innernamespace': obj_ = refType.factory() obj_.build(child_) self.innernamespace.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerpage': obj_ = refType.factory() obj_.build(child_) self.innerpage.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innergroup': obj_ = refType.factory() obj_.build(child_) self.innergroup.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sectiondef': obj_ = sectiondefType.factory() obj_.build(child_) self.sectiondef.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inheritancegraph': obj_ = graphType.factory() obj_.build(child_) self.set_inheritancegraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'collaborationgraph': obj_ = graphType.factory() obj_.build(child_) self.set_collaborationgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'programlisting': obj_ = listingType.factory() obj_.build(child_) self.set_programlisting(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listofallmembers': obj_ = listofallmembersType.factory() obj_.build(child_) self.set_listofallmembers(obj_) # end class compounddefType class listofallmembersType(GeneratedsSuper): subclass = None superclass = None def __init__(self, member=None): if member is None: self.member = [] else: self.member = member def factory(*args_, **kwargs_): if listofallmembersType.subclass: return listofallmembersType.subclass(*args_, **kwargs_) else: return listofallmembersType(*args_, **kwargs_) factory = staticmethod(factory) def get_member(self): return self.member def set_member(self, member): self.member = member def add_member(self, value): self.member.append(value) def insert_member(self, index, value): self.member[index] = value def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): pass def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'): for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') def hasContent_(self): if ( self.member is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='listofallmembersType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def 
exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('member=[\n') level += 1 for member in self.member: showIndent(outfile, level) outfile.write('model_.member(\n') member.exportLiteral(outfile, level, name_='member') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'member': obj_ = memberRefType.factory() obj_.build(child_) self.member.append(obj_) # end class listofallmembersType class memberRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): self.virt = virt self.prot = prot self.refid = refid self.ambiguityscope = ambiguityscope self.scope = scope self.name = name def factory(*args_, **kwargs_): if memberRefType.subclass: return memberRefType.subclass(*args_, **kwargs_) else: return memberRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_scope(self): return self.scope def set_scope(self, scope): self.scope = scope def get_name(self): return self.name def set_name(self, name): self.name = name def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_ambiguityscope(self): return self.ambiguityscope def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberRefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.ambiguityscope is not None: outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): if self.scope is not None: showIndent(outfile, level) outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) def hasContent_(self): if ( self.scope is not None or self.name is not None ): return True else: 
return False def exportLiteral(self, outfile, level, name_='memberRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.ambiguityscope is not None: showIndent(outfile, level) outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('ambiguityscope'): self.ambiguityscope = attrs.get('ambiguityscope').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'scope': scope_ = '' for text__content_ in child_.childNodes: scope_ += text__content_.nodeValue self.scope = scope_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ # end class memberRefType class scope(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if scope.subclass: return scope.subclass(*args_, **kwargs_) else: return scope(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='scope') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='scope'): pass def exportChildren(self, outfile, level, namespace_='', name_='scope'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='scope'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % 
(self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class scope class name(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if name.subclass: return name.subclass(*args_, **kwargs_) else: return name(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='name') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='name'): pass def exportChildren(self, outfile, level, namespace_='', name_='name'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='name'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class name class compoundRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.virt = virt self.prot = prot self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if compoundRefType.subclass: return compoundRefType.subclass(*args_, **kwargs_) else: return compoundRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, 
valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compoundRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class compoundRefType class reimplementType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if reimplementType.subclass: return reimplementType.subclass(*args_, **kwargs_) else: return reimplementType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): 
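# ---------------------------------------------------------------------------
# Compressed restatement (not the generated code itself) of the CDATA
# convention used by the simple text-bearing classes above (scope, name,
# compoundRefType, ...): buildChildren() concatenates TEXT nodes straight
# into valueOf_ but wraps CDATA sections in a private '![CDATA[...]]' marker,
# and exportChildren() later rewrites that marker into a real <![CDATA[...]]>
# block.  quote_xml is approximated here with the stdlib escape(); note that
# under this convention the text inside the emitted CDATA block still carries
# XML escaping, a quirk of the generated code.
def _demo_cdata_marker():
    from xml.sax.saxutils import escape

    def store(text_value, is_cdata):
        # what buildChildren accumulates into self.valueOf_
        return '![CDATA[' + text_value + ']]' if is_cdata else text_value

    def emit(valueOf_):
        # what exportChildren writes back out
        if valueOf_.find('![CDATA') > -1:
            value = escape(valueOf_)
            value = value.replace('![CDATA', '<![CDATA')
            value = value.replace(']]', ']]>')
            return value
        return escape(valueOf_)

    assert emit(store('a < b', True)) == '<![CDATA[a &lt; b]]>'
# ---------------------------------------------------------------------------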
showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='reimplementType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='reimplementType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class reimplementType class incType(GeneratedsSuper): subclass = None superclass = None def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.local = local self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if incType.subclass: return incType.subclass(*args_, **kwargs_) else: return incType(*args_, **kwargs_) factory = staticmethod(factory) def get_local(self): return self.local def set_local(self, local): self.local = local def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='incType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='incType'): if self.local is not None: outfile.write(' local=%s' % (quote_attrib(self.local), )) if self.refid is not None: outfile.write(' refid=%s' % 
(self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='incType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='incType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.local is not None: showIndent(outfile, level) outfile.write('local = "%s",\n' % (self.local,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('local'): self.local = attrs.get('local').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class incType class refType(GeneratedsSuper): subclass = None superclass = None def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.prot = prot self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if refType.subclass: return refType.subclass(*args_, **kwargs_) else: return refType(*args_, **kwargs_) factory = staticmethod(factory) def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='refType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='refType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='refType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) 
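# ---------------------------------------------------------------------------
# Stdlib analogy (not this module's own helpers) for the quoting split used
# by the export methods above: attribute values go through quote_attrib --
# which, as the " refid=%s" format strings suggest, is expected to supply the
# surrounding quotes -- while element text goes through quote_xml; free-text
# attributes such as refid additionally pass through format_string /
# ExternalEncoding.  The stdlib quoteattr() and escape() behave similarly
# enough to show the difference.
def _demo_quoting():
    from xml.sax.saxutils import quoteattr, escape

    refid = 'classFoo_1a2"b'
    text = 'operator<'

    attr_part = ' refid=%s' % quoteattr(refid)  # -> refid='classFoo_1a2"b'
    text_part = escape(text)                    # -> operator&lt;
    print('<innerclass%s>%s</innerclass>' % (attr_part, text_part))
# ---------------------------------------------------------------------------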
else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='refType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class refType class refTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref self.external = external if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if refTextType.subclass: return refTextType.subclass(*args_, **kwargs_) else: return refTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_kindref(self): return self.kindref def set_kindref(self, kindref): self.kindref = kindref def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='refTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') 
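# ---------------------------------------------------------------------------
# Illustrative sketch: refTextType above carries the refid / kindref pair
# that Doxygen uses for inline cross references, so resolving one usually
# means looking the refid up among the compound and member ids parsed
# elsewhere.  A minimal index built with the generated get_* accessors;
# `compounddefs` (an iterable of parsed compounddefType objects) and the
# helper name are assumptions, and get_sectiondef() is assumed to follow the
# same accessor pattern as the getters visible in this file.
def _demo_refid_index(compounddefs):
    index = {}
    for comp in compounddefs:
        index[comp.get_id()] = comp
        for section in comp.get_sectiondef():
            for member in section.get_memberdef():
                index[member.get_id()] = member
    return index
# ---------------------------------------------------------------------------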
value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='refTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.kindref is not None: showIndent(outfile, level) outfile.write('kindref = "%s",\n' % (self.kindref,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('kindref'): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class refTextType class sectiondefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, header=None, description=None, memberdef=None): self.kind = kind self.header = header self.description = description if memberdef is None: self.memberdef = [] else: self.memberdef = memberdef def factory(*args_, **kwargs_): if sectiondefType.subclass: return sectiondefType.subclass(*args_, **kwargs_) else: return sectiondefType(*args_, **kwargs_) factory = staticmethod(factory) def get_header(self): return self.header def set_header(self, header): self.header = header def get_description(self): return self.description def set_description(self, description): self.description = description def get_memberdef(self): return self.memberdef def set_memberdef(self, memberdef): self.memberdef = memberdef def add_memberdef(self, value): self.memberdef.append(value) def insert_memberdef(self, index, value): self.memberdef[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): if self.header is not None: showIndent(outfile, level) 
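# ---------------------------------------------------------------------------
# Illustrative sketch: every generated class carries `subclass = None` and a
# factory() staticmethod that prefers the registered subclass, which is the
# hook a hand-written companion module can use to substitute richer classes
# without touching the parsing code.  The subclass name below is
# hypothetical; the registration mechanism (assigning to
# sectiondefType.subclass) is the one defined in this file.
def _demo_subclass_hook():
    import compoundsuper  # assumed import name, as in the earlier sketches

    class SectionDef(compoundsuper.sectiondefType):
        # Hypothetical convenience method added on top of the generated class.
        def member_names(self):
            return [m.get_name() for m in self.get_memberdef()]

    # From now on, every sectiondefType.factory() call made while building a
    # document returns SectionDef instances instead of the base class.
    compoundsuper.sectiondefType.subclass = SectionDef
# ---------------------------------------------------------------------------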
outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) if self.description: self.description.export(outfile, level, namespace_, name_='description') for memberdef_ in self.memberdef: memberdef_.export(outfile, level, namespace_, name_='memberdef') def hasContent_(self): if ( self.header is not None or self.description is not None or self.memberdef is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='sectiondefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) if self.description: showIndent(outfile, level) outfile.write('description=model_.descriptionType(\n') self.description.exportLiteral(outfile, level, name_='description') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('memberdef=[\n') level += 1 for memberdef in self.memberdef: showIndent(outfile, level) outfile.write('model_.memberdef(\n') memberdef.exportLiteral(outfile, level, name_='memberdef') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'header': header_ = '' for text__content_ in child_.childNodes: header_ += text__content_.nodeValue self.header = header_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'description': obj_ = descriptionType.factory() obj_.build(child_) self.set_description(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'memberdef': obj_ = memberdefType.factory() obj_.build(child_) self.memberdef.append(obj_) # end class sectiondefType class memberdefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): self.initonly = initonly self.kind = kind self.volatile = volatile self.const = const self.raisexx = raisexx self.virt = virt self.readable = readable self.prot = prot self.explicit = explicit self.new = new self.final = final self.writable = writable self.add = add self.static = static self.remove = remove self.sealed = sealed self.mutable = mutable self.gettable = gettable self.inline = inline self.settable = settable 
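# ---------------------------------------------------------------------------
# Illustrative sketch: memberdefType mirrors Doxygen's <memberdef> element,
# one attribute or child per property of a documented member (kind, prot,
# static, name, definition, argsstring, param, briefdescription, ...).  A
# small walker over a parsed compounddefType using the generated accessors;
# the function name is hypothetical, and definition / argsstring may be None
# for members such as enums, hence the fallbacks.
def _demo_list_members(comp):
    for section in comp.get_sectiondef():
        for member in section.get_memberdef():
            definition = member.get_definition() or member.get_name()
            args = member.get_argsstring() or ''
            print('%s [%s] %s%s' % (member.get_kind(), member.get_prot(),
                                    definition, args))
# ---------------------------------------------------------------------------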
self.id = id self.templateparamlist = templateparamlist self.type_ = type_ self.definition = definition self.argsstring = argsstring self.name = name self.read = read self.write = write self.bitfield = bitfield if reimplements is None: self.reimplements = [] else: self.reimplements = reimplements if reimplementedby is None: self.reimplementedby = [] else: self.reimplementedby = reimplementedby if param is None: self.param = [] else: self.param = param if enumvalue is None: self.enumvalue = [] else: self.enumvalue = enumvalue self.initializer = initializer self.exceptions = exceptions self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inbodydescription = inbodydescription self.location = location if references is None: self.references = [] else: self.references = references if referencedby is None: self.referencedby = [] else: self.referencedby = referencedby def factory(*args_, **kwargs_): if memberdefType.subclass: return memberdefType.subclass(*args_, **kwargs_) else: return memberdefType(*args_, **kwargs_) factory = staticmethod(factory) def get_templateparamlist(self): return self.templateparamlist def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_definition(self): return self.definition def set_definition(self, definition): self.definition = definition def get_argsstring(self): return self.argsstring def set_argsstring(self, argsstring): self.argsstring = argsstring def get_name(self): return self.name def set_name(self, name): self.name = name def get_read(self): return self.read def set_read(self, read): self.read = read def get_write(self): return self.write def set_write(self, write): self.write = write def get_bitfield(self): return self.bitfield def set_bitfield(self, bitfield): self.bitfield = bitfield def get_reimplements(self): return self.reimplements def set_reimplements(self, reimplements): self.reimplements = reimplements def add_reimplements(self, value): self.reimplements.append(value) def insert_reimplements(self, index, value): self.reimplements[index] = value def get_reimplementedby(self): return self.reimplementedby def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby def add_reimplementedby(self, value): self.reimplementedby.append(value) def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value def get_enumvalue(self): return self.enumvalue def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue def add_enumvalue(self, value): self.enumvalue.append(value) def insert_enumvalue(self, index, value): self.enumvalue[index] = value def get_initializer(self): return self.initializer def set_initializer(self, initializer): self.initializer = initializer def get_exceptions(self): return self.exceptions def set_exceptions(self, exceptions): self.exceptions = exceptions def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inbodydescription(self): return 
self.inbodydescription def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription def get_location(self): return self.location def set_location(self, location): self.location = location def get_references(self): return self.references def set_references(self, references): self.references = references def add_references(self, value): self.references.append(value) def insert_references(self, index, value): self.references[index] = value def get_referencedby(self): return self.referencedby def set_referencedby(self, referencedby): self.referencedby = referencedby def add_referencedby(self, value): self.referencedby.append(value) def insert_referencedby(self, index, value): self.referencedby[index] = value def get_initonly(self): return self.initonly def set_initonly(self, initonly): self.initonly = initonly def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_volatile(self): return self.volatile def set_volatile(self, volatile): self.volatile = volatile def get_const(self): return self.const def set_const(self, const): self.const = const def get_raise(self): return self.raisexx def set_raise(self, raisexx): self.raisexx = raisexx def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_readable(self): return self.readable def set_readable(self, readable): self.readable = readable def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_explicit(self): return self.explicit def set_explicit(self, explicit): self.explicit = explicit def get_new(self): return self.new def set_new(self, new): self.new = new def get_final(self): return self.final def set_final(self, final): self.final = final def get_writable(self): return self.writable def set_writable(self, writable): self.writable = writable def get_add(self): return self.add def set_add(self, add): self.add = add def get_static(self): return self.static def set_static(self, static): self.static = static def get_remove(self): return self.remove def set_remove(self, remove): self.remove = remove def get_sealed(self): return self.sealed def set_sealed(self, sealed): self.sealed = sealed def get_mutable(self): return self.mutable def set_mutable(self, mutable): self.mutable = mutable def get_gettable(self): return self.gettable def set_gettable(self, gettable): self.gettable = gettable def get_inline(self): return self.inline def set_inline(self, inline): self.inline = inline def get_settable(self): return self.settable def set_settable(self, settable): self.settable = settable def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberdefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): if self.initonly is not None: outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.volatile is not None: outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) if self.const is not None: outfile.write(' const=%s' % 
(quote_attrib(self.const), )) if self.raisexx is not None: outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.readable is not None: outfile.write(' readable=%s' % (quote_attrib(self.readable), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.explicit is not None: outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) if self.new is not None: outfile.write(' new=%s' % (quote_attrib(self.new), )) if self.final is not None: outfile.write(' final=%s' % (quote_attrib(self.final), )) if self.writable is not None: outfile.write(' writable=%s' % (quote_attrib(self.writable), )) if self.add is not None: outfile.write(' add=%s' % (quote_attrib(self.add), )) if self.static is not None: outfile.write(' static=%s' % (quote_attrib(self.static), )) if self.remove is not None: outfile.write(' remove=%s' % (quote_attrib(self.remove), )) if self.sealed is not None: outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) if self.mutable is not None: outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) if self.gettable is not None: outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) if self.inline is not None: outfile.write(' inline=%s' % (quote_attrib(self.inline), )) if self.settable is not None: outfile.write(' settable=%s' % (quote_attrib(self.settable), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.definition is not None: showIndent(outfile, level) outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) if self.argsstring is not None: showIndent(outfile, level) outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) if self.read is not None: showIndent(outfile, level) outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) if self.write is not None: showIndent(outfile, level) outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) if self.bitfield is not None: showIndent(outfile, level) outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) for reimplements_ in self.reimplements: reimplements_.export(outfile, level, namespace_, name_='reimplements') for reimplementedby_ in self.reimplementedby: reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') for enumvalue_ in self.enumvalue: enumvalue_.export(outfile, level, 
namespace_, name_='enumvalue') if self.initializer: self.initializer.export(outfile, level, namespace_, name_='initializer') if self.exceptions: self.exceptions.export(outfile, level, namespace_, name_='exceptions') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') if self.inbodydescription: self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') if self.location: self.location.export(outfile, level, namespace_, name_='location', ) for references_ in self.references: references_.export(outfile, level, namespace_, name_='references') for referencedby_ in self.referencedby: referencedby_.export(outfile, level, namespace_, name_='referencedby') def hasContent_(self): if ( self.templateparamlist is not None or self.type_ is not None or self.definition is not None or self.argsstring is not None or self.name is not None or self.read is not None or self.write is not None or self.bitfield is not None or self.reimplements is not None or self.reimplementedby is not None or self.param is not None or self.enumvalue is not None or self.initializer is not None or self.exceptions is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inbodydescription is not None or self.location is not None or self.references is not None or self.referencedby is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='memberdefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.initonly is not None: showIndent(outfile, level) outfile.write('initonly = "%s",\n' % (self.initonly,)) if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.volatile is not None: showIndent(outfile, level) outfile.write('volatile = "%s",\n' % (self.volatile,)) if self.const is not None: showIndent(outfile, level) outfile.write('const = "%s",\n' % (self.const,)) if self.raisexx is not None: showIndent(outfile, level) outfile.write('raisexx = "%s",\n' % (self.raisexx,)) if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.readable is not None: showIndent(outfile, level) outfile.write('readable = "%s",\n' % (self.readable,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.explicit is not None: showIndent(outfile, level) outfile.write('explicit = "%s",\n' % (self.explicit,)) if self.new is not None: showIndent(outfile, level) outfile.write('new = "%s",\n' % (self.new,)) if self.final is not None: showIndent(outfile, level) outfile.write('final = "%s",\n' % (self.final,)) if self.writable is not None: showIndent(outfile, level) outfile.write('writable = "%s",\n' % (self.writable,)) if self.add is not None: showIndent(outfile, level) outfile.write('add = "%s",\n' % (self.add,)) if self.static is not None: showIndent(outfile, level) outfile.write('static = "%s",\n' % (self.static,)) if self.remove is not None: showIndent(outfile, level) outfile.write('remove = "%s",\n' % (self.remove,)) if self.sealed is not None: showIndent(outfile, level) outfile.write('sealed = "%s",\n' % (self.sealed,)) if self.mutable is not None: showIndent(outfile, level) 
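# ---------------------------------------------------------------------------
# Illustrative sketch: XML names that collide with Python keywords or
# builtins are mangled on the Python side -- the `raise` attribute is stored
# as raisexx (get_raise/set_raise) and the <type> child as type_ -- while
# exportAttributes/exportChildren write the original XML names back out.
# Module name and Python 2 runtime assumptions as in the first sketch.
def _demo_keyword_mangling():
    import sys
    from xml.dom import minidom
    import compoundsuper  # assumed import name

    doc = minidom.parseString(
        '<memberdef id="m1" kind="property" raise="SomeError">'
        '<name>Value</name></memberdef>')
    member = compoundsuper.memberdefType.factory()
    member.build(doc.documentElement)
    print(member.get_raise())                        # -> SomeError (.raisexx)
    member.export(sys.stdout, 0, name_='memberdef')  # writes raise=... again
# ---------------------------------------------------------------------------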
outfile.write('mutable = "%s",\n' % (self.mutable,)) if self.gettable is not None: showIndent(outfile, level) outfile.write('gettable = "%s",\n' % (self.gettable,)) if self.inline is not None: showIndent(outfile, level) outfile.write('inline = "%s",\n' % (self.inline,)) if self.settable is not None: showIndent(outfile, level) outfile.write('settable = "%s",\n' % (self.settable,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') if self.type_: showIndent(outfile, level) outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('reimplements=[\n') level += 1 for reimplements in self.reimplements: showIndent(outfile, level) outfile.write('model_.reimplements(\n') reimplements.exportLiteral(outfile, level, name_='reimplements') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('reimplementedby=[\n') level += 1 for reimplementedby in self.reimplementedby: showIndent(outfile, level) outfile.write('model_.reimplementedby(\n') reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('param=[\n') level += 1 for param in self.param: showIndent(outfile, level) outfile.write('model_.param(\n') param.exportLiteral(outfile, level, name_='param') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('enumvalue=[\n') level += 1 for enumvalue in self.enumvalue: showIndent(outfile, level) outfile.write('model_.enumvalue(\n') enumvalue.exportLiteral(outfile, level, name_='enumvalue') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.initializer: showIndent(outfile, level) outfile.write('initializer=model_.linkedTextType(\n') self.initializer.exportLiteral(outfile, level, name_='initializer') showIndent(outfile, level) outfile.write('),\n') if self.exceptions: showIndent(outfile, level) outfile.write('exceptions=model_.linkedTextType(\n') self.exceptions.exportLiteral(outfile, level, name_='exceptions') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) 
outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inbodydescription: showIndent(outfile, level) outfile.write('inbodydescription=model_.descriptionType(\n') self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('references=[\n') level += 1 for references in self.references: showIndent(outfile, level) outfile.write('model_.references(\n') references.exportLiteral(outfile, level, name_='references') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('referencedby=[\n') level += 1 for referencedby in self.referencedby: showIndent(outfile, level) outfile.write('model_.referencedby(\n') referencedby.exportLiteral(outfile, level, name_='referencedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('initonly'): self.initonly = attrs.get('initonly').value if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('volatile'): self.volatile = attrs.get('volatile').value if attrs.get('const'): self.const = attrs.get('const').value if attrs.get('raise'): self.raisexx = attrs.get('raise').value if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('readable'): self.readable = attrs.get('readable').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('explicit'): self.explicit = attrs.get('explicit').value if attrs.get('new'): self.new = attrs.get('new').value if attrs.get('final'): self.final = attrs.get('final').value if attrs.get('writable'): self.writable = attrs.get('writable').value if attrs.get('add'): self.add = attrs.get('add').value if attrs.get('static'): self.static = attrs.get('static').value if attrs.get('remove'): self.remove = attrs.get('remove').value if attrs.get('sealed'): self.sealed = attrs.get('sealed').value if attrs.get('mutable'): self.mutable = attrs.get('mutable').value if attrs.get('gettable'): self.gettable = attrs.get('gettable').value if attrs.get('inline'): self.inline = attrs.get('inline').value if attrs.get('settable'): self.settable = attrs.get('settable').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 
'definition': definition_ = '' for text__content_ in child_.childNodes: definition_ += text__content_.nodeValue self.definition = definition_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'argsstring': argsstring_ = '' for text__content_ in child_.childNodes: argsstring_ += text__content_.nodeValue self.argsstring = argsstring_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'read': read_ = '' for text__content_ in child_.childNodes: read_ += text__content_.nodeValue self.read = read_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'write': write_ = '' for text__content_ in child_.childNodes: write_ += text__content_.nodeValue self.write = write_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'bitfield': bitfield_ = '' for text__content_ in child_.childNodes: bitfield_ += text__content_.nodeValue self.bitfield = bitfield_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplements': obj_ = reimplementType.factory() obj_.build(child_) self.reimplements.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplementedby': obj_ = reimplementType.factory() obj_.build(child_) self.reimplementedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'enumvalue': obj_ = enumvalueType.factory() obj_.build(child_) self.enumvalue.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'initializer': obj_ = linkedTextType.factory() obj_.build(child_) self.set_initializer(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'exceptions': obj_ = linkedTextType.factory() obj_.build(child_) self.set_exceptions(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inbodydescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_inbodydescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'references': obj_ = referenceType.factory() obj_.build(child_) self.references.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'referencedby': obj_ = referenceType.factory() obj_.build(child_) self.referencedby.append(obj_) # end class memberdefType class definition(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if definition.subclass: return definition.subclass(*args_, **kwargs_) else: return definition(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, 
))
        self.exportAttributes(outfile, level, namespace_, name_='definition')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='definition'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='definition'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class definition


class argsstring(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if argsstring.subclass:
            return argsstring.subclass(*args_, **kwargs_)
        else:
            return argsstring(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='argsstring')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='argsstring'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class argsstring


class read(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if read.subclass:
            return read.subclass(*args_, **kwargs_)
        else:
            return read(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='read')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='read'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='read'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='read'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class read


class write(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if write.subclass:
            return write.subclass(*args_, **kwargs_)
        else:
            return write(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='write')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='write'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='write'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='write'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class write


class bitfield(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if bitfield.subclass:
            return bitfield.subclass(*args_, **kwargs_)
        else:
            return bitfield(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='bitfield')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='bitfield'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class bitfield


class descriptionType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if descriptionType.subclass: return descriptionType.subclass(*args_, **kwargs_) else: return descriptionType(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='descriptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): pass def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect1 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='descriptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ 
nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class descriptionType class enumvalueType(GeneratedsSuper): subclass = None superclass = None def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): self.prot = prot self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if enumvalueType.subclass: return enumvalueType.subclass(*args_, **kwargs_) else: return enumvalueType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def get_initializer(self): return self.initializer def set_initializer(self, initializer): self.initializer = initializer def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.name is not None or self.initializer is not None or self.briefdescription is not None or self.detaileddescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='enumvalueType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for 
item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, MixedContainer.TypeString, 'name', valuestr_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'initializer': childobj_ = linkedTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'initializer', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'briefdescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'detaileddescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class enumvalueType class templateparamlistType(GeneratedsSuper): subclass = None superclass = None def __init__(self, param=None): if param is None: self.param = [] else: self.param = param def factory(*args_, **kwargs_): if templateparamlistType.subclass: return templateparamlistType.subclass(*args_, **kwargs_) else: return templateparamlistType(*args_, **kwargs_) factory = staticmethod(factory) def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): pass def exportChildren(self, outfile, 
level, namespace_='', name_='templateparamlistType'): for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') def hasContent_(self): if ( self.param is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='templateparamlistType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('param=[\n') level += 1 for param in self.param: showIndent(outfile, level) outfile.write('model_.param(\n') param.exportLiteral(outfile, level, name_='param') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) # end class templateparamlistType class paramType(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): self.type_ = type_ self.declname = declname self.defname = defname self.array = array self.defval = defval self.briefdescription = briefdescription def factory(*args_, **kwargs_): if paramType.subclass: return paramType.subclass(*args_, **kwargs_) else: return paramType(*args_, **kwargs_) factory = staticmethod(factory) def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_declname(self): return self.declname def set_declname(self, declname): self.declname = declname def get_defname(self): return self.defname def set_defname(self, defname): self.defname = defname def get_array(self): return self.array def set_array(self, array): self.array = array def get_defval(self): return self.defval def set_defval(self, defval): self.defval = defval def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='paramType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): pass def exportChildren(self, outfile, level, namespace_='', name_='paramType'): if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.declname is not None: showIndent(outfile, level) outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) if self.defname is not None: showIndent(outfile, level) outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) 
if self.array is not None: showIndent(outfile, level) outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) if self.defval: self.defval.export(outfile, level, namespace_, name_='defval') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') def hasContent_(self): if ( self.type_ is not None or self.declname is not None or self.defname is not None or self.array is not None or self.defval is not None or self.briefdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='paramType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.type_: showIndent(outfile, level) outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) if self.defval: showIndent(outfile, level) outfile.write('defval=model_.linkedTextType(\n') self.defval.exportLiteral(outfile, level, name_='defval') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'declname': declname_ = '' for text__content_ in child_.childNodes: declname_ += text__content_.nodeValue self.declname = declname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defname': defname_ = '' for text__content_ in child_.childNodes: defname_ += text__content_.nodeValue self.defname = defname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'array': array_ = '' for text__content_ in child_.childNodes: array_ += text__content_.nodeValue self.array = array_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defval': obj_ = linkedTextType.factory() obj_.build(child_) self.set_defval(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) # end class paramType class declname(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if declname.subclass: return declname.subclass(*args_, **kwargs_) else: return declname(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ 
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='declname') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='declname'): pass def exportChildren(self, outfile, level, namespace_='', name_='declname'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='declname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class declname class defname(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if defname.subclass: return defname.subclass(*args_, **kwargs_) else: return defname(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='defname') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='defname'): pass def exportChildren(self, outfile, level, namespace_='', name_='defname'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='defname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % 
(self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class defname


class array(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if array.subclass:
            return array.subclass(*args_, **kwargs_)
        else:
            return array(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='array')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='array'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='array'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='array'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class array


class linkedTextType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, ref=None, mixedclass_=None, content_=None):
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if linkedTextType.subclass:
            return linkedTextType.subclass(*args_, **kwargs_)
        else:
            return linkedTextType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ref(self): return self.ref
    def set_ref(self, ref): self.ref = ref
    def add_ref(self, value): self.ref.append(value)
    def insert_ref(self, index, value): self.ref[index] = value
    def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): pass def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='linkedTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class linkedTextType class graphType(GeneratedsSuper): subclass = None superclass = None def __init__(self, node=None): if node is None: self.node = [] else: self.node = node def factory(*args_, **kwargs_): if graphType.subclass: return graphType.subclass(*args_, **kwargs_) else: return graphType(*args_, **kwargs_) factory = staticmethod(factory) def get_node(self): return self.node def set_node(self, node): self.node = node def add_node(self, value): self.node.append(value) def insert_node(self, index, value): self.node[index] = value def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='graphType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): pass def exportChildren(self, outfile, level, namespace_='', name_='graphType'): for node_ in self.node: node_.export(outfile, level, namespace_, name_='node') def hasContent_(self): if ( self.node is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='graphType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('node=[\n') level += 1 for node in self.node: showIndent(outfile, level) outfile.write('model_.node(\n') node.exportLiteral(outfile, level, 
name_='node') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'node': obj_ = nodeType.factory() obj_.build(child_) self.node.append(obj_) # end class graphType class nodeType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, label=None, link=None, childnode=None): self.id = id self.label = label self.link = link if childnode is None: self.childnode = [] else: self.childnode = childnode def factory(*args_, **kwargs_): if nodeType.subclass: return nodeType.subclass(*args_, **kwargs_) else: return nodeType(*args_, **kwargs_) factory = staticmethod(factory) def get_label(self): return self.label def set_label(self, label): self.label = label def get_link(self): return self.link def set_link(self, link): self.link = link def get_childnode(self): return self.childnode def set_childnode(self, childnode): self.childnode = childnode def add_childnode(self, value): self.childnode.append(value) def insert_childnode(self, index, value): self.childnode[index] = value def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='nodeType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): if self.label is not None: showIndent(outfile, level) outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) if self.link: self.link.export(outfile, level, namespace_, name_='link') for childnode_ in self.childnode: childnode_.export(outfile, level, namespace_, name_='childnode') def hasContent_(self): if ( self.label is not None or self.link is not None or self.childnode is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='nodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding)) if self.link: showIndent(outfile, level) outfile.write('link=model_.linkType(\n') self.link.exportLiteral(outfile, level, name_='link') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('childnode=[\n') level += 1 for childnode in self.childnode: showIndent(outfile, level) 
outfile.write('model_.childnode(\n') childnode.exportLiteral(outfile, level, name_='childnode') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'label': label_ = '' for text__content_ in child_.childNodes: label_ += text__content_.nodeValue self.label = label_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'link': obj_ = linkType.factory() obj_.build(child_) self.set_link(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'childnode': obj_ = childnodeType.factory() obj_.build(child_) self.childnode.append(obj_) # end class nodeType class label(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if label.subclass: return label.subclass(*args_, **kwargs_) else: return label(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='label') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='label'): pass def exportChildren(self, outfile, level, namespace_='', name_='label'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='label'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class label class childnodeType(GeneratedsSuper): subclass = None superclass = None def __init__(self, relation=None, refid=None, edgelabel=None): self.relation = relation self.refid = refid if edgelabel is None: self.edgelabel = [] else: self.edgelabel = edgelabel def factory(*args_, **kwargs_): if childnodeType.subclass: return childnodeType.subclass(*args_, **kwargs_) else: return 
childnodeType(*args_, **kwargs_) factory = staticmethod(factory) def get_edgelabel(self): return self.edgelabel def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel def add_edgelabel(self, value): self.edgelabel.append(value) def insert_edgelabel(self, index, value): self.edgelabel[index] = value def get_relation(self): return self.relation def set_relation(self, relation): self.relation = relation def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='childnodeType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): if self.relation is not None: outfile.write(' relation=%s' % (quote_attrib(self.relation), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): for edgelabel_ in self.edgelabel: showIndent(outfile, level) outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) def hasContent_(self): if ( self.edgelabel is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='childnodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.relation is not None: showIndent(outfile, level) outfile.write('relation = "%s",\n' % (self.relation,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('edgelabel=[\n') level += 1 for edgelabel in self.edgelabel: showIndent(outfile, level) outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('relation'): self.relation = attrs.get('relation').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'edgelabel': edgelabel_ = '' for text__content_ in child_.childNodes: edgelabel_ += text__content_.nodeValue self.edgelabel.append(edgelabel_) # end class childnodeType class edgelabel(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if edgelabel.subclass: return edgelabel.subclass(*args_, **kwargs_) else: return edgelabel(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', 
name_='edgelabel', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='edgelabel') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): pass def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='edgelabel'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class edgelabel class linkType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, external=None, valueOf_=''): self.refid = refid self.external = external self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if linkType.subclass: return linkType.subclass(*args_, **kwargs_) else: return linkType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='linkType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='linkType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % 
self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='linkType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class linkType class listingType(GeneratedsSuper): subclass = None superclass = None def __init__(self, codeline=None): if codeline is None: self.codeline = [] else: self.codeline = codeline def factory(*args_, **kwargs_): if listingType.subclass: return listingType.subclass(*args_, **kwargs_) else: return listingType(*args_, **kwargs_) factory = staticmethod(factory) def get_codeline(self): return self.codeline def set_codeline(self, codeline): self.codeline = codeline def add_codeline(self, value): self.codeline.append(value) def insert_codeline(self, index, value): self.codeline[index] = value def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='listingType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): pass def exportChildren(self, outfile, level, namespace_='', name_='listingType'): for codeline_ in self.codeline: codeline_.export(outfile, level, namespace_, name_='codeline') def hasContent_(self): if ( self.codeline is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='listingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('codeline=[\n') level += 1 for codeline in self.codeline: showIndent(outfile, level) outfile.write('model_.codeline(\n') codeline.exportLiteral(outfile, level, name_='codeline') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def 
buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'codeline': obj_ = codelineType.factory() obj_.build(child_) self.codeline.append(obj_) # end class listingType class codelineType(GeneratedsSuper): subclass = None superclass = None def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): self.external = external self.lineno = lineno self.refkind = refkind self.refid = refid if highlight is None: self.highlight = [] else: self.highlight = highlight def factory(*args_, **kwargs_): if codelineType.subclass: return codelineType.subclass(*args_, **kwargs_) else: return codelineType(*args_, **kwargs_) factory = staticmethod(factory) def get_highlight(self): return self.highlight def set_highlight(self, highlight): self.highlight = highlight def add_highlight(self, value): self.highlight.append(value) def insert_highlight(self, index, value): self.highlight[index] = value def get_external(self): return self.external def set_external(self, external): self.external = external def get_lineno(self): return self.lineno def set_lineno(self, lineno): self.lineno = lineno def get_refkind(self): return self.refkind def set_refkind(self, refkind): self.refkind = refkind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='codelineType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): if self.external is not None: outfile.write(' external=%s' % (quote_attrib(self.external), )) if self.lineno is not None: outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) if self.refkind is not None: outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): for highlight_ in self.highlight: highlight_.export(outfile, level, namespace_, name_='highlight') def hasContent_(self): if ( self.highlight is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='codelineType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.external is not None: showIndent(outfile, level) outfile.write('external = "%s",\n' % (self.external,)) if self.lineno is not None: showIndent(outfile, level) outfile.write('lineno = %s,\n' % (self.lineno,)) if self.refkind is not None: showIndent(outfile, level) outfile.write('refkind = "%s",\n' % (self.refkind,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('highlight=[\n') level += 1 for highlight in self.highlight: showIndent(outfile, level) outfile.write('model_.highlight(\n') 
highlight.exportLiteral(outfile, level, name_='highlight') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('external'): self.external = attrs.get('external').value if attrs.get('lineno'): try: self.lineno = int(attrs.get('lineno').value) except ValueError as exp: raise ValueError('Bad integer attribute (lineno): %s' % exp) if attrs.get('refkind'): self.refkind = attrs.get('refkind').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'highlight': obj_ = highlightType.factory() obj_.build(child_) self.highlight.append(obj_) # end class codelineType class highlightType(GeneratedsSuper): subclass = None superclass = None def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): self.classxx = classxx if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if highlightType.subclass: return highlightType.subclass(*args_, **kwargs_) else: return highlightType(*args_, **kwargs_) factory = staticmethod(factory) def get_sp(self): return self.sp def set_sp(self, sp): self.sp = sp def add_sp(self, value): self.sp.append(value) def insert_sp(self, index, value): self.sp[index] = value def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def add_ref(self, value): self.ref.append(value) def insert_ref(self, index, value): self.ref[index] = value def get_class(self): return self.classxx def set_class(self, classxx): self.classxx = classxx def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='highlightType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): if self.classxx is not None: outfile.write(' class=%s' % (quote_attrib(self.classxx), )) def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.sp is not None or self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='highlightType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.classxx is not None: showIndent(outfile, level) outfile.write('classxx = "%s",\n' % (self.classxx,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') 
def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('class'): self.classxx = attrs.get('class').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sp': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, MixedContainer.TypeString, 'sp', valuestr_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class highlightType class sp(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if sp.subclass: return sp.subclass(*args_, **kwargs_) else: return sp(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='sp') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='sp'): pass def exportChildren(self, outfile, level, namespace_='', name_='sp'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='sp'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class sp class referenceType(GeneratedsSuper): subclass = None superclass = None def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): self.endline = endline self.startline = startline self.refid = refid self.compoundref = compoundref if mixedclass_ is None: self.mixedclass_ = 
MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if referenceType.subclass: return referenceType.subclass(*args_, **kwargs_) else: return referenceType(*args_, **kwargs_) factory = staticmethod(factory) def get_endline(self): return self.endline def set_endline(self, endline): self.endline = endline def get_startline(self): return self.startline def set_startline(self, startline): self.startline = startline def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_compoundref(self): return self.compoundref def set_compoundref(self, compoundref): self.compoundref = compoundref def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='referenceType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): if self.endline is not None: outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) if self.startline is not None: outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.compoundref is not None: outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='referenceType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.endline is not None: showIndent(outfile, level) outfile.write('endline = %s,\n' % (self.endline,)) if self.startline is not None: showIndent(outfile, level) outfile.write('startline = %s,\n' % (self.startline,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.compoundref is not None: showIndent(outfile, level) outfile.write('compoundref = %s,\n' % (self.compoundref,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('endline'): try: self.endline = int(attrs.get('endline').value) except ValueError as exp: raise ValueError('Bad integer attribute (endline): %s' % exp) if attrs.get('startline'): try: self.startline = 
int(attrs.get('startline').value) except ValueError as exp: raise ValueError('Bad integer attribute (startline): %s' % exp) if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('compoundref'): self.compoundref = attrs.get('compoundref').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class referenceType class locationType(GeneratedsSuper): subclass = None superclass = None def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): self.bodystart = bodystart self.line = line self.bodyend = bodyend self.bodyfile = bodyfile self.file = file self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if locationType.subclass: return locationType.subclass(*args_, **kwargs_) else: return locationType(*args_, **kwargs_) factory = staticmethod(factory) def get_bodystart(self): return self.bodystart def set_bodystart(self, bodystart): self.bodystart = bodystart def get_line(self): return self.line def set_line(self, line): self.line = line def get_bodyend(self): return self.bodyend def set_bodyend(self, bodyend): self.bodyend = bodyend def get_bodyfile(self): return self.bodyfile def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile def get_file(self): return self.file def set_file(self, file): self.file = file def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='locationType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): if self.bodystart is not None: outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) if self.line is not None: outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) if self.bodyend is not None: outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) if self.bodyfile is not None: outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) if self.file is not None: outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) def exportChildren(self, outfile, level, namespace_='', name_='locationType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='locationType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, 
outfile, level, name_): if self.bodystart is not None: showIndent(outfile, level) outfile.write('bodystart = %s,\n' % (self.bodystart,)) if self.line is not None: showIndent(outfile, level) outfile.write('line = %s,\n' % (self.line,)) if self.bodyend is not None: showIndent(outfile, level) outfile.write('bodyend = %s,\n' % (self.bodyend,)) if self.bodyfile is not None: showIndent(outfile, level) outfile.write('bodyfile = %s,\n' % (self.bodyfile,)) if self.file is not None: showIndent(outfile, level) outfile.write('file = %s,\n' % (self.file,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('bodystart'): try: self.bodystart = int(attrs.get('bodystart').value) except ValueError as exp: raise ValueError('Bad integer attribute (bodystart): %s' % exp) if attrs.get('line'): try: self.line = int(attrs.get('line').value) except ValueError as exp: raise ValueError('Bad integer attribute (line): %s' % exp) if attrs.get('bodyend'): try: self.bodyend = int(attrs.get('bodyend').value) except ValueError as exp: raise ValueError('Bad integer attribute (bodyend): %s' % exp) if attrs.get('bodyfile'): self.bodyfile = attrs.get('bodyfile').value if attrs.get('file'): self.file = attrs.get('file').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class locationType class docSect1Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect1Type.subclass: return docSect1Type.subclass(*args_, **kwargs_) else: return docSect1Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): if self.id is not None: outfile.write(' id=%s' % 
(self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect2 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect1Type class docSect2Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect2Type.subclass: return docSect2Type.subclass(*args_, **kwargs_) else: 
return docSect2Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect3 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 
'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect2Type class docSect3Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect3Type.subclass: return docSect3Type.subclass(*args_, **kwargs_) else: return docSect3Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect4(self): return self.sect4 def set_sect4(self, sect4): self.sect4 = sect4 def add_sect4(self, value): self.sect4.append(value) def insert_sect4(self, index, value): self.sect4[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect4 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in 
self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect4': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect4', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect3Type class docSect4Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect4Type.subclass: return docSect4Type.subclass(*args_, **kwargs_) else: return docSect4Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): if self.id is not None: outfile.write(' id=%s' % 
(self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect4Type class docInternalType(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalType.subclass: return docInternalType.subclass(*args_, **kwargs_) else: return docInternalType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def export(self, outfile, 
level, namespace_='', name_='docInternalType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect1 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalType class docInternalS1Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS1Type.subclass: return docInternalS1Type.subclass(*args_, **kwargs_) else: return docInternalS1Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') 
outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect2 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS1Type class docInternalS2Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS2Type.subclass: return docInternalS2Type.subclass(*args_, **kwargs_) else: return docInternalS2Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): pass def 
exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS2Type class docInternalS3Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS3Type.subclass: return docInternalS3Type.subclass(*args_, **kwargs_) else: return docInternalS3Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 
is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS3Type class docInternalS4Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS4Type.subclass: return docInternalS4Type.subclass(*args_, **kwargs_) else: return docInternalS4Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: 
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docInternalS4Type


class docTitleType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docTitleType.subclass:
            return docTitleType.subclass(*args_, **kwargs_)
        else:
            return docTitleType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docTitleType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docTitleType


class docParaType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docParaType.subclass:
            return docParaType.subclass(*args_, **kwargs_)
        else:
            return docParaType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParaType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParaType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docParaType


class docMarkupType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docMarkupType.subclass:
            return docMarkupType.subclass(*args_, **kwargs_)
        else:
            return docMarkupType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docMarkupType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docMarkupType


class docURLLink(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
        self.url = url
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docURLLink.subclass:
            return docURLLink.subclass(*args_, **kwargs_)
        else:
            return docURLLink(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_url(self): return self.url
    def set_url(self, url): self.url = url
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
        if self.url is not None:
            outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docURLLink'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.url is not None:
            showIndent(outfile, level)
            outfile.write('url = %s,\n' % (self.url,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_,
nodeName_) def buildAttributes(self, attrs): if attrs.get('url'): self.url = attrs.get('url').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docURLLink class docAnchorType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docAnchorType.subclass: return docAnchorType.subclass(*args_, **kwargs_) else: return docAnchorType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docAnchorType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docAnchorType class docFormulaType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: 
self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docFormulaType.subclass: return docFormulaType.subclass(*args_, **kwargs_) else: return docFormulaType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docFormulaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docFormulaType class docIndexEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, primaryie=None, secondaryie=None): self.primaryie = primaryie self.secondaryie = secondaryie def factory(*args_, **kwargs_): if docIndexEntryType.subclass: return docIndexEntryType.subclass(*args_, **kwargs_) else: return docIndexEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_primaryie(self): return self.primaryie def set_primaryie(self, primaryie): self.primaryie = primaryie def get_secondaryie(self): return self.secondaryie def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, 
name_='docIndexEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): if self.primaryie is not None: showIndent(outfile, level) outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) if self.secondaryie is not None: showIndent(outfile, level) outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) def hasContent_(self): if ( self.primaryie is not None or self.secondaryie is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docIndexEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'primaryie': primaryie_ = '' for text__content_ in child_.childNodes: primaryie_ += text__content_.nodeValue self.primaryie = primaryie_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'secondaryie': secondaryie_ = '' for text__content_ in child_.childNodes: secondaryie_ += text__content_.nodeValue self.secondaryie = secondaryie_ # end class docIndexEntryType class docListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, listitem=None): if listitem is None: self.listitem = [] else: self.listitem = listitem def factory(*args_, **kwargs_): if docListType.subclass: return docListType.subclass(*args_, **kwargs_) else: return docListType(*args_, **kwargs_) factory = staticmethod(factory) def get_listitem(self): return self.listitem def set_listitem(self, listitem): self.listitem = listitem def add_listitem(self, value): self.listitem.append(value) def insert_listitem(self, index, value): self.listitem[index] = value def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docListType'): for listitem_ in self.listitem: listitem_.export(outfile, level, namespace_, 
name_='listitem') def hasContent_(self): if ( self.listitem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('listitem=[\n') level += 1 for listitem in self.listitem: showIndent(outfile, level) outfile.write('model_.listitem(\n') listitem.exportLiteral(outfile, level, name_='listitem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listitem': obj_ = docListItemType.factory() obj_.build(child_) self.listitem.append(obj_) # end class docListType class docListItemType(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None): if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docListItemType.subclass: return docListItemType.subclass(*args_, **kwargs_) else: return docListItemType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListItemType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() 
obj_.build(child_) self.para.append(obj_) # end class docListItemType class docSimpleSectType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, title=None, para=None): self.kind = kind self.title = title if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docSimpleSectType.subclass: return docSimpleSectType.subclass(*args_, **kwargs_) else: return docSimpleSectType(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.title: self.title.export(outfile, level, namespace_, name_='title') for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.title is not None or self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSimpleSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): if self.title: showIndent(outfile, level) outfile.write('title=model_.docTitleType(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docSimpleSectType class docVarListEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, term=None): self.term = term def factory(*args_, **kwargs_): if 
docVarListEntryType.subclass: return docVarListEntryType.subclass(*args_, **kwargs_) else: return docVarListEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_term(self): return self.term def set_term(self, term): self.term = term def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): if self.term: self.term.export(outfile, level, namespace_, name_='term', ) def hasContent_(self): if ( self.term is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVarListEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.term: showIndent(outfile, level) outfile.write('term=model_.docTitleType(\n') self.term.exportLiteral(outfile, level, name_='term') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'term': obj_ = docTitleType.factory() obj_.build(child_) self.set_term(obj_) # end class docVarListEntryType class docVariableListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docVariableListType.subclass: return docVariableListType.subclass(*args_, **kwargs_) else: return docVariableListType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVariableListType'): level += 1 self.exportLiteralAttributes(outfile, level, 
name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docVariableListType class docRefTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref self.external = external if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docRefTextType.subclass: return docRefTextType.subclass(*args_, **kwargs_) else: return docRefTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_kindref(self): return self.kindref def set_kindref(self, kindref): self.kindref = kindref def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRefTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.kindref is not None: showIndent(outfile, level) outfile.write('kindref = "%s",\n' % (self.kindref,)) if self.external is not None: showIndent(outfile, 
level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('kindref'): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docRefTextType class docTableType(GeneratedsSuper): subclass = None superclass = None def __init__(self, rows=None, cols=None, row=None, caption=None): self.rows = rows self.cols = cols if row is None: self.row = [] else: self.row = row self.caption = caption def factory(*args_, **kwargs_): if docTableType.subclass: return docTableType.subclass(*args_, **kwargs_) else: return docTableType(*args_, **kwargs_) factory = staticmethod(factory) def get_row(self): return self.row def set_row(self, row): self.row = row def add_row(self, value): self.row.append(value) def insert_row(self, index, value): self.row[index] = value def get_caption(self): return self.caption def set_caption(self, caption): self.caption = caption def get_rows(self): return self.rows def set_rows(self, rows): self.rows = rows def get_cols(self): return self.cols def set_cols(self, cols): self.cols = cols def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTableType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): if self.rows is not None: outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) if self.cols is not None: outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): for row_ in self.row: row_.export(outfile, level, namespace_, name_='row') if self.caption: self.caption.export(outfile, level, namespace_, name_='caption') def hasContent_(self): if ( self.row is not None or self.caption is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTableType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.rows is not None: showIndent(outfile, level) outfile.write('rows = %s,\n' % (self.rows,)) if self.cols is not None: showIndent(outfile, level) outfile.write('cols = %s,\n' % (self.cols,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) 
outfile.write('row=[\n') level += 1 for row in self.row: showIndent(outfile, level) outfile.write('model_.row(\n') row.exportLiteral(outfile, level, name_='row') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.caption: showIndent(outfile, level) outfile.write('caption=model_.docCaptionType(\n') self.caption.exportLiteral(outfile, level, name_='caption') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('rows'): try: self.rows = int(attrs.get('rows').value) except ValueError as exp: raise ValueError('Bad integer attribute (rows): %s' % exp) if attrs.get('cols'): try: self.cols = int(attrs.get('cols').value) except ValueError as exp: raise ValueError('Bad integer attribute (cols): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'row': obj_ = docRowType.factory() obj_.build(child_) self.row.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'caption': obj_ = docCaptionType.factory() obj_.build(child_) self.set_caption(obj_) # end class docTableType class docRowType(GeneratedsSuper): subclass = None superclass = None def __init__(self, entry=None): if entry is None: self.entry = [] else: self.entry = entry def factory(*args_, **kwargs_): if docRowType.subclass: return docRowType.subclass(*args_, **kwargs_) else: return docRowType(*args_, **kwargs_) factory = staticmethod(factory) def get_entry(self): return self.entry def set_entry(self, entry): self.entry = entry def add_entry(self, value): self.entry.append(value) def insert_entry(self, index, value): self.entry[index] = value def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRowType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): for entry_ in self.entry: entry_.export(outfile, level, namespace_, name_='entry') def hasContent_(self): if ( self.entry is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRowType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('entry=[\n') level += 1 for entry in self.entry: showIndent(outfile, level) outfile.write('model_.entry(\n') entry.exportLiteral(outfile, level, name_='entry') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def 
buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'entry': obj_ = docEntryType.factory() obj_.build(child_) self.entry.append(obj_) # end class docRowType class docEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, thead=None, para=None): self.thead = thead if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docEntryType.subclass: return docEntryType.subclass(*args_, **kwargs_) else: return docEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_thead(self): return self.thead def set_thead(self, thead): self.thead = thead def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): if self.thead is not None: outfile.write(' thead=%s' % (quote_attrib(self.thead), )) def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.thead is not None: showIndent(outfile, level) outfile.write('thead = "%s",\n' % (self.thead,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('thead'): self.thead = attrs.get('thead').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docEntryType class docCaptionType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docCaptionType.subclass: return docCaptionType.subclass(*args_, **kwargs_) else: return docCaptionType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, 
outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCaptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docCaptionType class docHeadingType(GeneratedsSuper): subclass = None superclass = None def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): self.level = level if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docHeadingType.subclass: return docHeadingType.subclass(*args_, **kwargs_) else: return docHeadingType(*args_, **kwargs_) factory = staticmethod(factory) def get_level(self): return self.level def set_level(self, level): self.level = level def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): if self.level is not None: outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): 
return True else: return False def exportLiteral(self, outfile, level, name_='docHeadingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.level is not None: showIndent(outfile, level) outfile.write('level = %s,\n' % (self.level,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('level'): try: self.level = int(attrs.get('level').value) except ValueError as exp: raise ValueError('Bad integer attribute (level): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docHeadingType class docImageType(GeneratedsSuper): subclass = None superclass = None def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): self.width = width self.type_ = type_ self.name = name self.height = height if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docImageType.subclass: return docImageType.subclass(*args_, **kwargs_) else: return docImageType(*args_, **kwargs_) factory = staticmethod(factory) def get_width(self): return self.width def set_width(self, width): self.width = width def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name def get_height(self): return self.height def set_height(self, height): self.height = height def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docImageType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): if self.width is not None: outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) if self.type_ is not None: outfile.write(' type=%s' % (quote_attrib(self.type_), )) if self.name is not None: outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) if self.height is not None: outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) 
value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docImageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.width is not None: showIndent(outfile, level) outfile.write('width = %s,\n' % (self.width,)) if self.type_ is not None: showIndent(outfile, level) outfile.write('type_ = "%s",\n' % (self.type_,)) if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) if self.height is not None: showIndent(outfile, level) outfile.write('height = %s,\n' % (self.height,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('width'): self.width = attrs.get('width').value if attrs.get('type'): self.type_ = attrs.get('type').value if attrs.get('name'): self.name = attrs.get('name').value if attrs.get('height'): self.height = attrs.get('height').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docImageType class docDotFileType(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): self.name = name if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docDotFileType.subclass: return docDotFileType.subclass(*args_, **kwargs_) else: return docDotFileType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): if self.name is not None: outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def 
hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docDotFileType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('name'): self.name = attrs.get('name').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docDotFileType class docTocItemType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docTocItemType.subclass: return docTocItemType.subclass(*args_, **kwargs_) else: return docTocItemType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTocItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' 
for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docTocItemType class docTocListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, tocitem=None): if tocitem is None: self.tocitem = [] else: self.tocitem = tocitem def factory(*args_, **kwargs_): if docTocListType.subclass: return docTocListType.subclass(*args_, **kwargs_) else: return docTocListType(*args_, **kwargs_) factory = staticmethod(factory) def get_tocitem(self): return self.tocitem def set_tocitem(self, tocitem): self.tocitem = tocitem def add_tocitem(self, value): self.tocitem.append(value) def insert_tocitem(self, index, value): self.tocitem[index] = value def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTocListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): for tocitem_ in self.tocitem: tocitem_.export(outfile, level, namespace_, name_='tocitem') def hasContent_(self): if ( self.tocitem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTocListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('tocitem=[\n') level += 1 for tocitem in self.tocitem: showIndent(outfile, level) outfile.write('model_.tocitem(\n') tocitem.exportLiteral(outfile, level, name_='tocitem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'tocitem': obj_ = docTocItemType.factory() obj_.build(child_) self.tocitem.append(obj_) # end class docTocListType class docLanguageType(GeneratedsSuper): subclass = None superclass = None def __init__(self, langid=None, para=None): self.langid = langid if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docLanguageType.subclass: return docLanguageType.subclass(*args_, **kwargs_) else: return docLanguageType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = 
para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_langid(self): return self.langid def set_langid(self, langid): self.langid = langid def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): if self.langid is not None: outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docLanguageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.langid is not None: showIndent(outfile, level) outfile.write('langid = %s,\n' % (self.langid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('langid'): self.langid = attrs.get('langid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docLanguageType class docParamListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, parameteritem=None): self.kind = kind if parameteritem is None: self.parameteritem = [] else: self.parameteritem = parameteritem def factory(*args_, **kwargs_): if docParamListType.subclass: return docParamListType.subclass(*args_, **kwargs_) else: return docParamListType(*args_, **kwargs_) factory = staticmethod(factory) def get_parameteritem(self): return self.parameteritem def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem def add_parameteritem(self, value): self.parameteritem.append(value) def insert_parameteritem(self, index, value): self.parameteritem[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) 
showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): for parameteritem_ in self.parameteritem: parameteritem_.export(outfile, level, namespace_, name_='parameteritem') def hasContent_(self): if ( self.parameteritem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameteritem=[\n') level += 1 for parameteritem in self.parameteritem: showIndent(outfile, level) outfile.write('model_.parameteritem(\n') parameteritem.exportLiteral(outfile, level, name_='parameteritem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameteritem': obj_ = docParamListItem.factory() obj_.build(child_) self.parameteritem.append(obj_) # end class docParamListType class docParamListItem(GeneratedsSuper): subclass = None superclass = None def __init__(self, parameternamelist=None, parameterdescription=None): if parameternamelist is None: self.parameternamelist = [] else: self.parameternamelist = parameternamelist self.parameterdescription = parameterdescription def factory(*args_, **kwargs_): if docParamListItem.subclass: return docParamListItem.subclass(*args_, **kwargs_) else: return docParamListItem(*args_, **kwargs_) factory = staticmethod(factory) def get_parameternamelist(self): return self.parameternamelist def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist def add_parameternamelist(self, value): self.parameternamelist.append(value) def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value def get_parameterdescription(self): return self.parameterdescription def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): pass def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): for parameternamelist_ in self.parameternamelist: 
parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') if self.parameterdescription: self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) def hasContent_(self): if ( self.parameternamelist is not None or self.parameterdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamListItem'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameternamelist=[\n') level += 1 for parameternamelist in self.parameternamelist: showIndent(outfile, level) outfile.write('model_.parameternamelist(\n') parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.parameterdescription: showIndent(outfile, level) outfile.write('parameterdescription=model_.descriptionType(\n') self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameternamelist': obj_ = docParamNameList.factory() obj_.build(child_) self.parameternamelist.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameterdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_parameterdescription(obj_) # end class docParamListItem class docParamNameList(GeneratedsSuper): subclass = None superclass = None def __init__(self, parametername=None): if parametername is None: self.parametername = [] else: self.parametername = parametername def factory(*args_, **kwargs_): if docParamNameList.subclass: return docParamNameList.subclass(*args_, **kwargs_) else: return docParamNameList(*args_, **kwargs_) factory = staticmethod(factory) def get_parametername(self): return self.parametername def set_parametername(self, parametername): self.parametername = parametername def add_parametername(self, value): self.parametername.append(value) def insert_parametername(self, index, value): self.parametername[index] = value def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): pass def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): for parametername_ in self.parametername: parametername_.export(outfile, level, namespace_, name_='parametername') def hasContent_(self): if ( self.parametername is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamNameList'): level 
+= 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parametername=[\n') level += 1 for parametername in self.parametername: showIndent(outfile, level) outfile.write('model_.parametername(\n') parametername.exportLiteral(outfile, level, name_='parametername') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parametername': obj_ = docParamName.factory() obj_.build(child_) self.parametername.append(obj_) # end class docParamNameList class docParamName(GeneratedsSuper): subclass = None superclass = None def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): self.direction = direction if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docParamName.subclass: return docParamName.subclass(*args_, **kwargs_) else: return docParamName(*args_, **kwargs_) factory = staticmethod(factory) def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def get_direction(self): return self.direction def set_direction(self, direction): self.direction = direction def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamName') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): if self.direction is not None: outfile.write(' direction=%s' % (quote_attrib(self.direction), )) def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamName'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.direction is not None: showIndent(outfile, level) outfile.write('direction = "%s",\n' % (self.direction,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('direction'): self.direction = attrs.get('direction').value def buildChildren(self, child_, nodeName_): if child_.nodeType == 
Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docParamName class docXRefSectType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, xreftitle=None, xrefdescription=None): self.id = id if xreftitle is None: self.xreftitle = [] else: self.xreftitle = xreftitle self.xrefdescription = xrefdescription def factory(*args_, **kwargs_): if docXRefSectType.subclass: return docXRefSectType.subclass(*args_, **kwargs_) else: return docXRefSectType(*args_, **kwargs_) factory = staticmethod(factory) def get_xreftitle(self): return self.xreftitle def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle def add_xreftitle(self, value): self.xreftitle.append(value) def insert_xreftitle(self, index, value): self.xreftitle[index] = value def get_xrefdescription(self): return self.xrefdescription def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'): for xreftitle_ in self.xreftitle: showIndent(outfile, level) outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) if self.xrefdescription: self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) def hasContent_(self): if ( self.xreftitle is not None or self.xrefdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docXRefSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('xreftitle=[\n') level += 1 for xreftitle in self.xreftitle: showIndent(outfile, level) outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.xrefdescription: showIndent(outfile, level) outfile.write('xrefdescription=model_.descriptionType(\n') self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes 
self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'xreftitle': xreftitle_ = '' for text__content_ in child_.childNodes: xreftitle_ += text__content_.nodeValue self.xreftitle.append(xreftitle_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'xrefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_xrefdescription(obj_) # end class docXRefSectType class docCopyType(GeneratedsSuper): subclass = None superclass = None def __init__(self, link=None, para=None, sect1=None, internal=None): self.link = link if para is None: self.para = [] else: self.para = para if sect1 is None: self.sect1 = [] else: self.sect1 = sect1 self.internal = internal def factory(*args_, **kwargs_): if docCopyType.subclass: return docCopyType.subclass(*args_, **kwargs_) else: return docCopyType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_link(self): return self.link def set_link(self, link): self.link = link def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCopyType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): if self.link is not None: outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') for sect1_ in self.sect1: sect1_.export(outfile, level, namespace_, name_='sect1') if self.internal: self.internal.export(outfile, level, namespace_, name_='internal') def hasContent_(self): if ( self.para is not None or self.sect1 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCopyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.link is not None: showIndent(outfile, level) outfile.write('link = %s,\n' % (self.link,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, 
level) outfile.write('],\n') showIndent(outfile, level) outfile.write('sect1=[\n') level += 1 for sect1 in self.sect1: showIndent(outfile, level) outfile.write('model_.sect1(\n') sect1.exportLiteral(outfile, level, name_='sect1') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.internal: showIndent(outfile, level) outfile.write('internal=model_.docInternalType(\n') self.internal.exportLiteral(outfile, level, name_='internal') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('link'): self.link = attrs.get('link').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': obj_ = docSect1Type.factory() obj_.build(child_) self.sect1.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': obj_ = docInternalType.factory() obj_.build(child_) self.set_internal(obj_) # end class docCopyType class docCharType(GeneratedsSuper): subclass = None superclass = None def __init__(self, char=None, valueOf_=''): self.char = char self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docCharType.subclass: return docCharType.subclass(*args_, **kwargs_) else: return docCharType(*args_, **kwargs_) factory = staticmethod(factory) def get_char(self): return self.char def set_char(self, char): self.char = char def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCharType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): if self.char is not None: outfile.write(' char=%s' % (quote_attrib(self.char), )) def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCharType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.char is not None: showIndent(outfile, level) outfile.write('char = "%s",\n' % (self.char,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, 
nodeName_) def buildAttributes(self, attrs): if attrs.get('char'): self.char = attrs.get('char').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docCharType class docEmptyType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docEmptyType.subclass: return docEmptyType.subclass(*args_, **kwargs_) else: return docEmptyType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docEmptyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docEmptyType USAGE_TEXT = """ Usage: python <Parser>.py [ -s ] <in_xml_file> Options: -s Use the SAX parser, not the minidom parser. """ def usage(): print(USAGE_TEXT) sys.exit(1) def parse(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='') return rootObj def parseString(inString): doc = minidom.parseString(inString) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='') return rootObj def parseLiteral(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('from compound import *\n\n') sys.stdout.write('rootObj = doxygen(\n') rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") sys.stdout.write(')\n') return rootObj def main(): args = sys.argv[1:] if len(args) == 1: parse(args[0]) else: usage() if __name__ == '__main__': main() #import pdb #pdb.run('main()')
gpl-3.0
5,698,839,263,969,963,000
42.138989
641
0.59627
false
GUR9000/Deep_MRI_brain_extraction
NNet_Core/NN_Analyzer.py
1
4154
""" This software is an implementation of Deep MRI brain extraction: A 3D convolutional neural network for skull stripping You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024 If you use this software for your projects please cite: Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping, NeuroImage, Volume 129, April 2016, Pages 460-469. The MIT License (MIT) Copyright (c) 2016 Gregor Urban, Jens Kleesiek Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import print_function import theano import theano.tensor as T import numpy as np class Analyzer(object): def __init__(self, cnn): self._cnn = cnn self._ranonce = False self._ranonce2 = False #################### def _runonce(self): if self._ranonce: return print(self,'compiling...') self._output_function = theano.function([self._cnn.layers[0].input], [lay.output for lay in self._cnn.layers]) self._ranonce=True #################### def _runonce2(self): if self._ranonce2: return print(self,'compiling...') output_layer_Gradients = T.grad(self._cnn.output_layer_Loss, self._cnn.params, disconnected_inputs="warn") self._output_function2 = theano.function([self._cnn.x, self._cnn.y], [x for x in output_layer_Gradients], on_unused_input='warn') self._ranonce2=True def analyze_forward_pass(self, *input): """ input should be a list of all inputs. ((DO NOT INCLUDE labels/targets!))""" self._runonce() outputs = self._output_function(*input) print() print( 'Analyzing internal outputs of network',self._cnn,' (I am',self,') ... ') for lay,out in zip(self._cnn.layers, outputs): mi,ma = np.min(out), np.max(out) mea,med = np.mean(out),np.median(out) std = np.std(out) print( '{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std)) print() return outputs def analyze_gradients(self, *input): """ input should be a list of all inputs and labels/targets""" self._runonce2() outputs = self._output_function2(*input) print() print( 'Analyzing internal gradients of network',self._cnn,' (I am',self,') ... ') i = 0 j = 0 for lay in self._cnn.layers: try: j = len(lay.params) except: j = 0 if j: for out in outputs[i:i+j]: mi,ma = np.min(out), np.max(out) mea,med = np.mean(out),np.median(out) std = np.std(out) print('{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std)) else: print( '{:^100}: no parameters'.format(lay)) i+=j print() return outputs
mit
4,321,185,761,625,126,000
40.54
163
0.614107
false
mespe/SolRad
collection/compare_cimis_cfsr/compare_cimis_cfsr.py
1
2737
import pandas as pd
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import netCDF4


def load_CFSR_data():
    my_example_nc_file = 'RES.nc'
    # latitude, longitude = (39.5, -122)
    fh = Dataset(my_example_nc_file, mode='r')

    print(fh.variables.keys())
    print(help(fh.variables['time']))
    print(fh.variables['time'].name)
    ####time = fh['time'][:]
    ####print(time)

    times = fh.variables['time']
    time_np = netCDF4.num2date(times[:], times.units) - pd.offsets.Hour(8)
    #print(time_np.shape)

    variables = {"SHTFL_L1_Avg_1" : "Sensible heat flux",
                 "DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
                 "CSDSF_L1_Avg_1" : "Clear sky downward solar flux",
                 "DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
                 "DLWRF_L1_Avg_1" : "Downward longwave radiation flux",
                 "CSULF_L1_Avg_1" : "Clear sky upward longwave flux",
                 "GFLUX_L1_Avg_1" : "Ground heat flux"}

    #downward_solar_flux_np = fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0] - fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0]
    downward_solar_flux_np = fh.variables["CSDLF_L1_Avg_1"][:, 0, 0]
    #(fh.variables["SHTFL_L1_Avg_1"][:,0,0] + fh.variables["LHTFL_L1_Avg_1"][:,0,0] +
    #fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0] -
    #fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0] +
    #fh.variables["GFLUX_L1_Avg_1"][:,0,0] )
    #print(downward_solar_flux_np.shape)

    df = pd.DataFrame({'datetime': time_np, 'solar rad': downward_solar_flux_np})
    #plt.plot(df['time'][:100], df['solar'][:100])

    # save to a pickle file
    df.to_pickle('cfsr_2005_2010.pkl')

    #'CSDSF_L1_Avg_1'
    for key in fh.variables.keys():
        variable = fh.variables[key]
        #variable = fh.variables[key][:]
        print(variable)
        print()


def compare():
    cimis = pd.read_pickle('cimis_2005_2010.pkl')
    cfsr = pd.read_pickle('cfsr_2005_2010.pkl')

    plt.plot(cfsr['datetime'][:250], cfsr['solar rad'][:250], label = "cfsr")
    plt.plot(cimis['datetime'][:1500], cimis['solar rad'][:1500], label = "cimis")
    plt.legend()

    #for i in range(10):
    #    print(cfsr['datetime'][i], cimis['datetime'][i])
    #print(cimis['datetime'][i])


load_CFSR_data()
#compare()
mit
-8,473,513,301,691,959,000
26.938776
186
0.523201
false
samzhang111/scikit-learn
sklearn/grid_search.py
3
38126
""" The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from .base import BaseEstimator, is_classifier, clone from .base import MetaEstimatorMixin from .cross_validation import check_cv from .cross_validation import _fit_and_score from .externals.joblib import Parallel, delayed from .externals import six from .utils import check_random_state from .utils.random import sample_without_replacement from .utils.validation import _num_samples, indexable from .utils.metaestimators import if_delegate_has_method from .metrics.scorer import check_scoring from .exceptions import ChangedBehaviorWarning __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.grid_search import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: uses ``ParameterGrid`` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.grid_search import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d." % (grid_size, self.n_iter) + " For exhaustive searches, use GridSearchCV.") for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): params[k] = v.rvs() else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for v in p.values(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values should be a list.") if len(v) == 0: raise ValueError("Parameter values should be a non-empty " "list.") class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Notes ----- * The long-standing behavior of this method changed in version 0.16. * It no longer uses the metric provided by ``estimator.score`` if the ``scoring`` parameter was set when fitting. """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) if self.scoring is not None and hasattr(self.best_estimator_, 'score'): warnings.warn("The long-standing behavior to use the estimator's " "score function in {0}.score has changed. The " "scoring parameter is now used." 
"".format(self.__class__.__name__), ChangedBehaviorWarning) return self.scorer_(self.best_estimator_, X, y) @if_delegate_has_method(delegate='estimator') def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict(X) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate='estimator') def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate='estimator') def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(X) @if_delegate_has_method(delegate='estimator') def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" return self.best_estimator_.transform(Xt) def _fit(self, X, y, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = self.cv self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) n_samples = _num_samples(X) X, y = indexable(X, y) if y is not None: if len(y) != n_samples: raise ValueError('Target variable (y) has a different number ' 'of samples (%i) than data (X: %i samples)' % (len(y), n_samples)) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) if self.verbose > 0: if isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(len(cv), n_candidates, n_candidates * len(cv))) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )( delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv) # Out is a list of triplet: score, estimator, n_test_samples n_fits = len(out) n_folds = len(cv) scores = list() grid_scores = list() for grid_start in range(0, n_fits, n_folds): n_test_samples = 0 score = 0 all_scores = [] for this_score, this_n_test_samples, _, parameters in \ out[grid_start:grid_start + n_folds]: all_scores.append(this_score) if self.iid: this_score *= this_n_test_samples n_test_samples += this_n_test_samples score += this_score if self.iid: score /= float(n_test_samples) else: score /= float(n_folds) scores.append((score, parameters)) # TODO: shall we also store the test_fold_sizes? grid_scores.append(_CVScoreTuple( parameters, score, np.array(all_scores))) # Store the computed scores self.grid_scores_ = grid_scores # Find the best parameters by comparing on the mean validation score: # note that `sorted` is deterministic in the way it breaks ties best = sorted(grid_scores, key=lambda x: x.mean_validation_score, reverse=True)[0] self.best_params_ = best.parameters self.best_score_ = best.mean_validation_score if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best.parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, grid_search, datasets >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = grid_search.GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape=None, degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. 
Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a an hyperparameter grid. :func:`sklearn.cross_validation.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. 
An object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_distributions : dict
        Dictionary with parameter names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.

    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.

    """

    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):

        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)

    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
bsd-3-clause
-8,075,441,600,595,167,000
37.433468
87
0.608168
false
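The RandomizedSearchCV docstring in the record above explains that list-valued parameters are sampled uniformly while scipy.stats distributions are sampled through their ``rvs`` method. A minimal, hedged usage sketch of that behaviour, assuming the deprecated ``sklearn.grid_search`` module layout shown in this record (newer releases expose the same class under ``sklearn.model_selection``):

import scipy.stats as stats
from sklearn.grid_search import RandomizedSearchCV
from sklearn import svm, datasets

iris = datasets.load_iris()
# 'kernel' is a list (sampled uniformly); 'C' is a distribution exposing rvs.
param_distributions = {'kernel': ['linear', 'rbf'],
                       'C': stats.expon(scale=10)}
search = RandomizedSearchCV(svm.SVC(), param_distributions,
                            n_iter=8, random_state=0)
search.fit(iris.data, iris.target)
print(search.best_params_, search.best_score_)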
Monika319/EWEF-1
Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py
1
1312
# -*- coding: utf-8 -*- """ Plot oscilloscope files from MultiSim """ import numpy as np import matplotlib.pyplot as plt import sys import os from matplotlib import rc rc('font',family="Consolas") files=["real_zad5_05f_p2.txt"] for NazwaPliku in files: print NazwaPliku Plik=open(NazwaPliku) #print DeltaT Dane=Plik.readlines()#[4:] DeltaT=float(Dane[2].split()[3].replace(",",".")) #M=len(Dane[4].split())/2 M=2 Dane=Dane[5:] Plik.close() print M Ys=[np.zeros(len(Dane)) for i in range(M)] for m in range(M): for i in range(len(Dane)): try: Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",",".")) except: print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split() #print i, Y[i] X=np.zeros_like(Ys[0]) for i in range(len(X)): X[i]=i*DeltaT for y in Ys: print max(y)-min(y) Opis=u"Układ szeregowy\nPołowa częstotliwości rezonansowej" Nazwa=u"Z5W2" plt.title(u"Przebieg napięciowy\n"+Opis) plt.xlabel(u"Czas t [s]") plt.ylabel(u"Napięcie [V]") plt.plot(X,Ys[0],label=u"Wejście") plt.plot(X,Ys[1],label=u"Wyjście") plt.grid() plt.legend(loc="best") plt.savefig(Nazwa + ".png", bbox_inches='tight') plt.show()
gpl-2.0
-1,034,745,225,697,455,700
23.603774
72
0.578221
false
cBeaird/SemEval_Character-Identification-on-Multiparty-Dialogues
Classifiers/Random_Forest/Learning_Curve.py
1
2718
#!/usr/bin/env python
"""
File to plot the Learning Curve of a Random Forest
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import matplotlib as mpl
from sklearn.model_selection import KFold
from sklearn.model_selection import learning_curve
mpl.use('TkAgg')
import matplotlib.pyplot as plt

__author__ = 'Brandon Watts'
__credits__ = ['Casey Beaird', 'Chase Greco']
__license__ = 'MIT'
__version__ = '0.1'


def split_labels_and_vectors(csv_path, label_name):
    """
    Method used to split a csv into two dataframes: labels and vectors
    :param csv_path: Path to the CSV file
    :param label_name: Name of the label column
    :return: label and vector dataframes
    """
    df = pd.read_csv(csv_path)
    df_labels = df[label_name].values.tolist()
    df_vectors = df.drop([label_name], axis=1).values
    return df_labels, df_vectors


def plot_curve(x, y, folds):
    """
    Method used to plot the Learning Curve
    :param x: vectors
    :param y: labels
    :param folds: cross-validation strategy (e.g. a KFold instance)
    """

    # Create and Train a classifier
    classifier = RandomForestClassifier(n_jobs=-1, max_features=None, oob_score=True, n_estimators=63, max_depth=30,
                                        min_samples_leaf=1)
    classifier.fit(x, y)

    # Create the Learning Curve for the Classifier
    train_sizes, train_scores, test_scores = learning_curve(classifier, x, y, n_jobs=-1, cv=folds,
                                                            train_sizes=np.linspace(.1, 1.0, 5), verbose=0)

    # Extract all the stats for the plot
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    # Create the plot
    plt.figure()
    plt.title("RandomForestClassifier")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.gca().invert_yaxis()
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,
                     color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
    # Legend is drawn after the labelled plots so that the handles are picked up.
    plt.legend(loc="best")
    plt.ylim(-.1, 1.1)
    plt.show()


cv = KFold(10, shuffle=True)
labels, vectors = split_labels_and_vectors(csv_path="../../vectors.csv", label_name="Entity_ID")
plot_curve(vectors, labels, cv)
mit
-2,796,158,065,937,514,500
34.298701
120
0.649007
false
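A small follow-up sketch related to the record above: the classifier is built with ``oob_score=True`` but the out-of-bag estimate is never read. On synthetic stand-in data (the real ``vectors.csv`` is not available here), it can serve as a cheap generalisation check alongside the cross-validated learning curve:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X = rng.rand(200, 5)               # stand-in feature vectors
y = (X[:, 0] > 0.5).astype(int)    # stand-in labels
clf = RandomForestClassifier(n_estimators=63, oob_score=True, n_jobs=-1)
clf.fit(X, y)
print("OOB accuracy: %.3f" % clf.oob_score_)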
ciyer/stockscape
src/python/Stockscape/stockscape/dsr.py
1
6058
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
dsr.py

Utilities for the DeLong-Shiller Redux (dsr).

Created by Chandrasekhar Ramakrishnan on 2017-10-02.
Copyright (c) 2017 Chandrasekhar Ramakrishnan. All rights reserved.
"""

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import statsmodels.formula.api as smf


def time_ticks(hop=10, start=1880, end=2020):
    """Standard tick points for DSR visualizations"""
    return [str(y) for y in np.arange(start, end, hop)]


def periods_from_df(time_df):
    """Take a frame and return a breakdown of the consecutive periods represented in the frame."""
    period = [time_df.index[0]]
    periods = [period]
    all_period_years = [time_df.index[0]]
    for i in range(1, len(time_df.index)):
        all_period_years.append(time_df.index[i])
        if time_df.index[i - 1] + 1 == time_df.index[i]:
            period.append(time_df.index[i])
        else:
            period = [time_df.index[i]]
            periods.append(period)
    period_labels = ["{}-{}".format(p[0], p[-1]) for p in periods]
    return all_period_years, [(p, pl) for p, pl in zip(periods, period_labels)]


def cite_source(ax):
    ax.annotate('Source: Robert Shiller', (1, 0), (-2, -30), fontsize=8,
                xycoords='axes fraction', textcoords='offset points',
                va='bottom', ha='right')


def split_to_and_since_delong(df):
    """Split the frame into time periods that DeLong analyzed and those since his article.

    :param df: The frame to split
    :return: Tuple with (to_delong, since_delong)
    """
    to_delong_index = [d for d in df.index if d.year <= 2004 and d.month < 6]
    # Use == for the year comparison; identity ("is") on ints is not reliable here.
    since_delong_index = [d for d in df.index
                          if d.year > 2004 or (d.year == 2004 and d.month >= 6)]
    return df.loc[to_delong_index], df.loc[since_delong_index]


def latest_index_label(ser_of_df):
    """Return a string for the latest date in the ser_of_df.

    :param ser_of_df: The series or frame to process
    :return: String (month(short) 'YY) for the date
    """
    return ser_of_df.dropna().index[-1].strftime("%b '%y").lower()


def split_cape_threshold_years(df, threshold=25, period_col='period'):
    """Split the df into those years above (or equal) and years below the CAPE threshold.

    In addition to splitting, add a column that labels the period.

    :param df: The data frame to apply the threshold to
    :param threshold: Defaults to 25
    :return: (above_threshold with period and year columns, below_threshold)
    """
    above_threshold = pd.DataFrame(df[df['cape'] >= threshold], copy=True)
    above_threshold['year'] = [i.year for i in above_threshold.index]
    above_threshold_years = above_threshold.groupby('year').count()
    above_threshold_years = above_threshold_years[above_threshold_years['price'] > 1]
    above_threshold_period_years, above_threshold_periods_and_labels = periods_from_df(above_threshold_years)
    for period, label in above_threshold_periods_and_labels:
        above_threshold.loc[above_threshold['year'].isin(period), period_col] = label
    all_high_cape_period_years_set = set(above_threshold_period_years)
    below_threshold = df.loc[[d for d in df.index if d.year not in all_high_cape_period_years_set]]
    return above_threshold, below_threshold


def loss_indices(*dfs):
    """Return the indices where any of the dfs experienced losses.
:param dfs: The data frames to analyze :return: Unique indices in which some df had a loss """ li = np.concatenate([np.array(df[df['returns'] < 0].index) for df in dfs]) return np.unique(li) def inversion_indices(df1, df2, column): """Return the indices in which df1[column] > df2[column] :param df1: A data frame :param df2: Another data frame :param column: A shared column :return: The indices where df1[column] > df2[column] """ return df1[df1[column] > df2[column]].index class DsrStylePrefs(object): """Utility class for styles/preferences/palettes""" def __init__(self): self.figure_full_size = (10.0, 7.5) self.figure_medium_size = (8.0, 5.5) self.figure_small_size = (8.0, 3.75) self.s_palette = sns.color_palette('Blues_r')[0:4] self.b_palette = sns.color_palette('Purples_r')[0:4] l_palette = sns.color_palette('Dark2') self.l_palette = [l_palette[i] for i in [1, 5, 3, 4, 0]] def use(self): """Applies styling to matplotlib """ if 'ciyer' in mpl.style.available: plt.style.use(['seaborn-darkgrid', 'ciyer']) plt.rcParams["figure.figsize"] = self.figure_full_size class LinearModel(object): """Bundle the relevant information from a linear regression.""" def __init__(self, ind, dep, df, pred_range): """Build a linear model of data :param ind: Independent variable :param dep: Dependent variable :param df: The frame to fit the model against :param pred_range: The range to predict on. """ self.ind = ind self.dep = dep self.df = df self.pred_range = pred_range self.lm = None self.predictions = None def fit_and_predict(self): self.lm = smf.ols(formula="{} ~ {}".format(self.dep, self.ind), data=self.df).fit() preds_input = pd.DataFrame({self.ind: self.pred_range}) self.predictions = self.lm.predict(preds_input) return self @property def x_intercept(self): return (-1 * self.lm.params[0]) / self.lm.params[1] @property def rsquared(self): return self.lm.rsquared @property def rsquared_computed(self): """Compute rsquared from the data""" preds_input = pd.DataFrame({'cape': self.df[self.ind]}) preds = self.lm.predict(preds_input) ss_res = np.sum(np.power((self.df[self.dep] - preds).dropna(), 2)) dep_mean = self.df[self.dep].mean() ss_tot = np.sum(np.power((self.df[self.dep] - dep_mean).dropna(), 2)) return 1 - (ss_res / ss_tot)
bsd-3-clause
6,513,744,225,531,852,000
35.715152
109
0.643447
false
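To make the grouping logic of ``periods_from_df`` from the record above concrete, here is a tiny illustration with hypothetical year indices (consecutive years are merged into one labelled period); the import path is assumed from the file location shown in this record:

import pandas as pd
from stockscape.dsr import periods_from_df  # assumed importable module path

years_df = pd.DataFrame({'price': [1, 1, 1, 1, 1]},
                        index=[1929, 1930, 1931, 1999, 2000])
all_years, periods = periods_from_df(years_df)
print(all_years)  # [1929, 1930, 1931, 1999, 2000]
print(periods)    # [([1929, 1930, 1931], '1929-1931'), ([1999, 2000], '1999-2000')]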
ryfeus/lambda-packs
Skimage_numpy/source/scipy/optimize/minpack.py
11
30534
from __future__ import division, print_function, absolute_import import warnings from . import _minpack import numpy as np from numpy import (atleast_1d, dot, take, triu, shape, eye, transpose, zeros, product, greater, array, all, where, isscalar, asarray, inf, abs, finfo, inexact, issubdtype, dtype) from scipy.linalg import svd from scipy._lib._util import _asarray_validated, _lazywhere from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning from ._lsq import least_squares from ._lsq.common import make_strictly_feasible from ._lsq.least_squares import prepare_bounds error = _minpack.error __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) if (output_shape is not None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: if output_shape[1] == 1: return shape(res) msg = "%s: there is a mismatch between the input and output " \ "shape of the '%s' argument" % (checker, argname) func_name = getattr(thefunc, '__name__', None) if func_name: msg += " '%s'." % func_name else: msg += "." msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res)) raise TypeError(msg) if issubdtype(res.dtype, inexact): dt = res.dtype else: dt = dtype(float) return shape(res), dt def fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, epsfcn=None, factor=100, diag=None): """ Find the roots of a function. Return the roots of the (non-linear) equations defined by ``func(x) = 0`` given a starting estimate. Parameters ---------- func : callable ``f(x, *args)`` A function that takes at least one (possibly vector) argument. x0 : ndarray The starting estimate for the roots of ``func(x) = 0``. args : tuple, optional Any extra arguments to `func`. fprime : callable(x), optional A function to compute the Jacobian of `func` with derivatives across the rows. By default, the Jacobian will be estimated. full_output : bool, optional If True, return optional outputs. col_deriv : bool, optional Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float, optional The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int, optional The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple, optional If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). epsfcn : float, optional A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `epsfcn` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). 
infodict : dict A dictionary of optional outputs with the keys: ``nfev`` number of function calls ``njev`` number of Jacobian calls ``fvec`` function evaluated at the output ``fjac`` the orthogonal matrix, q, produced by the QR factorization of the final approximate Jacobian matrix, stored column wise ``r`` upper triangular matrix produced by QR factorization of the same matrix ``qtf`` the vector ``(transpose(q) * fvec)`` ier : int An integer flag. Set to 1 if a solution was found, otherwise refer to `mesg` for more information. mesg : str If no solution is found, `mesg` details the cause of failure. See Also -------- root : Interface to root finding algorithms for multivariate functions. See the 'hybr' `method` in particular. Notes ----- ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. """ options = {'col_deriv': col_deriv, 'xtol': xtol, 'maxfev': maxfev, 'band': band, 'eps': epsfcn, 'factor': factor, 'diag': diag} res = _root_hybr(func, x0, args, jac=fprime, **options) if full_output: x = res['x'] info = dict((k, res.get(k)) for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res) info['fvec'] = res['fun'] return x, info, res['status'], res['message'] else: status = res['status'] msg = res['message'] if status == 0: raise TypeError(msg) elif status == 1: pass elif status in [2, 3, 4, 5]: warnings.warn(msg, RuntimeWarning) else: raise TypeError(msg) return res['x'] def _root_hybr(func, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, factor=100, diag=None, **unknown_options): """ Find the roots of a multivariate function using MINPACK's hybrd and hybrj routines (modified Powell method). Options ------- col_deriv : bool Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). eps : float A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `eps` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. 
""" _check_unknown_options(unknown_options) epsfcn = eps x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) if epsfcn is None: epsfcn = finfo(dtype).eps Dfun = jac if Dfun is None: if band is None: ml, mu = -10, -10 else: ml, mu = band[:2] if maxfev == 0: maxfev = 200 * (n + 1) retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, ml, mu, epsfcn, factor, diag) else: _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) if (maxfev == 0): maxfev = 100 * (n + 1) retval = _minpack._hybrj(func, Dfun, x0, args, 1, col_deriv, xtol, maxfev, factor, diag) x, status = retval[0], retval[-1] errors = {0: "Improper input parameters were entered.", 1: "The solution converged.", 2: "The number of calls to function has " "reached maxfev = %d." % maxfev, 3: "xtol=%f is too small, no further improvement " "in the approximate\n solution " "is possible." % xtol, 4: "The iteration is not making good progress, as measured " "by the \n improvement from the last five " "Jacobian evaluations.", 5: "The iteration is not making good progress, " "as measured by the \n improvement from the last " "ten iterations.", 'unknown': "An error occurred."} info = retval[1] info['fun'] = info.pop('fvec') sol = OptimizeResult(x=x, success=(status == 1), status=status) sol.update(info) try: sol['message'] = errors[status] except KeyError: info['message'] = errors['unknown'] return sol def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable should take at least one (possibly length N vector) argument and returns M floating point numbers. It must not return NaNs or fitting might fail. x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional non-zero to return all optional outputs. col_deriv : bool, optional non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. 
Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray Uses the fjac and ipvt optional outputs to construct an estimate of the jacobian around the solution. None if a singular matrix encountered (indicates very flat curvature in some direction). This matrix must be multiplied by the residual variance to get the covariance of the parameter estimates -- see curve_fit. infodict : dict a dictionary of optional outputs with the key s: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). mesg : str A string message giving information about the cause of failure. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most %f and the " "relative error between two consecutive " "iterates is at \n most %f" % (ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible.""" % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." 
% xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." % gtol, ValueError], 'unknown': ["Unknown error.", TypeError]} info = retval[-1] # The FORTRAN return value if info not in [1, 2, 3, 4] and not full_output: if info in [5, 6, 7, 8]: warnings.warn(errors[info][0], RuntimeWarning) else: try: raise errors[info][1](errors[info][0]) except KeyError: raise errors['unknown'][1](errors['unknown'][0]) mesg = errors[info][0] if full_output: cov_x = None if info in [1, 2, 3, 4]: from numpy.dual import inv from numpy.linalg import LinAlgError perm = take(eye(n), retval[1]['ipvt'] - 1, 0) r = triu(transpose(retval[1]['fjac'])[:n, :]) R = dot(r, perm) try: cov_x = inv(dot(transpose(R), R)) except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (mesg, info) else: return (retval[0], info) def _wrap_func(func, xdata, ydata, weights): if weights is None: def func_wrapped(params): return func(xdata, *params) - ydata else: def func_wrapped(params): return weights * (func(xdata, *params) - ydata) return func_wrapped def _wrap_jac(jac, xdata, weights): if weights is None: def jac_wrapped(params): return jac(xdata, *params) else: def jac_wrapped(params): return weights[:, np.newaxis] * np.asarray(jac(xdata, *params)) return jac_wrapped def _initialize_feasible(lb, ub): p0 = np.ones_like(lb) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) mask = lb_finite & ub_finite p0[mask] = 0.5 * (lb[mask] + ub[mask]) mask = lb_finite & ~ub_finite p0[mask] = lb[mask] + 1 mask = ~lb_finite & ub_finite p0[mask] = ub[mask] - 1 return p0 def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-np.inf, np.inf), method=None, jac=None, **kwargs): """ Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps`` Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. xdata : An M-length sequence or an (k,M)-shaped array for functions with k predictors. The independent variable where the data is measured. ydata : M-length sequence The dependent data --- nominally f(xdata, ...) p0 : None, scalar, or N-length sequence, optional Initial guess for the parameters. If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). sigma : None or M-length sequence, optional If not None, the uncertainties in the ydata array. These are used as weights in the least-squares problem i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )`` If None, the uncertainties are assumed to be 1. absolute_sigma : bool, optional If False, `sigma` denotes relative weights of the data points. The returned covariance matrix `pcov` is based on *estimated* errors in the data, and is not affected by the overall magnitude of the values in `sigma`. Only the relative magnitudes of the `sigma` values matter. If True, `sigma` describes one standard deviation errors of the input data points. The estimated covariance in `pcov` is based on these values. check_finite : bool, optional If True, check that the input arrays do not contain nans of infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True. 
bounds : 2-tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters.) Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. .. versionadded:: 0.17 method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. Returns ------- popt : array Optimal values for the parameters so that the sum of the squared error of ``f(xdata, *popt) - ydata`` is minimized pcov : 2d array The estimated covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters use ``perr = np.sqrt(np.diag(pcov))``. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used. RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. Examples -------- >>> import numpy as np >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... 
return a * np.exp(-b * x) + c >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> ydata = y + 0.2 * np.random.normal(size=len(xdata)) >>> popt, pcov = curve_fit(func, xdata, ydata) Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2`` and ``0 < c < 1``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.])) """ if p0 is None: # determine number of parameters by inspecting the function from scipy._lib._util import getargspec_no_self as _getargspec args, varargs, varkw, defaults = _getargspec(f) if len(args) < 2: raise ValueError("Unable to determine number of fit parameters.") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError("Method 'lm' only works for unconstrained problems. " "Use 'trf' or 'dogbox' instead.") # NaNs can not be handled if check_finite: ydata = np.asarray_chkfinite(ydata) else: ydata = np.asarray(ydata) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. if check_finite: xdata = np.asarray_chkfinite(xdata) else: xdata = np.asarray(xdata) weights = 1.0 / asarray(sigma) if sigma is not None else None func = _wrap_func(f, xdata, ydata, weights) if callable(jac): jac = _wrap_jac(jac, xdata, weights) elif jac is None and method != 'lm': jac = '2-point' if method == 'lm': # Remove full_output from kwargs, otherwise we're passing it in twice. return_full = kwargs.pop('full_output', False) res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError("Optimal parameters not found: " + errmsg) else: res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError("Optimal parameters not found: " + res.message) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) return_full = False warn_cov = False if pcov is None: # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ydata.size > p0.size: s_sq = cost / (ydata.size - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if return_full: return popt, pcov, infodict, errmsg, ier else: return popt, pcov def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): """Perform a simple check on the gradient for correctness. 
""" x = atleast_1d(x0) n = len(x) x = x.reshape((n,)) fvec = atleast_1d(fcn(x, *args)) m = len(fvec) fvec = fvec.reshape((m,)) ldfjac = m fjac = atleast_1d(Dfcn(x, *args)) fjac = fjac.reshape((m, n)) if col_deriv == 0: fjac = transpose(fjac) xp = zeros((n,), float) err = zeros((m,), float) fvecp = None _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) fvecp = atleast_1d(fcn(xp, *args)) fvecp = fvecp.reshape((m,)) _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) good = (product(greater(err, 0.5), axis=0)) return (good, err) def _del2(p0, p1, d): return p0 - np.square(p1 - p0) / d def _relerr(actual, desired): return (actual - desired) / desired def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): p0 = x0 for i in range(maxiter): p1 = func(p0, *args) if use_accel: p2 = func(p1, *args) d = p2 - 2.0 * p1 + p0 p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) else: p = p1 relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) if np.all(np.abs(relerr) < xtol): return p p0 = p msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) raise RuntimeError(msg) def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): """ Find a fixed point of the function. Given a function of one or more variables and a starting point, find a fixed-point of the function: i.e. where ``func(x0) == x0``. Parameters ---------- func : function Function to evaluate. x0 : array_like Fixed point of function. args : tuple, optional Extra arguments to `func`. xtol : float, optional Convergence tolerance, defaults to 1e-08. maxiter : int, optional Maximum number of iterations, defaults to 500. method : {"del2", "iteration"}, optional Method of finding the fixed-point, defaults to "del2" which uses Steffensen's Method with Aitken's ``Del^2`` convergence acceleration [1]_. The "iteration" method simply iterates the function until convergence is detected, without attempting to accelerate the convergence. References ---------- .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 Examples -------- >>> from scipy import optimize >>> def func(x, c1, c2): ... return np.sqrt(c1/(x+c2)) >>> c1 = np.array([10,12.]) >>> c2 = np.array([3, 5.]) >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) array([ 1.4920333 , 1.37228132]) """ use_accel = {'del2': True, 'iteration': False}[method] x0 = _asarray_validated(x0, as_inexact=True) return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
mit
-4,117,547,933,951,803,400
36.100851
87
0.588917
false
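The ``fsolve`` docstring in the record above has no usage example; a brief sketch of solving a small nonlinear system with it, using only the public SciPy API that the record documents:

import numpy as np
from scipy.optimize import fsolve

def equations(x):
    # x0*cos(x1) = 4 and x0*x1 - x1 = 5
    return [x[0] * np.cos(x[1]) - 4,
            x[0] * x[1] - x[1] - 5]

root = fsolve(equations, [1.0, 1.0])
print(root)             # roughly [6.504, 0.908]
print(equations(root))  # residuals close to zero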
robwarm/gpaw-symm
gpaw/test/pw/lfc.py
1
1771
import numpy as np from gpaw.test import equal from gpaw.grid_descriptor import GridDescriptor from gpaw.spline import Spline import gpaw.mpi as mpi from gpaw.lfc import LocalizedFunctionsCollection as LFC from gpaw.wavefunctions.pw import PWDescriptor, PWLFC from gpaw.kpt_descriptor import KPointDescriptor x = 2.0 rc = 3.5 r = np.linspace(0, rc, 100) n = 40 a = 8.0 gd = GridDescriptor((n, n, n), (a, a, a), comm=mpi.serial_comm) kpts = np.array([(0.25, 0.25, 0.0)]) kd = KPointDescriptor(kpts) spos_ac = np.array([(0.15, 0.5, 0.95)]) pd = PWDescriptor(45, gd, complex, kd) eikr = np.ascontiguousarray(np.exp(2j * np.pi * np.dot(np.indices(gd.N_c).T, (kpts / gd.N_c).T).T)[0]) from gpaw.fftw import FFTPlan print(FFTPlan) for l in range(3): print(l) s = Spline(l, rc, 2 * x**1.5 / np.pi * np.exp(-x * r**2)) lfc1 = LFC(gd, [[s]], kd, dtype=complex) lfc2 = PWLFC([[s]], pd) c_axi = {0: np.zeros((1, 2 * l + 1), complex)} c_axi[0][0, 0] = 1.9 - 4.5j c_axiv = {0: np.zeros((1, 2 * l + 1, 3), complex)} b1 = gd.zeros(1, dtype=complex) b2 = pd.zeros(1, dtype=complex) for lfc, b in [(lfc1, b1), (lfc2, b2)]: lfc.set_positions(spos_ac) lfc.add(b, c_axi, 0) b2 = pd.ifft(b2[0]) * eikr equal(abs(b2-b1[0]).max(), 0, 0.001) b1 = eikr[None] b2 = pd.fft(b1[0] * 0 + 1).reshape((1, -1)) results = [] results2 = [] for lfc, b in [(lfc1, b1), (lfc2, b2)]: lfc.integrate(b, c_axi, 0) results.append(c_axi[0][0].copy()) lfc.derivative(b, c_axiv, 0) results2.append(c_axiv[0][0].copy()) equal(abs(np.ptp(results2, 0)).max(), 0, 1e-7) equal(abs(np.ptp(results, 0)).max(), 0, 3e-8)
gpl-3.0
1,479,708,894,144,279,600
26.671875
82
0.568041
false
apatil/covariance-prior
cov_prior/givens_step.py
1
1366
# Author: Anand Patil # Date: 2 June 2009 # License: Creative Commons BY-NC-SA #################################### from fast_givens import fg import pymc as pm import numpy as np from ortho_basis import OrthogonalBasis __all__ = ['fast_givens', 'GivensStepper'] def fast_givens(o,i,j,t): "Givens rotates the matrix o." if i==j: raise ValueError, 'i must be different from j.' oc = o.copy('F') fg(o,oc,i+1,j+1,t) return oc class GivensStepper(pm.Metropolis): """docstring for GivensStepper""" def __init__(self, o, kappa=1.): pm.Metropolis.__init__(self, o) self.o = o self.adaptive_scale_factor = 1./kappa def propose(self): t_p = pm.rvon_mises(0, 1./self.adaptive_scale_factor) i_p = np.random.randint(self.o.n-1) j_p = np.random.randint(i_p+1, self.o.n) self.o.value = fast_givens(self.o.value, i_p, j_p, t_p) def tune(self, *args, **kwargs): if self.adaptive_scale_factor>=1e6: return False else: return pm.Metropolis.tune(self, *args, **kwargs) @staticmethod def competence(o): if isinstance(o, OrthogonalBasis): if o.value.shape[0] > 1: return 3 else: return 0 else: return 0
mit
5,670,105,406,355,458,000
26.34
63
0.541728
false
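The ``fast_givens`` helper in the record above delegates to a compiled routine (``fg``) that is not shown. As a rough, hedged illustration of what a single Givens rotation step does to an orthogonal matrix (not necessarily the same index or sign convention as ``fg``):

import numpy as np

def givens_rotate(o, i, j, t):
    """Left-multiply o by a plane rotation of angle t in the (i, j) plane."""
    g = np.eye(o.shape[0])
    g[i, i] = g[j, j] = np.cos(t)
    g[i, j] = np.sin(t)
    g[j, i] = -np.sin(t)
    return g.dot(o)

o = np.eye(4)
o_rot = givens_rotate(o, 0, 2, 0.3)
# The result stays orthogonal because the rotation itself is orthogonal.
assert np.allclose(o_rot.dot(o_rot.T), np.eye(4))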
ndingwall/scikit-learn
examples/linear_model/plot_poisson_regression_non_normal_loss.py
10
22927
""" ====================================== Poisson regression and non-normal loss ====================================== This example illustrates the use of log-linear Poisson regression on the `French Motor Third-Party Liability Claims dataset <https://www.openml.org/d/41214>`_ from [1]_ and compares it with a linear model fitted with the usual least squared error and a non-linear GBRT model fitted with the Poisson loss (and a log-link). A few definitions: - A **policy** is a contract between an insurance company and an individual: the **policyholder**, that is, the vehicle driver in this case. - A **claim** is the request made by a policyholder to the insurer to compensate for a loss covered by the insurance. - The **exposure** is the duration of the insurance coverage of a given policy, in years. - The claim **frequency** is the number of claims divided by the exposure, typically measured in number of claims per year. In this dataset, each sample corresponds to an insurance policy. Available features include driver age, vehicle age, vehicle power, etc. Our goal is to predict the expected frequency of claims following car accidents for a new policyholder given the historical data over a population of policyholders. .. [1] A. Noll, R. Salzmann and M.V. Wuthrich, Case Study: French Motor Third-Party Liability Claims (November 8, 2018). `doi:10.2139/ssrn.3164764 <http://dx.doi.org/10.2139/ssrn.3164764>`_ """ print(__doc__) # Authors: Christian Lorentzen <lorentzen.ch@gmail.com> # Roman Yurchak <rth.yurchak@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt import pandas as pd ############################################################################## # The French Motor Third-Party Liability Claims dataset # ----------------------------------------------------- # # Let's load the motor claim dataset from OpenML: # https://www.openml.org/d/41214 from sklearn.datasets import fetch_openml df = fetch_openml(data_id=41214, as_frame=True).frame df # %% # The number of claims (``ClaimNb``) is a positive integer that can be modeled # as a Poisson distribution. It is then assumed to be the number of discrete # events occurring with a constant rate in a given time interval (``Exposure``, # in units of years). # # Here we want to model the frequency ``y = ClaimNb / Exposure`` conditionally # on ``X`` via a (scaled) Poisson distribution, and use ``Exposure`` as # ``sample_weight``. df["Frequency"] = df["ClaimNb"] / df["Exposure"] print("Average Frequency = {}" .format(np.average(df["Frequency"], weights=df["Exposure"]))) print("Fraction of exposure with zero claims = {0:.1%}" .format(df.loc[df["ClaimNb"] == 0, "Exposure"].sum() / df["Exposure"].sum())) fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(16, 4)) ax0.set_title("Number of claims") _ = df["ClaimNb"].hist(bins=30, log=True, ax=ax0) ax1.set_title("Exposure in years") _ = df["Exposure"].hist(bins=30, log=True, ax=ax1) ax2.set_title("Frequency (number of claims per year)") _ = df["Frequency"].hist(bins=30, log=True, ax=ax2) # %% # The remaining columns can be used to predict the frequency of claim events. # Those columns are very heterogeneous with a mix of categorical and numeric # variables with different scales, possibly very unevenly distributed. 
#
# In order to fit linear models with those predictors it is therefore
# necessary to perform standard feature transformations as follows:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
from sklearn.compose import ColumnTransformer


log_scale_transformer = make_pipeline(
    FunctionTransformer(np.log, validate=False),
    StandardScaler()
)

linear_model_preprocessor = ColumnTransformer(
    [
        ("passthrough_numeric", "passthrough",
            ["BonusMalus"]),
        ("binned_numeric", KBinsDiscretizer(n_bins=10),
            ["VehAge", "DrivAge"]),
        ("log_scaled_numeric", log_scale_transformer,
            ["Density"]),
        ("onehot_categorical", OneHotEncoder(),
            ["VehBrand", "VehPower", "VehGas", "Region", "Area"]),
    ],
    remainder="drop",
)

# %%
# A constant prediction baseline
# ------------------------------
#
# It is worth noting that more than 93% of policyholders have zero claims. If
# we were to convert this problem into a binary classification task, it would
# be significantly imbalanced, and even a simplistic model that would only
# predict mean can achieve an accuracy of 93%.
#
# To evaluate the pertinence of the used metrics, we will consider as a
# baseline a "dummy" estimator that constantly predicts the mean frequency of
# the training sample.

from sklearn.dummy import DummyRegressor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split

df_train, df_test = train_test_split(df, test_size=0.33, random_state=0)

dummy = Pipeline([
    ("preprocessor", linear_model_preprocessor),
    ("regressor", DummyRegressor(strategy='mean')),
]).fit(df_train, df_train["Frequency"],
       regressor__sample_weight=df_train["Exposure"])

##############################################################################
# Let's compute the performance of this constant prediction baseline with 3
# different regression metrics:

from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_poisson_deviance


def score_estimator(estimator, df_test):
    """Score an estimator on the test set."""
    y_pred = estimator.predict(df_test)

    print("MSE: %.3f" %
          mean_squared_error(df_test["Frequency"], y_pred,
                             sample_weight=df_test["Exposure"]))
    print("MAE: %.3f" %
          mean_absolute_error(df_test["Frequency"], y_pred,
                              sample_weight=df_test["Exposure"]))

    # Ignore non-positive predictions, as they are invalid for
    # the Poisson deviance.
    mask = y_pred > 0
    if (~mask).any():
        n_masked, n_samples = (~mask).sum(), mask.shape[0]
        print(f"WARNING: Estimator yields invalid, non-positive predictions "
              f" for {n_masked} samples out of {n_samples}. These predictions "
              f"are ignored when computing the Poisson deviance.")

    print("mean Poisson deviance: %.3f" %
          mean_poisson_deviance(df_test["Frequency"][mask],
                                y_pred[mask],
                                sample_weight=df_test["Exposure"][mask]))


print("Constant mean frequency evaluation:")
score_estimator(dummy, df_test)

# %%
# (Generalized) linear models
# ---------------------------
#
# We start by modeling the target variable with the (l2 penalized) least
# squares linear regression model, more commonly known as Ridge regression. We
# use a low penalization `alpha`, as we expect such a linear model to under-fit
# on such a large dataset.
from sklearn.linear_model import Ridge ridge_glm = Pipeline([ ("preprocessor", linear_model_preprocessor), ("regressor", Ridge(alpha=1e-6)), ]).fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]) # %% # The Poisson deviance cannot be computed on non-positive values predicted by # the model. For models that do return a few non-positive predictions (e.g. # :class:`~sklearn.linear_model.Ridge`) we ignore the corresponding samples, # meaning that the obtained Poisson deviance is approximate. An alternative # approach could be to use :class:`~sklearn.compose.TransformedTargetRegressor` # meta-estimator to map ``y_pred`` to a strictly positive domain. print("Ridge evaluation:") score_estimator(ridge_glm, df_test) # %% # Next we fit the Poisson regressor on the target variable. We set the # regularization strength ``alpha`` to approximately 1e-6 over number of # samples (i.e. `1e-12`) in order to mimic the Ridge regressor whose L2 penalty # term scales differently with the number of samples. # # Since the Poisson regressor internally models the log of the expected target # value instead of the expected value directly (log vs identity link function), # the relationship between X and y is not exactly linear anymore. Therefore the # Poisson regressor is called a Generalized Linear Model (GLM) rather than a # vanilla linear model as is the case for Ridge regression. from sklearn.linear_model import PoissonRegressor n_samples = df_train.shape[0] poisson_glm = Pipeline([ ("preprocessor", linear_model_preprocessor), ("regressor", PoissonRegressor(alpha=1e-12, max_iter=300)) ]) poisson_glm.fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]) print("PoissonRegressor evaluation:") score_estimator(poisson_glm, df_test) # %% # Gradient Boosting Regression Trees for Poisson regression # --------------------------------------------------------- # # Finally, we will consider a non-linear model, namely Gradient Boosting # Regression Trees. Tree-based models do not require the categorical data to be # one-hot encoded: instead, we can encode each category label with an arbitrary # integer using :class:`~sklearn.preprocessing.OrdinalEncoder`. With this # encoding, the trees will treat the categorical features as ordered features, # which might not be always a desired behavior. However this effect is limited # for deep enough trees which are able to recover the categorical nature of the # features. The main advantage of the # :class:`~sklearn.preprocessing.OrdinalEncoder` over the # :class:`~sklearn.preprocessing.OneHotEncoder` is that it will make training # faster. # # Gradient Boosting also gives the possibility to fit the trees with a Poisson # loss (with an implicit log-link function) instead of the default # least-squares loss. Here we only fit trees with the Poisson loss to keep this # example concise. 
from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.preprocessing import OrdinalEncoder tree_preprocessor = ColumnTransformer( [ ("categorical", OrdinalEncoder(), ["VehBrand", "VehPower", "VehGas", "Region", "Area"]), ("numeric", "passthrough", ["VehAge", "DrivAge", "BonusMalus", "Density"]), ], remainder="drop", ) poisson_gbrt = Pipeline([ ("preprocessor", tree_preprocessor), ("regressor", HistGradientBoostingRegressor(loss="poisson", max_leaf_nodes=128)), ]) poisson_gbrt.fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]) print("Poisson Gradient Boosted Trees evaluation:") score_estimator(poisson_gbrt, df_test) # %% # Like the Poisson GLM above, the gradient boosted trees model minimizes # the Poisson deviance. However, because of a higher predictive power, # it reaches lower values of Poisson deviance. # # Evaluating models with a single train / test split is prone to random # fluctuations. If computing resources allow, it should be verified that # cross-validated performance metrics would lead to similar conclusions. # # The qualitative difference between these models can also be visualized by # comparing the histogram of observed target values with that of predicted # values: fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(16, 6), sharey=True) fig.subplots_adjust(bottom=0.2) n_bins = 20 for row_idx, label, df in zip(range(2), ["train", "test"], [df_train, df_test]): df["Frequency"].hist(bins=np.linspace(-1, 30, n_bins), ax=axes[row_idx, 0]) axes[row_idx, 0].set_title("Data") axes[row_idx, 0].set_yscale('log') axes[row_idx, 0].set_xlabel("y (observed Frequency)") axes[row_idx, 0].set_ylim([1e1, 5e5]) axes[row_idx, 0].set_ylabel(label + " samples") for idx, model in enumerate([ridge_glm, poisson_glm, poisson_gbrt]): y_pred = model.predict(df) pd.Series(y_pred).hist(bins=np.linspace(-1, 4, n_bins), ax=axes[row_idx, idx+1]) axes[row_idx, idx + 1].set( title=model[-1].__class__.__name__, yscale='log', xlabel="y_pred (predicted expected Frequency)" ) plt.tight_layout() # %% # The experimental data presents a long tail distribution for ``y``. In all # models, we predict the expected frequency of a random variable, so we will # have necessarily fewer extreme values than for the observed realizations of # that random variable. This explains that the mode of the histograms of model # predictions doesn't necessarily correspond to the smallest value. # Additionally, the normal distribution used in ``Ridge`` has a constant # variance, while for the Poisson distribution used in ``PoissonRegressor`` and # ``HistGradientBoostingRegressor``, the variance is proportional to the # predicted expected value. # # Thus, among the considered estimators, ``PoissonRegressor`` and # ``HistGradientBoostingRegressor`` are a-priori better suited for modeling the # long tail distribution of the non-negative data as compared to the ``Ridge`` # model which makes a wrong assumption on the distribution of the target # variable. # # The ``HistGradientBoostingRegressor`` estimator has the most flexibility and # is able to predict higher expected values. # # Note that we could have used the least squares loss for the # ``HistGradientBoostingRegressor`` model. This would wrongly assume a normal # distributed response variable as does the `Ridge` model, and possibly # also lead to slightly negative predictions. 
However the gradient boosted
# trees would still perform relatively well and in particular better than
# ``PoissonRegressor`` thanks to the flexibility of the trees combined with the
# large number of training samples.
#
# Evaluation of the calibration of predictions
# --------------------------------------------
#
# To ensure that estimators yield reasonable predictions for different
# policyholder types, we can bin test samples according to ``y_pred`` returned
# by each model. Then for each bin, we compare the mean predicted ``y_pred``,
# with the mean observed target:

from sklearn.utils import gen_even_slices


def _mean_frequency_by_risk_group(y_true, y_pred, sample_weight=None,
                                  n_bins=100):
    """Compare predictions and observations for bins ordered by y_pred.

    We order the samples by ``y_pred`` and split it in bins.
    In each bin the observed mean is compared with the predicted mean.

    Parameters
    ----------
    y_true: array-like of shape (n_samples,)
        Ground truth (correct) target values.
    y_pred: array-like of shape (n_samples,)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,)
        Sample weights.
    n_bins: int
        Number of bins to use.

    Returns
    -------
    bin_centers: ndarray of shape (n_bins,)
        bin centers
    y_true_bin: ndarray of shape (n_bins,)
        average y_true for each bin
    y_pred_bin: ndarray of shape (n_bins,)
        average y_pred for each bin
    """
    idx_sort = np.argsort(y_pred)
    bin_centers = np.arange(0, 1, 1/n_bins) + 0.5/n_bins
    y_pred_bin = np.zeros(n_bins)
    y_true_bin = np.zeros(n_bins)

    for n, sl in enumerate(gen_even_slices(len(y_true), n_bins)):
        weights = sample_weight[idx_sort][sl]
        y_pred_bin[n] = np.average(
            y_pred[idx_sort][sl], weights=weights
        )
        y_true_bin[n] = np.average(
            y_true[idx_sort][sl],
            weights=weights
        )
    return bin_centers, y_true_bin, y_pred_bin


print(f"Actual number of claims: {df_test['ClaimNb'].sum()}")
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(12, 8))
plt.subplots_adjust(wspace=0.3)

for axi, model in zip(ax.ravel(), [ridge_glm, poisson_glm, poisson_gbrt,
                                   dummy]):
    y_pred = model.predict(df_test)
    y_true = df_test["Frequency"].values
    exposure = df_test["Exposure"].values
    q, y_true_seg, y_pred_seg = _mean_frequency_by_risk_group(
        y_true, y_pred, sample_weight=exposure, n_bins=10)

    # Name of the model after the estimator used in the last step of the
    # pipeline.
    print(f"Predicted number of claims by {model[-1]}: "
          f"{np.sum(y_pred * exposure):.1f}")

    axi.plot(q, y_pred_seg, marker='x', linestyle="--", label="predictions")
    axi.plot(q, y_true_seg, marker='o', linestyle="--", label="observations")
    axi.set_xlim(0, 1.0)
    axi.set_ylim(0, 0.5)
    axi.set(
        title=model[-1],
        xlabel='Fraction of samples sorted by y_pred',
        ylabel='Mean Frequency (y_pred)'
    )
    axi.legend()
plt.tight_layout()

# %%
# The dummy regression model predicts a constant frequency. This model does not
# attribute the same tied rank to all samples but is nonetheless globally
# well calibrated (to estimate the mean frequency of the entire population).
#
# The ``Ridge`` regression model can predict very low expected frequencies that
# do not match the data. It can therefore severely under-estimate the risk for
# some policyholders.
#
# ``PoissonRegressor`` and ``HistGradientBoostingRegressor`` show better
# consistency between predicted and observed targets, especially for low
# predicted target values.
# # The sum of all predictions also confirms the calibration issue of the # ``Ridge`` model: it under-estimates by more than 3% the total number of # claims in the test set while the other three models can approximately recover # the total number of claims of the test portfolio. # # Evaluation of the ranking power # ------------------------------- # # For some business applications, we are interested in the ability of the model # to rank the riskiest from the safest policyholders, irrespective of the # absolute value of the prediction. In this case, the model evaluation would # cast the problem as a ranking problem rather than a regression problem. # # To compare the 3 models from this perspective, one can plot the cumulative # proportion of claims vs the cumulative proportion of exposure for the test # samples order by the model predictions, from safest to riskiest according to # each model. # # This plot is called a Lorenz curve and can be summarized by the Gini index: from sklearn.metrics import auc def lorenz_curve(y_true, y_pred, exposure): y_true, y_pred = np.asarray(y_true), np.asarray(y_pred) exposure = np.asarray(exposure) # order samples by increasing predicted risk: ranking = np.argsort(y_pred) ranked_frequencies = y_true[ranking] ranked_exposure = exposure[ranking] cumulated_claims = np.cumsum(ranked_frequencies * ranked_exposure) cumulated_claims /= cumulated_claims[-1] cumulated_exposure = np.cumsum(ranked_exposure) cumulated_exposure /= cumulated_exposure[-1] return cumulated_exposure, cumulated_claims fig, ax = plt.subplots(figsize=(8, 8)) for model in [dummy, ridge_glm, poisson_glm, poisson_gbrt]: y_pred = model.predict(df_test) cum_exposure, cum_claims = lorenz_curve(df_test["Frequency"], y_pred, df_test["Exposure"]) gini = 1 - 2 * auc(cum_exposure, cum_claims) label = "{} (Gini: {:.2f})".format(model[-1], gini) ax.plot(cum_exposure, cum_claims, linestyle="-", label=label) # Oracle model: y_pred == y_test cum_exposure, cum_claims = lorenz_curve(df_test["Frequency"], df_test["Frequency"], df_test["Exposure"]) gini = 1 - 2 * auc(cum_exposure, cum_claims) label = "Oracle (Gini: {:.2f})".format(gini) ax.plot(cum_exposure, cum_claims, linestyle="-.", color="gray", label=label) # Random Baseline ax.plot([0, 1], [0, 1], linestyle="--", color="black", label="Random baseline") ax.set( title="Lorenz curves by model", xlabel='Cumulative proportion of exposure (from safest to riskiest)', ylabel='Cumulative proportion of claims' ) ax.legend(loc="upper left") # %% # As expected, the dummy regressor is unable to correctly rank the samples and # therefore performs the worst on this plot. # # The tree-based model is significantly better at ranking policyholders by risk # while the two linear models perform similarly. # # All three models are significantly better than chance but also very far from # making perfect predictions. # # This last point is expected due to the nature of the problem: the occurrence # of accidents is mostly dominated by circumstantial causes that are not # captured in the columns of the dataset and can indeed be considered as purely # random. # # The linear models assume no interactions between the input variables which # likely causes under-fitting. Inserting a polynomial feature extractor # (:func:`~sklearn.preprocessing.PolynomialFeatures`) indeed increases their # discrimative power by 2 points of Gini index. In particular it improves the # ability of the models to identify the top 5% riskiest profiles. 
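#
# The snippet below is only a minimal, untested sketch of such a variant and is
# not part of the benchmark above: it inserts a ``PolynomialFeatures`` step
# with pairwise interaction terms between the linear preprocessor and the
# Poisson GLM. It assumes that ``linear_model_preprocessor`` is the column
# transformer defined for the linear models earlier in this example and that
# ``PoissonRegressor`` has already been imported; adapt the names and the
# regularization strength to your setup.

from sklearn.preprocessing import PolynomialFeatures

poisson_glm_interactions = Pipeline([
    # assumed to be the preprocessor used for the linear models above
    ("preprocessor", linear_model_preprocessor),
    # degree-2 interaction terms only, to keep the feature count moderate
    ("interactions", PolynomialFeatures(degree=2, interaction_only=True,
                                        include_bias=False)),
    ("regressor", PoissonRegressor(alpha=1e-12, max_iter=1000)),
])
poisson_glm_interactions.fit(
    df_train, df_train["Frequency"],
    regressor__sample_weight=df_train["Exposure"])

# %%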
#
# Main takeaways
# --------------
#
# - The performance of the models can be evaluated by their ability to yield
#   well-calibrated predictions and a good ranking.
#
# - The calibration of the model can be assessed by plotting the mean observed
#   value vs the mean predicted value on groups of test samples binned by
#   predicted risk.
#
# - The least squares loss (along with the implicit use of the identity link
#   function) of the Ridge regression model seems to cause this model to be
#   badly calibrated. In particular, it tends to underestimate the risk and can
#   even predict invalid negative frequencies.
#
# - Using the Poisson loss with a log-link can correct these problems and lead
#   to a well-calibrated linear model.
#
# - The Gini index reflects the ability of a model to rank predictions
#   irrespective of their absolute values, and therefore only assesses the
#   ranking power of the models.
#
# - Despite the improvement in calibration, the ranking power of both linear
#   models is comparable and well below the ranking power of the Gradient
#   Boosting Regression Trees.
#
# - The Poisson deviance computed as an evaluation metric reflects both the
#   calibration and the ranking power of the model. It also makes a linear
#   assumption on the ideal relationship between the expected value and the
#   variance of the response variable. For the sake of conciseness we did not
#   check whether this assumption holds.
#
# - Traditional regression metrics such as Mean Squared Error and Mean Absolute
#   Error are hard to meaningfully interpret on count values with many zeros.

plt.show()
bsd-3-clause
8,887,515,850,015,059,000
39.012216
79
0.689973
false
ezekial4/atomic_neu
examples/profiles.py
1
3306
""" typical carbon content is n_c / n_e = 0.05 """ import numpy as np import matplotlib.pyplot as plt import atomic from ensemble_average import annotate_lines def parabolic_profile(y0): x = np.linspace(1., 0, 50) y = 1 - x**2 y *= y0 return x, y r, temperature = parabolic_profile(3e3) r, density = parabolic_profile(1e19) try: ad except NameError: from atomic.pec import TransitionPool ad = atomic.element('argon') tp = TransitionPool.from_adf15('adas_data/pec/transport_llu#ar*.dat') ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad) eq = atomic.CoronalEquilibrium(ad) y = eq.ionisation_stage_distribution(temperature, density) ne_tau = np.array([1e-1, 1e-2, 1e-3]) impurity_fraction = 0.05 texts = ['$10^{%d}$' % i for i in np.log10(ne_tau)] try: tau_ss except NameError: t_normalized = np.logspace(-4, 0, 500) t_normalized -= t_normalized[0] times = t_normalized rt = atomic.RateEquations(ad) yy = rt.solve(times, temperature, density) tau_ss = yy.steady_state_time() y_bar = yy.ensemble_average() # prepare plots f = plt.figure(1); f.clf() ax1 = f.add_subplot(511) ax2 = f.add_subplot(512, sharex=ax1) #ax3 = f.add_subplot(513, sharex=ax1) ax4 = f.add_subplot(513, sharex=ax1) ax5 = f.add_subplot(514, sharex=ax1) ax6 = f.add_subplot(515, sharex=ax1) # density and temperature profiles ax = ax1 ax.plot(r,density/1e19, r, temperature/1e3) ax.set_xlabel(r'$\rho$') # steady state time ax = ax2 line, = ax.semilogy(r, tau_ss) ax.set_ylabel(r'$\tau_\mathrm{ss}\ [s]$') ax.set_ylim(ymax=2) # fractional abundance #ax = ax3 #lines_abundance = ax.semilogy(r, y.y.T*100) #ax.set_ylim(0.3, 400) #yy.y_coronal.replot_colored(line, lines_abundance) def normalized_gradient(x, y): return -np.gradient(y)/np.gradient(x)/y # fractional abundance, Zeff, Zmean y_selected = y_bar.select_times(ne_tau) for y in y_selected: #ax3.semilogy(r, y.y[-1,:].T*100, color='black') #lines = ax4.plot(r, y.effective_charge(impurity_fraction), # color='black', ls='--') rad = atomic.Radiation(y, impurity_fraction=impurity_fraction) total_power = rad.power['total'] ax4.plot(r, total_power) radiation_parameter = total_power / (impurity_fraction * density) line, = ax5.plot(r, radiation_parameter) rlte = normalized_gradient(r, temperature) rlrad = normalized_gradient(r, total_power) ax6.plot(r, rlrad) ax6.plot(r, rlte, 'k--') ax6.set_ylim(0,10) #from matplotlib.ticker import FormatStrFormatter #ax = ax3 #major_formatter = FormatStrFormatter('$%d\%%$') #ax.yaxis.set_major_formatter(major_formatter) #y.annotate_ionisation_stages(lines_abundance) from matplotlib.ticker import MaxNLocator ax = ax4 locator = MaxNLocator(4) ax.set_ylabel(r'$Z_\mathrm{eff}$') ax.yaxis.set_major_locator(locator) lo, hi = ax.get_ylim() ax.set_ylim(lo, 1.1 * hi) annotate_lines(texts, ha='left', va='bottom', ax=ax) # radiation profile ax = ax5 ax.set_yticks(ax.get_yticks()[:-1:2]) annotate_lines(texts, ha='left', va='bottom', ax=ax) locator = MaxNLocator(4) ax.yaxis.set_major_locator(locator) # position subplots for ax in f.axes: if not ax.is_last_row(): ax.get_xaxis().label.set_visible(False) ax.label_outer() #f.subplots_adjust(hspace=0) plt.draw() plt.show()
mit
-6,893,652,996,516,458,000
22.784173
73
0.680278
false